Scraping reviews from TripAdvisor



Suppose I am scraping reviews from the URL

https://www.tripadvisor.com/Hotel_Review-g562819-d289642-Reviews-Hotel_Caserio-Playa_del_Ingles_Maspalomas_Gran_Canaria_Canary_Islands.html

This page does not contain all of the reviews I want to scrape. How do I scrape the reviews from all of the following pages as well?

I used the following code, but it still only shows the reviews from the first page!

from bs4 import BeautifulSoup
import requests

URL_BASE = "https://www.tripadvisor.com/Hotel_Review-g562819-d289642-Reviews-Hotel_Caserio-Playa_del_Ingles_Maspalomas_Gran_Canaria_Canary_Islands.html"
MAX_PAGES = 30
counter = 0

for i in range(1, MAX_PAGES):
    if i > 1:
        url = "%spage/%d/" % (URL_BASE, i)
    else:
        url = URL_BASE
    req = requests.get(url)
    statusCode = req.status_code
    if statusCode == 200:
        html = BeautifulSoup(req.text, "html.parser")
        resultsoup = html.find_all('p', {'class': 'partial_entry'})
    else:
        break

for review in resultsoup:
    review_list = review.get_text()
    print(review_list)

Below is an example structured like a Scrapy spider (start_urls, parse functions), but using requests and BeautifulSoup.

The server accepts an offset added to the URL (anywhere before .html):

  • -or5 to get the second page,
  • -or10 to get the third page,

and so on.

You can even skip the words (they are only there for SEO) and use just

https://www.tripadvisor.com/g562819-d289642-or5.html
https://www.tripadvisor.com/g562819-d289642-or10.html

to get the next pages of reviews.
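
As a minimal sketch of that idea (assuming 5 reviews per page and hardcoding three pages; the full example below reads the real review count from the page instead):

URL = "https://www.tripadvisor.com/Hotel_Review-g562819-d289642-Reviews-Hotel_Caserio-Playa_del_Ingles_Maspalomas_Gran_Canaria_Canary_Islands.html"

# insert the -or{offset} marker just before ".html"
template = URL.replace('.html', '-or{}.html')

# first three pages of reviews: offsets 0, 5, 10 (5 reviews per page)
for offset in range(0, 15, 5):
    print(template.format(offset))

The full example: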

from bs4 import BeautifulSoup
import requests
#import webbrowser
def get_soup(url):
    headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:57.0) Gecko/20100101 Firefox/57.0'}
    r = s.get(url, headers=headers)
    #with open('temp.html', 'wb') as f:
    #    f.write(r.content)
    #    webbrowser.open('temp.html')
    if r.status_code != 200:
        print('status code:', r.status_code)
    else:
        return BeautifulSoup(r.text, 'html.parser')
def parse(url, response):
    if not response:
        print('no response:', url)
        return
    # get number of reviews
    num_reviews = response.find('span', class_='reviews_header_count').text
    num_reviews = num_reviews[1:-1] # remove `( )`
    num_reviews = num_reviews.replace(',', '') # remove `,`
    num_reviews = int(num_reviews)
    print('num_reviews:', num_reviews, type(num_reviews))
    # create template for urls to pages with reviews
    url = url.replace('.html', '-or{}.html')
    print('template:', url)
    # load pages with reviews
    for offset in range(0, num_reviews, 5):
        print('url:', url.format(offset))
        url_ = url.format(offset)
        parse_reviews(url_, get_soup(url_))
        return # for test only - to stop after first page
def parse_reviews(url, response):
    print('review:', url)
    if not response:
        print('no response:', url)
        return
    # get every review
    for idx, review in enumerate(response.find_all('div', class_='review-container')):
        item = {
            'hotel_name': response.find('h1', class_='heading_title').text,
            'review_title': review.find('span', class_='noQuotes').text,
            'review_body': review.find('p', class_='partial_entry').text,
            'review_date': review.find('span', class_='relativeDate')['title'],  # full date is in the `title` attribute
            'num_reviews_reviewer': review.find('span', class_='badgetext').text,
            'reviewer_name': review.find('span', class_='scrname').text,
            'bubble_rating': review.select_one('div.reviewItemInline span.ui_bubble_rating')['class'][1][7:],  # second class is e.g. 'bubble_45'; [7:] strips the 'bubble_' prefix
        }
        results.append(item) # <--- add to global list
        #~ yield item
        for key,val in item.items():
            print(key, ':', val)
        print('----')
        #return # for test only - to stop after first review

# --- main ---
s = requests.Session()
start_urls = [
    'https://www.tripadvisor.com/Hotel_Review-g562819-d289642-Reviews-Hotel_Caserio-Playa_del_Ingles_Maspalomas_Gran_Canaria_Canary_Islands.html',
    #'https://www.tripadvisor.com/Hotel_Review-g60795-d102542-Reviews-Courtyard_Philadelphia_Airport-Philadelphia_Pennsylvania.html',
    #'https://www.tripadvisor.com/Hotel_Review-g60795-d122332-Reviews-The_Ritz_Carlton_Philadelphia-Philadelphia_Pennsylvania.html',
]
results = [] # <--- global list for items
for url in start_urls:
    parse(url, get_soup(url))
import pandas as pd
df = pd.DataFrame(results) # <--- convert list to DataFrame
df.to_csv('output.csv')    # <--- save in file
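
If the run succeeds, output.csv holds one row per review, with columns matching the keys of the item dict above. A quick way to inspect the result (assuming the file was written as shown):

import pandas as pd

# read the scraped reviews back; column 0 is the index written by to_csv()
df = pd.read_csv('output.csv', index_col=0)
print(df[['reviewer_name', 'bubble_rating', 'review_title']].head())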
