I am trying to scrape bestbuy.com, and I can only scrape one page instead of multiple pages

Problem description · Votes: 0 · Answers: 3
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.chrome.service import Service as ChromeService
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import configparser
from datetime import datetime, timedelta, date
import time
import json



parser = configparser.RawConfigParser()

parser.read('config.ini')
page=parser['PROPERTIES']['PAGE']
url= parser['PROPERTIES']['URL']
OMIT_KEYWORDS= parser['FILTERS']['OMIT'].split(',')
INCLUDE_KEYWORDS=parser['FILTERS']['INCLUDE'].split(',')
END_DATE = datetime.strptime(parser['DATE']['END'], '%Y-%m-%d')
START_DATE=datetime.strptime(parser['DATE']['START'],'%Y-%m-%d')
minimum_comment_length = int(parser['PROPERTIES']['MIN_COMMENT_LENGTH'])
maximum_comment_length = int(parser['PROPERTIES']['MAX_COMMENT_LENGTH'])

# Setting up driver options
options = webdriver.ChromeOptions()
# Setting up Path to chromedriver executable file
CHROMEDRIVER_PATH =r'C:\Users\HP\Desktop\INTERNSHIP\Target\chromedriver.exe'
# Adding options
options.add_experimental_option("excludeSwitches", ["enable-automation"])
options.add_experimental_option("useAutomationExtension", False)
# Setting up chrome service
service = ChromeService(executable_path=CHROMEDRIVER_PATH)
# Establishing the Chrome web driver using the service and options above
driver = webdriver.Chrome(service=service, options=options)
wait = WebDriverWait(driver, 20)
driver.implicitly_wait(10)
item_list = []

driver.get(url)
# Scroll only after the page has loaded; scrolling before driver.get() acts on an empty tab
time.sleep(2)
driver.execute_script("window.scrollTo(0,document.body.scrollHeight)")
reviews = wait.until(EC.visibility_of_all_elements_located((By.CSS_SELECTOR, ".review-item")))

for review in reviews:
    # Selenium 4 removed find_element_by_xpath; use find_element(By.XPATH, ...)
    this_review_date_string = review.find_element(By.XPATH, ".//time[contains(@class,'submission-date')]")
    this_review_date_string_ago = this_review_date_string.text
    date_today= date.today()
    

    if "month" in this_review_date_string_ago:
        date_period_string = this_review_date_string_ago.split("month")[0]
        date_period_int = int(date_period_string)*30
        temp_review_date = date_today - timedelta(days=date_period_int)

    elif "day" in this_review_date_string_ago: 
        date_period_string=this_review_date_string_ago.split("day")[0]
        date_period_int = int(date_period_string)
        temp_review_date = date_today - timedelta(days=date_period_int)

    elif "hour" in this_review_date_string_ago: 
        date_period_string=this_review_date_string_ago.split("hour")[0]
        date_period_int = int(date_period_string)
        temp_review_date = date_today - timedelta(hours=date_period_int)

    elif "year" in this_review_date_string_ago:
        date_period_string=this_review_date_string_ago.split("year")[0]
        date_period_int = int(date_period_string)*365
        temp_review_date = date_today - timedelta(days=date_period_int)

    else:
        # Fallback so temp_review_date is always bound, e.g. for an unexpected date format
        temp_review_date = date_today

    this_review_datetime = temp_review_date.strftime('%d %B %Y')
    current_date = datetime.strptime(this_review_datetime, '%d %B %Y')


    if  (START_DATE< current_date < END_DATE):
            item={  
                'stars': review.find_element(By.XPATH, ".//p[contains(@class,'visually-hidden')]").text.replace("out of 5 stars", "").replace("Rated", ""),
                'username': review.find_element(By.XPATH, ".//div[contains(@class,'ugc-author v-fw-medium body-copy-lg')]").text,
                'userurl': "NA",
                'title': review.find_element(By.XPATH, ".//h4[contains(@class,'c-section-title review-title heading-5 v-fw-medium')]").text,
                'review_text': review.find_element(By.XPATH, ".//div[contains(@class,'ugc-review-body')]//p[contains(@class,'pre-white-space')]").text,
                'permalink': "NA",
                'reviewlocation': "NA",
                'reviewdate': this_review_datetime,
                'subproductname': "NA",
                'subproductlink': "NA",
            }
            item_list.append(item)
            
print(item_list)
with open("output.json","r+") as outfile:
    json.dump(item_list,outfile) 

I want to scrape the reviews from all pages, but right now I am only getting one page of them. The link I am scraping is https://www.bestbuy.com/site/reviews/bella-pro-series-12-6-qt-digital-air-fryer-oven-stainless-steel/6412331?variant=A&skuId=6412331&page=1. I want to paginate. Please tell me how to run a loop so that I can scrape all the pages.

python selenium selenium-webdriver web-scraping
3 Answers
1 vote
from selenium.common.exceptions import NoSuchElementException

page = 2
while True:
    try:
        # ... your scraping code for the current page ...
        driver.find_element(By.XPATH, f"//a[text()='{page}']").click()
        page += 1
    except NoSuchElementException:
        # No link with the next page number means the last page was reached
        break

This should be a simple way of clicking the anchor tag that carries the next page number.

<a aria-label="Page 1" class="" data-track="Page 1" href="/site/reviews/bella-pro-series-12-6-qt-digital-air-fryer-oven-stainless-steel/6412331?variant=A&amp;skuId=6412331&amp;page=8&amp;page=1&amp;pageSize=20&amp;sku=6412331&amp;sort=BEST_REVIEW&amp;variant=A">1</a>

1 vote

I don't write much Python, so this won't be very clean. You just need to visit the page with a dynamic page number (start with a range of 200-300), and whenever you see that no review elements are found on a page, assume the reviews have ended and move on to the next product. ...

item_list = []

def getPage():
  for i in range(1, arbitrary_number):  # choose an upper bound larger than the expected page count
    url = f"https://www.bestbuy.com/site/reviews/bella-pro-series-12-6-qt-digital-air-fryer-oven-stainless-steel/6412331?variant=A&skuId=6412331&page={i}"
    driver.get(url)
    # Scroll and wait only after the page has been loaded
    time.sleep(2)
    driver.execute_script(
      "window.scrollTo(0,document.body.scrollHeight)")
    reviews = wait.until(EC.visibility_of_all_elements_located((By.CSS_SELECTOR, ".review-item")))
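
The "stop when a page has no reviews" idea described above is not actually implemented in the snippet. One hedged way to add it, reusing the driver, wait, and time setup from the question (the base_url parameter and the scrape_all_pages name are hypothetical), is:

from selenium.webdriver.common.by import By

def scrape_all_pages(base_url, max_pages=300):
    # Visit page=1, 2, ... until a page renders no ".review-item" elements
    collected = []
    for i in range(1, max_pages + 1):
        driver.get(f"{base_url}&page={i}")
        time.sleep(2)
        driver.execute_script("window.scrollTo(0,document.body.scrollHeight)")
        # find_elements returns an empty list instead of raising, so it suits this check
        reviews = driver.find_elements(By.CSS_SELECTOR, ".review-item")
        if not reviews:
            break  # an empty page means we are past the last page of reviews
        collected.extend(r.text for r in reviews)
    return collected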

0 votes

import requests
from bs4 import BeautifulSoup

for page in range(1, 6):
    soup = BeautifulSoup(requests.get(f"https://www.bestbuy.com/site/searchpage.jsp?st=laptop&page={page}").text, "html.parser")
    for p in soup.select(".sku-item"):
        print(p.select_one(".sku-title").text, p.select_one(".priceView-customer-price").text)
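
Note that this snippet scrapes search results rather than reviews. Whether the same requests/BeautifulSoup pattern works for the reviews page depends on whether Best Buy serves the review markup in the static HTML rather than rendering it with JavaScript, which needs to be verified. A hypothetical adaptation, reusing the ".review-item" and ".ugc-review-body" selectors from the question, would be:

import requests
from bs4 import BeautifulSoup

# A User-Agent header may be needed if plain requests are blocked
url = "https://www.bestbuy.com/site/reviews/bella-pro-series-12-6-qt-digital-air-fryer-oven-stainless-steel/6412331?variant=A&skuId=6412331"
for page in range(1, 6):
    soup = BeautifulSoup(requests.get(f"{url}&page={page}").text, "html.parser")
    items = soup.select(".review-item")
    if not items:
        break  # no reviews in the static HTML: stop (or the page is JS-rendered)
    for item in items:
        body = item.select_one(".ugc-review-body")
        if body:
            print(body.get_text(strip=True))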
