如何使用URL中的页码刮取多个页面

问题描述 投票:1回答:2

当页码在URL中时,如何刮取多个页面?

例如:

https://www.cars.com/for-sale/searchresults.action/?mdId=21811&mkId=20024&page=1&perPage=100&rd=99999&searchSource=PAGINATION&showMore=false&sort=relevance&stkTypId=28880&zc=11209 (其中 page=1 即为页码参数)

我的代码:

import requests
from bs4 import BeautifulSoup
from csv import writer

# Fetch the first page of cars.com search results.
response = requests.get(
    'https://www.cars.com/for-sale/searchresults.action/?mdId=21811&mkId=20024&page=1&perPage=100&rd=99999&searchSource=PAGINATION&showMore=false&sort=relevance&stkTypId=28880&zc=11209',
    timeout=30,  # don't hang forever on a stalled connection
)
response.raise_for_status()  # fail fast on an HTTP error instead of parsing an error page

soup = BeautifulSoup(response.text, 'html.parser')

# One element per listing card on the results page.
posts = soup.find_all(class_='shop-srp-listings__inner')

# newline='' is required by the csv module: without it the file gets a
# blank row after every record on Windows.
with open('posts.csv', 'w', newline='', encoding='utf-8') as csv_file:
    csv_writer = writer(csv_file)
    headers = ['title', 'color', 'price']
    csv_writer.writerow(headers)

    for post in posts:
        title = post.find(class_="listing-row__title").get_text().replace('\n', '').strip()
        # color = post.find("li").get_text().replace('\n', '')
        price = post.find("span", attrs={"class": "listing-row__price"}).get_text().replace('\n', '').strip()
        print(title, price)
        # csv_writer.writerow([title, color, price])

谢谢你的帮助

python web-scraping beautifulsoup
2个回答
1
投票
# Generate the URL for each results page by substituting the page number
# into the query string. range(1, 26) yields pages 1..25 directly,
# replacing the redundant manual counter (page = 0; page += 1).
for page in range(1, 26):
    url = ('https://www.cars.com/for-sale/searchresults.action/?mdId=21811&mkId=20024&page=' + str(page) +
           '&perPage=100&rd=99999&searchSource=PAGINATION&showMore=false&sort=relevance&stkTypId=28880&zc=11209')
    print(url)
    #requests.get(url)

0
投票

获取总页数并迭代请求到每个页面。

import requests
from bs4 import BeautifulSoup
from csv import writer

# Scrape every results page: read the total page count from the first
# page, then request each page in turn and extract title/price per listing.
# newline='' is required by the csv module to avoid blank rows on Windows.
with open('posts.csv', 'w', newline='', encoding='utf-8') as csv_file:
    csv_writer = writer(csv_file)
    headers = ['title', 'color', 'price']
    csv_writer.writerow(headers)

    # Single URL template; the original had two slightly different copies,
    # both corrupted ('for- sale' with a space, 'showMorefalse' missing '=').
    base_url = ('https://www.cars.com/for-sale/searchresults.action/'
                '?mdId=21811&mkId=20024&page={}&perPage=100&rd=99999'
                '&searchSource=PAGINATION&showMore=false&sort=relevance'
                '&stkTypId=28880&zc=11209')

    response = requests.get(base_url.format(1), timeout=30)
    response.raise_for_status()  # fail fast on HTTP errors

    soup = BeautifulSoup(response.text, 'html.parser')

    # BUG FIX: find_all() returns a ResultSet, which cannot be added to an
    # int in range(). Take the first 'js-last-page' element and parse its
    # text as the page count (assumes the element's text is an integer
    # page count — TODO confirm against the live page markup).
    number_of_pages = int(soup.find(class_='js-last-page').get_text().strip())

    for page in range(1, number_of_pages + 1):

        response = requests.get(base_url.format(page), timeout=30)
        response.raise_for_status()

        soup = BeautifulSoup(response.text, 'html.parser')

        posts = soup.find_all(class_='shop-srp-listings__inner')

        for post in posts:
            title = post.find(class_="listing-row__title").get_text().replace('\n', '').strip()
            # color = post.find("li").get_text().replace('\n', '')
            price = post.find("span", attrs={"class": "listing-row__price"}).get_text().replace('\n', '').strip()
            print(title, price)
            # csv_writer.writerow([title, color, price])
© www.soinside.com 2019 - 2024. All rights reserved.