在Scrapy中使用process_value进行链接提取。

问题描述 投票:0回答:1

我想用scrapy从myntra.com提取数据。到目前为止,我的代码是

# -*- coding: utf-8 -*-
from urllib.parse import urljoin

import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule


class VideoSpider(CrawlSpider):
    """Crawl myntra.com product listings and scrape basic product details.

    The start URL is supplied at runtime (``scrapy crawl video -a url=...``);
    Scrapy's base ``Spider.__init__`` copies that keyword argument onto the
    instance, which is where the ``self.url`` read in ``start_requests``
    comes from.
    """

    name = 'video'
    allowed_domains = ['myntra.com']

    # Desktop Chrome UA string — the default Scrapy user agent is blocked by
    # many retail sites, so every request is sent with this one instead.
    user_agent = 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'

    rules = (
        # Product cards.  urljoin resolves site-relative hrefs against the
        # site root and leaves already-absolute URLs intact — plain string
        # concatenation would double slashes or produce broken links such as
        # "https://www.myntra.com/https://...".
        Rule(
            LinkExtractor(
                restrict_xpaths="//li[@class='product-base']",
                process_value=lambda value: urljoin('https://www.myntra.com/', value),
            ),
            callback='parse_item',
            follow=True,
            process_request='set_user_agent',
        ),
        # Pagination: follow "next" links with the custom user agent; no
        # callback, so only product pages produce items.
        Rule(
            LinkExtractor(restrict_xpaths="//li[@class='pagination-next']/a"),
            process_request='set_user_agent',
        ),
    )

    def start_requests(self):
        """Issue the first request to the user-supplied URL with our UA."""
        yield scrapy.Request(
            url=self.url,
            headers={'User-Agent': self.user_agent},
            callback=self.parse,
        )

    def set_user_agent(self, request, response=None):
        """Attach the custom User-Agent to rule-generated requests.

        ``response`` is accepted (and ignored) because Scrapy >= 2.0 calls
        ``Rule.process_request`` with ``(request, response)``; the default
        value keeps this compatible with older versions that pass only the
        request.
        """
        request.headers['User-Agent'] = self.user_agent
        return request

    def parse_item(self, response):
        """Scrape title, price and specification rows from a product page.

        NOTE(review): myntra product pages are largely rendered client-side,
        so these XPaths may return empty lists on the raw HTML — verify
        against a live response.
        """
        item = {
            'title': response.xpath(
                "//div[@class='pdp-price-info']/h1/text()").extract(),
            'price': response.xpath(
                "normalize-space(//span[@class='pdp-price']/strong/text())"
            ).extract(),
            'product-specification': response.xpath(
                "//div[@class='index-tableContainer']/div/div/text()"
            ).extract(),
        }
        # The spec rows are tab-padded in the page source; strip the tabs.
        item['product-specification'] = [
            row.replace("\t", "") for row in item['product-specification']
        ]
        yield item


# //div[@class="search-searchProductsContainer row-base"]//section//ul//li[@class="product-base"]//a//@href

代码中的注释显示了我所有的尝试。

起始网址:https://www.myntra.com/men-footwear

链接提取器中使用的 href 的 XPath 是 //li[@class='product-base']/a/@href。但问题在于,需要在提取到的 href 前面加上 https://www.myntra.com,因此才在 process_value 中使用了 lambda 函数。然而,这段代码并没有按预期运行。

输出

2020-05-26 02:52:12 [scrapy.core.engine] INFO: Spider opened
2020-05-26 02:52:12 [scrapy.extensions.logstats] INFO: Crawled 0 pages (at 0 pages/min), scraped 0 items (at 0 items/min)
2020-05-26 02:52:12 [scrapy.extensions.telnet] INFO: Telnet console listening on 127.0.0.1:6023
2020-05-26 02:52:12 [scrapy.core.engine] DEBUG: Crawled (200) <GET https://www.myntra.com/robots.txt> (referer: None)
2020-05-26 02:52:13 [scrapy.core.engine] DEBUG: Crawled (200) <GET https://www.myntra.com/men-footwear> (referer: None)
2020-05-26 02:52:13 [scrapy.core.engine] INFO: Closing spider (finished)
2020-05-26 02:52:13 [scrapy.statscollectors] INFO: Dumping Scrapy stats:
{'downloader/request_bytes': 1023,
 'downloader/request_count': 2,
 'downloader/request_method_count/GET': 2,
 'downloader/response_bytes': 87336,
 'downloader/response_count': 2,
 'downloader/response_status_count/200': 2,
 'elapsed_time_seconds': 0.76699,
 'finish_reason': 'finished',
 'finish_time': datetime.datetime(2020, 5, 25, 21, 22, 13, 437855),
 'log_count/DEBUG': 2,
 'log_count/INFO': 10,
 'log_count/WARNING': 1,
 'memusage/max': 51507200,
 'memusage/startup': 51507200,
 'response_received_count': 2,
 'robotstxt/request_count': 1,
 'robotstxt/response_count': 1,
 'robotstxt/response_status_count/200': 1,
 'scheduler/dequeued': 1,
 'scheduler/dequeued/memory': 1,
 'scheduler/enqueued': 1,
 'scheduler/enqueued/memory': 1,
 'start_time': datetime.datetime(2020, 5, 25, 21, 22, 12, 670865)}
2020-05-26 02:52:13 [scrapy.core.engine] INFO: Spider closed (finished)

任何帮助将被感激。

python web-scraping lambda scrapy web-crawler
1个回答
2
投票

这个页面使用 JavaScript 来添加商品,但它并不是从外部文件读取数据,而是把所有数据都嵌在 `<script>` 标签中。

import requests
from bs4 import BeautifulSoup
import json

base_url = "https://www.myntra.com/men-footwear"

# Fetch the listing page.  The product data is rendered client-side, but the
# raw JSON is embedded in one of the page's <script> tags, so no JavaScript
# engine is needed to read it.
response = requests.get(base_url)
document = BeautifulSoup(response.text, 'html.parser')

# The ninth <script> tag holds the "window.__myx = {...}" assignment.
raw_script = document.find_all('script')[8].text

# Drop the "window.__myx = " prefix: keep everything after the first '='.
_, json_text = raw_script.split('=', 1)

# Parse the remainder into a dictionary.
catalog = json.loads(json_text)

for product in catalog['searchData']['results']['products']:
    print('product:', product['product'])
    print('url:', 'https://www.myntra.com/' + product['landingPageUrl'])
    print('---')

结果。

product: Puma Men Black Rapid Runner IDP Running Shoes
url: https://www.myntra.com/sports-shoes/puma/puma-men-black-rapid-runner-idp-running-shoes/9005767/buy
---
product: Puma Men White Smash Leather Sneakers
url: https://www.myntra.com/casual-shoes/puma/puma-men-white-smash-leather-sneakers/1966314/buy
---
product: Puma Unisex Grey Escaper Core Running Shoes
url: https://www.myntra.com/sports-shoes/puma/puma-unisex-grey-escaper-core-running-shoes/10137271/buy
---
product: Red Tape Men Brown Leather Derbys
url: https://www.myntra.com/casual-shoes/red-tape/red-tape-men-brown-leather-derbys/10300791/buy
---

EDIT: 下面是用 Scrapy 实现的相同功能。

你可以把所有的代码放在一个文件中,然后运行 python script.py 无需创建项目。

它使用 meta 将产品数据从一个解析器(解析主页面)发送到另一个解析器(解析产品页面)。

import scrapy
import json

class MySpider(scrapy.Spider):
    """Scrape myntra.com men's footwear from the page's embedded JSON."""

    name = 'myspider'

    start_urls = ['https://www.myntra.com/men-footwear']

    def parse(self, response):
        """Extract product records from the listing page's JSON blob."""
        print('url:', response.url)

        # The tenth <script> tag contains "window.__myx = {...}".
        raw_script = response.xpath('//script/text()')[9].get()

        # Strip the "window.__myx = " prefix and parse the rest as JSON.
        data = json.loads(raw_script.split('=', 1)[1])

        for product in data['searchData']['results']['products']:
            details = {
                'product': product['product'],
                'productId': product['productId'],
                'brand': product['brand'],
                'url': 'https://www.myntra.com/' + product['landingPageUrl'],
            }
            # Carry the listing data to the product-page parser via meta.
            yield response.follow(
                product['landingPageUrl'],
                callback=self.parse_item,
                meta={'item': details},
            )

    def parse_item(self, response):
        """Emit the item forwarded from the listing page."""
        print('url:', response.url)

        # TODO: parse the product page itself for more information.
        yield response.meta['item']

# --- run without project and save in `output.csv` ---

from scrapy.crawler import CrawlerProcess

settings = {
    'USER_AGENT': 'Mozilla/5.0',
    # save in file CSV, JSON or XML
    'FEED_FORMAT': 'csv',     # csv, json, xml
    'FEED_URI': 'output.csv',
}

process = CrawlerProcess(settings)
process.crawl(MySpider)
process.start()
© www.soinside.com 2019 - 2024. All rights reserved.