How do I scrape a website with a table that's hard to read (pandas and Beautiful Soup)?

Question · votes: 0 · answers: 1

I'm trying to scrape the data at https://www.seethroughny.net/payrolls/110681345, but the table is difficult to work with.

I've tried a lot of things.

import pandas as pd
import ssl
import csv

ssl._create_default_https_context = ssl._create_unverified_context


# read_html returns a list of DataFrames, one per <table> found on the page
calls_df = pd.read_html("https://www.seethroughny.net/payrolls/110681345", header=0)
print(calls_df)

calls_df[0].to_csv("calls.csv", index=False)

I want to parse it into a CSV file that I can then index-match against another dataset.
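
The matching step I have in mind is roughly the sketch below; the shared "Name" column and the second file name are placeholders, not the real datasets.

import pandas as pd

calls = pd.read_csv("calls.csv")
other = pd.read_csv("other_dataset.csv")  # hypothetical second dataset

# An inner merge keeps only the rows whose Name appears in both files,
# much like an INDEX/MATCH lookup in a spreadsheet.
matched = calls.merge(other, on="Name", how="inner")
matched.to_csv("matched.csv", index=False)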

python web-scraping html-table beautifulsoup
1 Answer

2 votes

There's a JSON response that contains the HTML. Something seems to block requests at random points in the full looping version further down.

Single-page version, where you can change the current_page value to the page number you want.

import requests
import pandas as pd
from bs4 import BeautifulSoup as bs

url = 'https://www.seethroughny.net/tools/required/reports/payroll?action=get'
headers = {
    'Accept': 'application/json, text/javascript, */*; q=0.01',
    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
    'User-Agent': 'Mozilla/5.0',
    'Referer': 'https://www.seethroughny.net/payrolls/110681'
}

data = {
    'PayYear[]': '2018',
    'BranchName[]': 'Villages',
    'SortBy': 'YTDPay DESC',
    'current_page': '0',
    'result_id': '110687408',
    'url': '/tools/required/reports/payroll?action=get',
    'nav_request': '0'
}

r = requests.post(url, headers=headers, data=data).json()
soup = bs(r['html'], 'lxml')

results = []

# the table interleaves hidden expandable detail rows, so keep only the odd
# rows and drop each row's first cell (the expand/collapse control)
for item in soup.select('tr:nth-child(odd)'):
    row = [subItem.text for subItem in item.select('td')][1:]
    results.append(row)

df = pd.DataFrame(results)
df.to_csv(r'C:\Users\User\Desktop\Data.csv', sep=',', encoding='utf-8-sig', index=False)
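
The DataFrame has no header row at this point, just numbered columns. If named columns help with the later index match, you can assign them just before the to_csv call. The names below are a guess read off the rendered page, not the endpoint's schema, so verify them first:

expected = ['Name', 'Employer/Agency', 'Total Pay', 'Subagency/Type',
            'Title', 'Rate of Pay', 'Pay Year', 'Pay Basis',
            'Branch/Major Category']  # hypothetical header names
if len(df.columns) == len(expected):
    df.columns = expected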

All-pages version (a work in progress: the requests may fail to return JSON at different points in the loop, even with a delay). Swapping in user agents, as @Sim suggested, seems to improve things.

import requests
import pandas as pd
from bs4 import BeautifulSoup as bs
import time
from requests.packages.urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
import random


ua = ['Mozilla/5.0',
      'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',
      'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
      'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.71 Safari/537.36',
      'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko',
      'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
      ]


url = 'https://www.seethroughny.net/tools/required/reports/payroll?action=get'
headers = {
    'Accept': 'application/json, text/javascript, */*; q=0.01',
    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
    'User-Agent': 'Mozilla/5.0',
    'Referer': 'https://www.seethroughny.net/payrolls/110681'
}

data = {
    'PayYear[]': '2018',
    'BranchName[]': 'Villages',
    'SortBy': 'YTDPay DESC',
    'current_page': '0',
    'result_id': '110687408',
    'url': '/tools/required/reports/payroll?action=get',
    'nav_request': '0'
}

results = []
i = 0
with requests.Session() as s:
    retries = Retry(total=5,
                    backoff_factor=0.1,
                    status_forcelist=[500, 502, 503, 504])

    # mount the retry adapter for https, since the target URL is https
    s.mount('https://', HTTPAdapter(max_retries=retries))

    base_result_id = int(data['result_id'])

    while len(results) < 1000:  # replace 1000 with the total row count
        data['current_page'] = i
        # derive result_id from its original value instead of compounding it each pass
        data['result_id'] = str(base_result_id + i)

        try:
            r = s.post(url, headers=headers, data=data).json()
        except Exception as e:
            print(e)
            time.sleep(2)
            headers['User-Agent'] = random.choice(ua)  # rotate the user agent before retrying
            continue  # retry the same page; i has not been incremented yet
        soup = bs(r['html'], 'lxml')

        for item in soup.select('tr:nth-child(odd)'):
            row = [subItem.text for subItem in item.select('td')][1:]
            results.append(row)
        i+=1
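
The loop above only collects rows in memory; to produce the CSV the question asks for, write them out after the loop finishes, with the same output step as the single-page version:

df = pd.DataFrame(results)
df.to_csv(r'C:\Users\User\Desktop\Data.csv', sep=',', encoding='utf-8-sig', index=False)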

@Sim's version:

import requests
import pandas as pd
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
import time  # needed for the sleep between retries

url = 'https://www.seethroughny.net/tools/required/reports/payroll?action=get'

headers = {
    'User-Agent' : 'Mozilla/5.0',
    'Referer' : 'https://www.seethroughny.net/payrolls/110681'
}

data = {
    'PayYear[]' : '2018',
    'BranchName[]' : 'Villages',
    'SortBy' : 'YTDPay DESC',
    'current_page' : '0',
    'result_id' : '110687408',
    'url' : '/tools/required/reports/payroll?action=get',
    'nav_request' : '0'  
}

results = []

i = 0

def get_content(i):
    while len(results) < 15908:  # 15908 is the total row count for this search
        print(len(results))
        data['current_page'] = i
        headers['User-Agent'] = ua.random
        try:
            r = requests.post(url, headers=headers, data=data).json()
        except Exception:
            time.sleep(1)
            get_content(i)
            return  # the recursive call finishes the scrape; don't fall through without a response

        soup = BeautifulSoup(r['html'], 'lxml')

        for item in soup.select('tr:nth-child(odd)'):
            row = [subItem.text for subItem in item.select('td')][1:]
            results.append(row)
        i+=1

if __name__ == '__main__':
    ua = UserAgent()
    get_content(i)
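
Because failed pages are retried, the same rows can occasionally be appended twice. A cheap safeguard, sketched below, is to deduplicate before writing the file (the output path is just an example):

df = pd.DataFrame(results).drop_duplicates()  # drop rows a retried page may have duplicated
df.to_csv('Data.csv', encoding='utf-8-sig', index=False)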