使用python在没有API的情况下刮擦wunderground

问题描述 投票:1回答:2

我在数据抓取方面不是很有经验,所以这里的问题对某些人来说可能是显而易见的。

我想要的是从wunderground.com获取历史每日天气数据,而无需支付API。也许根本不可能。

我的方法只是使用requests.get并将整个文本保存到文件中(下面的代码)。

问题在于:我得到的不是在Web浏览器中能看到的那些表(参见下图),而是一个几乎不包含这些表数据的文件。内容类似这样:

摘要 没有记录数据 每日观察 没有数据记录

奇怪的是,如果我在Firefox中使用"另存为"保存网页,结果取决于我选择的是"网页,仅HTML"还是"网页,全部":后者包含我感兴趣的数据,前者没有。

是否有可能这是故意为之,以防止别人抓取他们的数据?我只是想确认这个问题是否真的没有解决办法。

提前致谢 —— Juan

注意:我尝试使用user-agent字段无济于事。

# Note: I run > set PYTHONIOENCODING=utf-8 before executing python
import requests

# Build the wunderground history URL for one specific date:
date = '2019-03-12'
base = 'https://www.wunderground.com/history/daily/sd/khartoum/HSSS/date/'
respuesta = requests.get(base + date)

# Dump the raw page to disk so we can check whether the tables came through:
with open('test.html', 'wb') as salida:
    salida.write(respuesta.text.encode('utf-8'))

Screenshot of the tables I want to scrape.


更新:找到一个解决方案

感谢把我指向selenium模块的回答,这正是我需要的解决方案。该代码提取给定日期的URL上存在的所有表(即正常访问站点时看到的那些表)。它还需要修改,才能遍历日期列表并组织生成的CSV文件。

注意:工作目录中需要geckodriver.exe

from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.common.keys import Keys
import requests, sys, re

# URL with wunderground weather information
url = 'https://www.wunderground.com/history/daily/sd/khartoum/HSSS/date/2019-3-12'

# Launch Firefox through geckodriver so the page's JavaScript renders the
# tables (a plain requests.get only returns the empty page shell):
bi = FirefoxBinary(r'C:\Program Files (x86)\Mozilla Firefox\\firefox.exe')
br = webdriver.Firefox(firefox_binary=bi)

# Open the page and grab the fully rendered DOM:
br.get(url)
sopa = BeautifulSoup(br.page_source, 'lxml')

# Close the firefox instance started before:
br.quit()

# I'm only interested in the tables contained on the page:
tablas = sopa.find_all('table')


def _linea_csv(celdas):
    """Join cell texts into one quoted, semicolon-separated CSV line.

    All whitespace inside the cells is removed, matching the original
    cleanup (e.g. '82 F' becomes '82F').
    """
    campos = '"' + '";"'.join(celda.text.strip() for celda in celdas) + '"'
    return re.sub(r'\s', '', campos) + '\n'


# Write each table into its own CSV file.
# BUG FIX: the files were opened in text mode ('w') but written with
# str.encode(...) bytes, which raises TypeError on Python 3.  Open them
# with an explicit UTF-8 encoding and write plain strings instead; the
# 'with' block also guarantees the files are closed on error.
for i, tabla in enumerate(tablas, start=1):
    with open('wunderground' + str(i) + '.csv', 'w', encoding='utf-8') as out_file:
        # ---- Header row ('th' cells): ----
        out_file.write(_linea_csv(tabla.findAll('th')))

        # ---- Data rows ('td' cells); row 0 is skipped, it holds the header: ----
        for fila in tabla.findAll('tr')[1:]:
            out_file.write(_linea_csv(fila.findAll('td')))

额外的:@QHarr的答案很漂亮,但我需要做一些修改才能使用它,因为我在我的电脑上使用了firefox。重要的是要注意,为了实现这一点,我必须将geckodriver.exe文件添加到我的工作目录中。这是代码:

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.support import expected_conditions as EC
import pandas as pd

# Daily weather history page for Khartoum (HSSS) on 2019-03-12:
url = 'https://www.wunderground.com/history/daily/sd/khartoum/HSSS/date/2019-03-12'

# Start Firefox through geckodriver (geckodriver.exe must be reachable):
binario = FirefoxBinary(r'C:\Program Files (x86)\Mozilla Firefox\\firefox.exe')
driver = webdriver.Firefox(firefox_binary=binario)
# driver = webdriver.Chrome()
driver.get(url)

# Wait (up to 20 s) until the JavaScript-rendered tables exist in the DOM:
condicion = EC.presence_of_all_elements_located((By.CSS_SELECTOR, "table"))
tables = WebDriverWait(driver, 20).until(condicion)

# Hand each table's HTML to pandas and print the resulting DataFrame:
for table in tables:
    newTable = pd.read_html(table.get_attribute('outerHTML'))
    if newTable:
        print(newTable[0].fillna(''))
python web-scraping wunderground
2个回答
1
投票

您可以使用selenium来确保页面加载,然后pandas read_html来获取表格

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import pandas as pd

# Daily weather history page for Khartoum (HSSS) on 2019-03-12:
url = 'https://www.wunderground.com/history/daily/sd/khartoum/HSSS/date/2019-03-12'

# Chrome renders the page's JavaScript, which requests alone cannot do:
driver = webdriver.Chrome()
driver.get(url)

# Wait (up to 20 s) until the JavaScript-rendered tables are present:
condicion = EC.presence_of_all_elements_located((By.CSS_SELECTOR, "table"))
tables = WebDriverWait(driver, 20).until(condicion)

# Parse each table's HTML with pandas and print the resulting DataFrame:
for table in tables:
    newTable = pd.read_html(table.get_attribute('outerHTML'))
    if newTable:
        print(newTable[0].fillna(''))

0
投票

另一个方向:使用网站正在进行的API调用。

(HTTP调用来自Chrome开发人员工具)

例:

HTTP GET https://api-ak.wunderground.com/api/d8585d80376a429e/history_20180812/lang:EN/units:english/bestfct:1/v:2.0/q/HSSS.json?showObs=0&ttl=120

响应

{
    "response": {
        "version": "2.0",
        "units": "english",
        "termsofService": "https://www.wunderground.com/weather/api/d/terms.html",
        "attribution": {
        "image":"//icons.wxug.com/graphics/wu2/logo_130x80.png",
        "title":"Weather Underground",
        "link":"http://www.wunderground.com"
        },
        "features": {
        "history": 1
        }
        , "location": {
        "name": "Khartoum",
        "neighborhood":null,
        "city": "Khartoum",
        "state": null,
        "state_name":"Sudan",
        "country": "SD",
        "country_iso3166":"SA",
        "country_name":"Saudi Arabia",
        "continent":"AS",
        "zip":"00000",
        "magic":"474",
        "wmo":"62721",
        "radarcode":"xxx",
        "radarregion_ic":null,
        "radarregion_link": "//",
        "latitude":15.60000038,
        "longitude":32.54999924,
        "elevation":null,
        "wfo": null,
        "l": "/q/zmw:00000.474.62721",
        "canonical": "/weather/sa/khartoum"
        },
        "date": {
    "epoch": 1553287561,
    "pretty": "11:46 PM EAT on March 22, 2019",
    "rfc822": "Fri, 22 Mar 2019 23:46:01 +0300",
    "iso8601": "2019-03-22T23:46:01+0300",
    "year": 2019,
    "month": 3,
    "day": 22,
    "yday": 80,
    "hour": 23,
    "min": "46",
    "sec": 1,
    "monthname": "March",
    "monthname_short": "Mar",
    "weekday": "Friday",
    "weekday_short": "Fri",
    "ampm": "PM",
    "tz_short": "EAT",
    "tz_long": "Africa/Khartoum",
    "tz_offset_text": "+0300",
    "tz_offset_hours": 3.00
}
    }
        ,
"history": {
    "start_date": {
    "epoch": 1534064400,
    "pretty": "12:00 PM EAT on August 12, 2018",
    "rfc822": "Sun, 12 Aug 2018 12:00:00 +0300",
    "iso8601": "2018-08-12T12:00:00+0300",
    "year": 2018,
    "month": 8,
    "day": 12,
    "yday": 223,
    "hour": 12,
    "min": "00",
    "sec": 0,
    "monthname": "August",
    "monthname_short": "Aug",
    "weekday": "Sunday",
    "weekday_short": "Sun",
    "ampm": "PM",
    "tz_short": "EAT",
    "tz_long": "Africa/Khartoum",
    "tz_offset_text": "+0300",
    "tz_offset_hours": 3.00
},
    "end_date": {
    "epoch": null,
    "pretty": null,
    "rfc822": null,
    "iso8601": null,
    "year": null,
    "month": null,
    "day": null,
    "yday": null,
    "hour": null,
    "min": null,
    "sec": null,
    "monthname": null,
    "monthname_short": null,
    "weekday": null,
    "weekday_short": null,
    "ampm": null,
    "tz_short": null,
    "tz_long": null,
    "tz_offset_text": null,
    "tz_offset_hours": null
},
    "days": [
        {
        "summary": {
        "date": {
    "epoch": 1534021200,
    "pretty": "12:00 AM EAT on August 12, 2018",
    "rfc822": "Sun, 12 Aug 2018 00:00:00 +0300",
    "iso8601": "2018-08-12T00:00:00+0300",
    "year": 2018,
    "month": 8,
    "day": 12,
    "yday": 223,
    "hour": 0,
    "min": "00",
    "sec": 0,
    "monthname": "August",
    "monthname_short": "Aug",
    "weekday": "Sunday",
    "weekday_short": "Sun",
    "ampm": "AM",
    "tz_short": "EAT",
    "tz_long": "Africa/Khartoum",
    "tz_offset_text": "+0300",
    "tz_offset_hours": 3.00
},
        "temperature": 82,
    "dewpoint": 66,
    "pressure": 29.94,
    "wind_speed": 11,
    "wind_dir": "SSE",
    "wind_dir_degrees": 166,
    "visibility": 5.9,
    "humidity": 57,
    "max_temperature": 89,
    "min_temperature": 75,
    "temperature_normal": null,
    "min_temperature_normal": null,
    "max_temperature_normal": null,
    "min_temperature_record": null,
    "max_temperature_record": null,
    "min_temperature_record_year": null,
    "max_temperature_record_year": null,
    "max_humidity": 83,
    "min_humidity": 40,
    "max_dewpoint": 70,
    "min_dewpoint": 63,
    "max_pressure": 29.98,
    "min_pressure": 29.89,
    "max_wind_speed": 22,
    "min_wind_speed": 5,
    "max_visibility": 6.2,
    "min_visibility": 1.9,
    "fog": 0,
    "hail": 0,
    "snow": 0,
    "rain": 1,
    "thunder": 0,
    "tornado": 0,
    "snowfall": null,
    "monthtodatesnowfall": null,
    "since1julsnowfall": null,
    "snowdepth": null,
    "precip": 0.00,
    "preciprecord": null,
    "preciprecordyear": null,
    "precipnormal": null,
    "since1janprecipitation": null,
    "since1janprecipitationnormal": null,
    "monthtodateprecipitation": null,
    "monthtodateprecipitationnormal": null,
    "precipsource": "3Or6HourObs",
    "gdegreedays": 32,
    "heatingdegreedays": 0,
    "coolingdegreedays": 17,
    "heatingdegreedaysnormal": null,
    "monthtodateheatingdegreedays": null,
    "monthtodateheatingdegreedaysnormal": null,
    "since1sepheatingdegreedays": null,
    "since1sepheatingdegreedaysnormal": null,
    "since1julheatingdegreedays": null,
    "since1julheatingdegreedaysnormal": null,
    "coolingdegreedaysnormal": null,
    "monthtodatecoolingdegreedays": null,
    "monthtodatecoolingdegreedaysnormal": null,
    "since1sepcoolingdegreedays": null,
    "since1sepcoolingdegreedaysnormal": null,
    "since1jancoolingdegreedays": null,
    "since1jancoolingdegreedaysnormal": null
,
        "avgoktas": 5,
        "icon": "rain"
        }
        }
    ]
}
}
© www.soinside.com 2019 - 2024. All rights reserved.