我已经完成了对wikipedia信息框内容的抓取,但我不知道如何把这些数据存储到csv文件中。请帮帮我。
from bs4 import BeautifulSoup as bs
from urllib.request import urlopen
def infobox(query):
    """Fetch the Wikipedia article for *query* and print its infobox rows.

    query: article title, appended directly to the Wikipedia URL.
    Raises ValueError when the page has no 'infobox vcard' table
    (previously this crashed with an opaque AttributeError on None).
    """
    url = 'https://en.wikipedia.org/wiki/' + query
    raw = urlopen(url)
    # Name the parser explicitly: bare bs(raw) warns and picks whichever
    # parser happens to be installed, which can change the parse tree.
    soup = bs(raw, 'html.parser')
    table = soup.find('table', {'class': 'infobox vcard'})
    if table is None:
        raise ValueError('no infobox found for page ' + repr(query))
    for tr in table.find_all('tr'):
        print(tr.text)

infobox('Infosys')
你需要先收集所需的数据,再写入csv文件。你可以使用csv模块,请看下面的例子:
from bs4 import BeautifulSoup as bs
from urllib import urlopen
import csv
def infobox(query):
    """Scrape the 'infobox vcard' rows for *query* and hand them to
    write_csv_file.

    Rows with two cells become [header, value]; any other non-empty
    row becomes a single-element list.
    """
    # Python 3 location of urlopen — the module-level
    # `from urllib import urlopen` above is Python 2 only.
    from urllib.request import urlopen

    content_list = []
    url = 'https://en.wikipedia.org/wiki/' + query
    raw = urlopen(url)
    # Explicit parser choice avoids bs4's "no parser specified" warning.
    soup = bs(raw, 'html.parser')
    table = soup.find('table', {'class': 'infobox vcard'})
    for tr in table.find_all('tr'):
        if len(tr.contents) > 1:
            # Keep cell text as str: Python 3's csv module writes str,
            # and .encode('utf-8') bytes would be emitted as b'...'.
            content_list.append([tr.contents[0].text, tr.contents[1].text])
        elif tr.text:
            content_list.append([tr.text])
    write_csv_file(content_list)
def write_csv_file(content_list, path=r'd:\Test.csv'):
    """Write *content_list* (a list of row lists) to *path* as CSV.

    path defaults to the original hard-coded location but can now be
    overridden by the caller.

    Note: in Python 3 the file must be opened in TEXT mode with
    newline='' — csv.writer writes str, so the old mode='wb' raised
    TypeError, and without newline='' Windows gets doubled line ends.
    """
    with open(path, mode='w', newline='', encoding='utf-8') as csv_file:
        writer = csv.writer(csv_file, delimiter=',')
        writer.writerows(content_list)
# Driver: scrape the Infosys infobox and write it to the CSV file.
infobox('Infosys')
下面演示了如何检测一行中是否恰好包含一个表头(th)和一个单元格(td)元素,以确保得到两列(你可以扩展 if 结构,为只有 td 的行单独填充第一列)。我使用了稍微不同的写法来获得更清晰的输出:用 select 代替 find 以更快地选取元素,并利用 pandas 来生成 csv。
import requests
from bs4 import BeautifulSoup as bs
import pandas as pd
from urllib.parse import urljoin

# Target article; the base URL is reused below to resolve relative links.
url = 'https://en.wikipedia.org/wiki/' + 'Infosys'
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36', 'Referer': 'https://www.nseindia.com/'}
r = requests.get(url, headers=headers)
soup = bs(r.content, 'lxml')
table = soup.select_one('.infobox.vcard')
rows = table.find_all('tr')
output = []
for row in rows:
    # Keep only two-column rows (exactly one header + one data cell).
    if len(row.select('th, td')) == 2:
        # urljoin resolves both '#fragment' (relative to the article) and
        # '/wiki/...' (relative to the site root) correctly, replacing the
        # fragile hand-rolled item[0] == '#' check.
        links = [urljoin(url, a['href']) for a in row.select('td a')]
        output.append([
            row.select_one('th').text,
            row.select_one('td').text,
            links if links else '',  # '' preserves the original no-link marker
        ])
# Named columns give the CSV a meaningful header row instead of 0,1,2.
df = pd.DataFrame(output, columns=['header', 'value', 'links'])
df.to_csv(r'C:\Users\User\Desktop\Data.csv', sep=',', encoding='utf-8-sig',index = False )