如何写入csv文件并插入抓取(scrape)到的数据

问题描述 投票:-1回答:2

我正在为我的研究设计一个网页爬虫(scraping)项目,但卡在了将抓取到的数据写入csv这一步。能帮帮我吗?

我已成功抓取到数据,但我想将其存储到csv中,下面是我的代码

需要编写代码从网站中提取所有html然后将其保存到csv文件。

我想我需要先把抓取到的内容转换成列表,再把该列表写入文件,但我不确定具体该怎么做。

这是我到目前为止:

import requests
import time
from bs4 import BeautifulSoup
import csv


# Fetch and parse the first page of job listings.
page = requests.get('https://www.myamcat.com/jobs')
soup = BeautifulSoup(page.content, 'lxml')

print("Wait Scrapper is working on ")
if page.status_code != 200:
    print("Error in scraping, check the url")
else:
    print("Successfully scraped the data")
    print("Loading data in csv")

    def _field_text(container, cls):
        # Return the stripped text of the first element with class `cls`,
        # or '' when the card lacks that field, so one malformed card
        # doesn't crash the whole export.
        node = container.find(class_=cls)
        return node.get_text(strip=True) if node else ''

    # `with` guarantees the file is flushed and closed; newline='' prevents
    # blank rows on Windows and utf-8 handles non-ASCII job text.
    with open('dataminer.csv', 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(['ProfileName', 'CompanyName', 'Salary', 'Job', 'Location'])

        # Iterate per job container so each CSV row holds the fields of ONE
        # posting. The original looped over each field class separately,
        # which wrote one-column rows and lost the column association.
        for job in soup.find_all('div', attrs={'class': 'job-page'}):
            writer.writerow([
                _field_text(job, 'profile-name'),
                _field_text(job, 'company_name'),
                _field_text(job, 'salary'),
                _field_text(job, 'jobText'),
                _field_text(job, 'location'),
            ])







python csv web-scraping beautifulsoup
2个回答
0
投票

制作一个字典并将数据保存到其中然后保存到csv,检查下面的代码!

import requests
import time
from bs4 import BeautifulSoup
import csv


# Fetch and parse the first page of job listings.
page = requests.get('https://www.myamcat.com/jobs')
soup = BeautifulSoup(page.content, 'lxml')
data = []
print("Wait Scrapper is working on ")
if page.status_code != 200:
    print("Error in scraping, check the url")
else:
    print("Successfully scraped the data")
    # One dict per job card, keeping the five fields of a posting together.
    for x in soup.find_all('div', attrs={'class': 'job-page'}):
        # Store plain str values. The original called .encode('utf-8'),
        # which produced bytes and made csv write literal "b'...'" strings
        # into the file; text encoding belongs on open(), not on the values.
        data.append({
            'pname': x.find(class_="profile-name").text,
            'cname': x.find(class_="company_name").text,
            'salary': x.find(class_="salary").text,
            'lpa': x.find(class_="jobText").text,
            'loc': x.find(class_="location").text,
        })

print("Loading data in csv")
# newline='' avoids blank interleaved rows on Windows; utf-8 handles
# non-ASCII job text. On a failed fetch this still writes a header-only
# file, matching the original control flow.
with open('dataminer.csv', 'w', newline='', encoding='utf-8') as f:
    fields = ['salary', 'loc', 'cname', 'pname', 'lpa']
    writer = csv.DictWriter(f, fieldnames=fields)
    writer.writeheader()
    writer.writerows(data)

0
投票

除了其他答案提供的方法之外,你也可以边抓取边写入内容。我使用.select()而不是.find_all()来实现同样的目标。

import csv
import requests
from bs4 import BeautifulSoup

# Page to scrape job postings from.
URL = "https://www.myamcat.com/jobs"

response = requests.get(URL)
parsed = BeautifulSoup(response.text, 'lxml')

# Scrape and write in a single pass: one CSV row per job card.
with open('myamcat_doc.csv', 'w', newline="", encoding="utf-8") as out_file:
    out = csv.writer(out_file)
    out.writerow(['pname', 'cname', 'salary', 'loc'])

    for card in parsed.select(".job-listing .content"):
        # Pull each field from its sub-element, trimming whitespace;
        # selector order matches the header row above.
        row = [
            card.select_one(selector).get_text(strip=True)
            for selector in (
                ".profile-name h3",
                ".company_name",
                ".salary .jobText",
                ".location .jobText",
            )
        ]
        out.writerow(row)
© www.soinside.com 2019 - 2024. All rights reserved.