I want to periodically fetch several web pages within the same aiohttp.ClientSession(). This is what I have so far. The URLs need to stay inside the jobs, because some other URLs have to be computed from them.
What command is missing where the ??? is? Or do I need to do this in a completely different way? Thanks in advance for your help.
P.S.: The intervals in seconds are only for testing purposes. I will switch to one-minute intervals later.
from apscheduler.schedulers.asyncio import AsyncIOScheduler
import asyncio
import aiohttp

async def fetch(session, url, timeout=3):
    async with session.get(url, ssl=False, timeout=timeout) as response:
        return await response.text(), response.status

async def GOESX_job(session):
    url = 'https://services.swpc.noaa.gov/json/goes/primary/xrays-6-hour.json'
    response, status = await fetch(session, url)
    print(status)

async def GOESp_job(session):
    url = 'https://services.swpc.noaa.gov/json/goes/primary/integral-protons-6-hour.json'
    response, status = await fetch(session, url)
    print(status)

async def jobs(scheduler):
    async with aiohttp.ClientSession() as session:
        scheduler.add_job(GOESX_job, 'interval', seconds=2, args=[session])
        scheduler.add_job(GOESp_job, 'interval', seconds=3, args=[session])

scheduler = AsyncIOScheduler()
??? jobs(scheduler)
scheduler.start()
asyncio.get_event_loop().run_forever()
Perhaps with some tuning:
async def main():
    scheduler = AsyncIOScheduler()
    async with aiohttp.ClientSession() as session:
        scheduler.add_job(GOESX_job, 'interval', seconds=2, args=[session])
        scheduler.add_job(GOESp_job, 'interval', seconds=3, args=[session])
        scheduler.start()
        # main() never returns, so the session (and the running loop) stays open
        await asyncio.sleep(float("inf"))

asyncio.run(main())
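
As a follow-up on the P.S.: APScheduler's interval trigger also accepts minutes (and hours, days, etc.), and wrapping the endless sleep in a try/finally lets the scheduler shut down before the session is closed. A minimal sketch of that variation, reusing the GOESX_job and GOESp_job functions defined above:

import asyncio
import aiohttp
from apscheduler.schedulers.asyncio import AsyncIOScheduler

async def main():
    scheduler = AsyncIOScheduler()
    async with aiohttp.ClientSession() as session:
        # the interval trigger also takes minutes=..., hours=..., etc.
        scheduler.add_job(GOESX_job, 'interval', minutes=1, args=[session])
        scheduler.add_job(GOESp_job, 'interval', minutes=1, args=[session])
        scheduler.start()
        try:
            await asyncio.sleep(float("inf"))  # keep the session alive
        finally:
            scheduler.shutdown(wait=False)     # stop scheduling before the session closes

if __name__ == '__main__':
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        pass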