我正试图从网页上获取HTML。但是,并非所有URL都已正确编写。列表中的大多数无效URL都包含http,但现在URL正在使用https。有些人错过了“www。”,还有其他人“www。”需要添加。
def repl_www_http(url):
x = url.replace("www.", "")
y = x.replace("http", "https")
return y
def repl_www(url):
y = url.replace("www.", "")
return y
def repl_http(url):
y = url.replace("http", "https")
return y
def repl_no_www(url):
y = url.replace("//", "//www.")
return y
def get_html(urllist):
for i in urllist:
html = ""
try:
html = requests.get(i)
html = html.text
return html
except requests.exceptions.ConnectionError:
try:
html = requests.get(repl_http(i))
html = html.text
print("replaced // with //www.")
except requests.exceptions.ConnectionError:
try:
html = requests.get(repl_http(i))
html = html.text
print("replaced http with https")
return html
except requests.exceptions.ConnectionError:
try:
html = requests.get(repl_www(i))
html = html.text
print("replaced www. with .")
return html
except requests.exceptions.ConnectionError:
try:
html = requests.get(repl_www_http(i))
html = html.text
print("replaced www with . and http with https")
return html
except requests.exceptions.ConnectionError:
return "no HTML found on this URL"
print("gethtml finished", html)
这是我得到的错误:
Traceback (most recent call last): File "C:\replacer.py", line 76, in <module> html = get_html(i)
File "C:\replacer.py", line 37, in get_html html = requests.get(repl_http(i))
File "C:\Users\LorenzKort\AppData\Local\Programs\Python\Python37\lib\site-packages\requests-2.19.1-py3.7.egg\requests\api.py", line 72, in get
return request('get', url, params=params, **kwargs) File "C:\Users\LorenzKort\AppData\Local\Programs\Python\Python37\lib\site-packages\requests-2.19.1-py3.7.egg\requests\api.py", line 58, in request
return session.request(method=method, url=url, **kwargs) File "C:\Users\LorenzKort\AppData\Local\Programs\Python\Python37\lib\site-packages\requests-2.19.1-py3.7.egg\requests\sessions.py", line 498, in request
prep = self.prepare_request(req) File "C:\Users\LorenzKort\AppData\Local\Programs\Python\Python37\lib\site-packages\requests-2.19.1-py3.7.egg\requests\sessions.py", line 441, in prepare_request
hooks=merge_hooks(request.hooks, self.hooks),
File "C:\Users\LorenzKort\AppData\Local\Programs\Python\Python37\lib\site-packages\requests-2.19.1-py3.7.egg\requests\models.py",line 309, in prepare
self.prepare_url(url, params) File "C:\Users\LorenzKort\AppData\Local\Programs\Python\Python37\lib\site-packages\requests-2.19.1-py3.7.egg\requests\models.py",
line 383, in prepare_url
raise MissingSchema(error)requests.exceptions.MissingSchema: Invalid URL 'h': No schema supplied. Perhaps you meant http://h?
如何解决此问题以纠正错误的URL?
问题是url传递给requests.get()发送MissingSchema错误,你应该在捕获ConnectionError时捕获此错误。
我认为你应该使用生成器来清理你的代码,因为你不应该像这样嵌入try / catch语句。
def get_versions_url(my_url):
yield my_url
yield repl_www(my_url)
yield repl_http(my_url)
yield repl_http_www(my_url)
def get_html(urllist):
#use i only for indexes
for my_url in urllist:
for url_fixed in get_versions_url(my_url):
try:
# I dind't figure out why you return here and do not end first loop
return requests.get(url_fixed).text
except requests.exceptions.ConnectionError:
pass
except requests.exceptions.MissingSchema:
pass
然后你可以调试你的发电机。试着做 :
for url in fix_url(<your url>):
print(url)
我认为你的一些repl_函数不能像你期望的那样工作。