This Selenium Python script:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
import time
def run():
    print('running')
    driver = webdriver.Chrome()
    driver.set_page_load_timeout(2000)
    driver.set_window_size(1024, 600)
    driver.maximize_window()
    try:
        driver.get("https://www.pjdsafetysupplies.com/Admin/Product/List")
        selectElement = Select(driver.find_element(By.ID, "SearchCategoryId"))
        selectElement.select_by_visible_text('Special Offers')
        search = driver.find_element(By.ID, 'search-products')
        search.click()
        driver.find_element(By.XPATH, ".//button[@data-toggle='dropdown']").click()
        driver.find_element(By.XPATH, ".//button[@name='exportexcel-all']").click()
    except Exception as e:
        print("Page Load Fail:" + str(e))
    finally:
        driver.quit()

run()
is supposed to export a CSV from the website, but the file the site generates is very large, and the script keeps timing out before the file has been created, with this error message:
HTTPConnectionPool(host='localhost', port=54896): Read timed out. (read timeout=120)
I tried setting load_timeout to 2000, but the error message says the read timeout is 120. I also tried setting Timeouts.implicit_wait and Timeouts.script, but I get the same error message no matter what.
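For reference, this is roughly how I set those values (a sketch from memory, using the Selenium 4 Timeouts class; all values are in seconds):

from selenium.webdriver.common.timeouts import Timeouts

# Raise every driver-side timeout well past the default
driver.timeouts = Timeouts(implicit_wait=2000, page_load=2000, script=2000)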
The problem is related to the timeout settings of the WebDriver server or of the underlying HTTP connection. Try the following:
In recent Selenium 4 releases the desired_capabilities argument has been removed, so pass the W3C 'timeouts' capability through ChromeOptions instead:

from selenium import webdriver

chrome_options = webdriver.ChromeOptions()
chrome_options.set_capability('timeouts', {
    'pageLoad': 200000,  # page load timeout in milliseconds
    'script': 200000,    # script timeout in milliseconds
})
driver = webdriver.Chrome(options=chrome_options)
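Those capabilities only control the browser-side page-load and script timeouts, though; the 120-second read timeout in your error comes from the HTTP connection the Python client opens to the local driver process. Depending on your Selenium version, you may be able to raise that with RemoteConnection.set_timeout before creating the driver (a sketch; in the newest releases this classmethod is deprecated in favour of a ClientConfig object):

from selenium import webdriver
from selenium.webdriver.remote.remote_connection import RemoteConnection

# Raise the client-side HTTP read timeout (in seconds) so long-running
# commands such as the export click are not cut off after 120 seconds.
RemoteConnection.set_timeout(600)

driver = webdriver.Chrome()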
You can also try an explicit wait:
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# Wait for download button to be clickable
WebDriverWait(driver, 300).until(
    EC.element_to_be_clickable((By.XPATH, ".//button[@name='exportexcel-all']"))
).click()
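Keep in mind that the 300 seconds here only cover how long WebDriverWait polls for the button to become clickable; each individual WebDriver command still uses the client's HTTP read timeout, so if the click that triggers the export blocks for longer than that, you will still need to raise the connection timeout as sketched above.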