我试图通过scrapyd-deploy部署一个scrapy项目到远程scrapyd服务器上。当我通过git push prod部署到远程服务器时,项目本身是可以正常工作的,在我的本地机器上和远程服务器上都能完美地工作。
在使用scrapyd-deploy时,我得到了这个错误。
% scrapyd-deploy example -p apo
{ "node_name": "spider1",
"status": "error",
"message": "/usr/local/lib/python3.8/dist-packages/scrapy/utils/project.py:90: ScrapyDeprecationWarning: Use of environment variables prefixed with SCRAPY_ to override settings is deprecated. The following environment variables are currently defined: EGG_VERSION\n warnings.warn(\nTraceback (most recent call last):\n File \"/usr/lib/python3.8/runpy.py\", line 193, in _run_module_as_main\n return _run_code(code, main_globals, None,\n File \"/usr/lib/python3.8/runpy.py\", line 86, in _run_code\n exec(code, run_globals)\n File \"/usr/local/lib/python3.8/dist-packages/scrapyd/runner.py\", line 40, in <module>\n main()\n File \"/usr/local/lib/python3.8/dist-packages/scrapyd/runner.py\", line 37, in main\n execute()\n File \"/usr/local/lib/python3.8/dist-packages/scrapy/cmdline.py\", line 142, in execute\n cmd.crawler_process = CrawlerProcess(settings)\n File \"/usr/local/lib/python3.8/dist-packages/scrapy/crawler.py\", line 280, in __init__\n super(CrawlerProcess, self).__init__(settings)\n File \"/usr/local/lib/python3.8/dist-packages/scrapy/crawler.py\", line 152, in __init__\n self.spider_loader = self._get_spider_loader(settings)\n File \"/usr/local/lib/python3.8/dist-packages/scrapy/crawler.py\", line 146, in _get_spider_loader\n return loader_cls.from_settings(settings.frozencopy())\n File \"/usr/local/lib/python3.8/dist-packages/scrapy/spiderloader.py\", line 60, in from_settings\n return cls(settings)\n File \"/usr/local/lib/python3.8/dist-packages/scrapy/spiderloader.py\", line 24, in __init__\n self._load_all_spiders()\n File \"/usr/local/lib/python3.8/dist-packages/scrapy/spiderloader.py\", line 46, in _load_all_spiders\n for module in walk_modules(name):\n File \"/usr/local/lib/python3.8/dist-packages/scrapy/utils/misc.py\", line 77, in walk_modules\n submod = import_module(fullpath)\n File \"/usr/lib/python3.8/importlib/__init__.py\",
line 127, in import_module\n return _bootstrap._gcd_import(name[level:], package, level)\n File \"<frozen importlib._bootstrap>\",
line 1014, in _gcd_import\n File \"<frozen importlib._bootstrap>\",
line 991, in _find_and_load\n File \"<frozen importlib._bootstrap>\",
line 975, in _find_and_load_unlocked\n File \"<frozen importlib._bootstrap>\",
line 655, in _load_unlocked\n File \"<frozen importlib._bootstrap>\",
line 618, in _load_backward_compatible\n File \"<frozen zipimport>\",
line 259, in load_module\n File \"/tmp/apo-v1.0.2-114-g8a2f218-master-5kgzxesk.egg/bid/spiders/allaboutwatches.py\",
line 31, in <module>\n File \"/tmp/apo-v1.0.2-114-g8a2f218-master-5kgzxesk.egg/bid/spiders/allaboutwatches.py\",
line 36, in GetbidSpider\n File \"/tmp/apo-v1.0.2-114-g8a2f218-master-5kgzxesk.egg/bid/act_functions.py\",
line 10, in create_image_dir\nNotADirectoryError: [Errno 20]
Not a directory: '/tmp/apo-v1.0.2-114-g8a2f218-master-5kgzxesk.egg/bid/../images/allaboutwatches'\n"}
我猜测它与我调用的这个方法有关,因为一旦我把这个方法的调用删掉,部分错误就会消失。
# function will create a custom name directory to hold the images of each crawl
def create_image_dir(name, base_dir=None):
    """Ensure an images directory exists for a crawl and return settings.

    Args:
        name: name of the crawl; used as the leaf directory name.
        base_dir: optional base directory for the ``images`` folder.
            When None, falls back to the original behaviour of using the
            parent of this file's directory. NOTE(review): under scrapyd the
            project runs from a packed .egg, so ``__file__`` points inside a
            zip archive and creating a directory there raises
            NotADirectoryError — pass an explicit absolute ``base_dir``
            (e.g. from a Scrapy setting) when deployed.

    Returns:
        A dict suitable for ``Spider.custom_settings`` with 'IMAGES_STORE'
        set to the (now existing) image directory.
    """
    if base_dir is None:
        # Original behaviour: <dir of this file>/../
        base_dir = os.path.join(os.path.dirname(__file__), '..')
    img_dir = os.path.join(base_dir, "images", name)
    # makedirs also creates missing parent directories; exist_ok avoids the
    # check-then-create race the old os.path.exists + os.mkdir pair had.
    os.makedirs(img_dir, exist_ok=True)
    return {'IMAGES_STORE': img_dir}
这个方法也是如此
def brandnames(csv_path=None):
    """Load brand names from a ';'-delimited CSV into a lookup dict.

    Args:
        csv_path: optional path to the CSV file (must have a 'name' column).
            When None, defaults to ``imports/brand_names.csv`` next to this
            file. NOTE(review): under scrapyd ``__file__`` lives inside the
            packed .egg zip, so the default path cannot be opened with
            ``open()`` — pass an explicit path, or load the data with
            ``pkgutil.get_data`` so it is read from inside the egg.

    Returns:
        dict mapping the lowercased brand name to its original spelling.
    """
    if csv_path is None:
        script_dir = os.path.dirname(__file__)  # absolute dir the script is in
        csv_path = os.path.join(script_dir, "imports/brand_names.csv")
    with open(csv_path, newline='') as csvfile:
        reader = csv.DictReader(csvfile, delimiter=';', quotechar='"')
        return {row['name'].lower(): row['name'] for row in reader}
我怎样才能改变方法或deploy配置来保持我的功能?
os.mkdir 可能会失败,因为它无法创建嵌套目录(父目录不存在时会直接报错)。可以改用 os.makedirs(img_dir, exist_ok=True)。
更重要的是,在 scrapyd 下运行时,项目被打包成 .egg(本质是一个 zip 文件)放在 /tmp 下,此时 os.path.dirname(__file__) 指向的是 zip 包内部的路径——它并不是文件系统上的真实目录,所以创建目录会抛出 NotADirectoryError,这正是你看到的错误。
如果你不想让图片被下载到 tmp 下,建议使用一个配置好的绝对路径(例如通过 Scrapy 设置传入),而不要依赖 os.path.dirname(__file__)。