Scraping start_urls from a txt file



I have about 100K URLs to scrape, so I want to read them from a txt file. Here is the code:

import scrapy
from scrapy import Request
from scrapy.crawler import CrawlerProcess

class ConadstoresSpider(scrapy.Spider):
    name = 'conadstores'
    headers = {'user_agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1"}
    allowed_domains = ['conad.it']
    #start_urls = ['http://www.conad.it/ricerca-negozi/negozio.002781.html','https://www.conad.it/ricerca-negozi/negozio.006804.html']
    #start_urls = [l.strip() for l in open("/Users/macbook/PycharmProjects/conad/conad/conadlinks.txt").readlines()]
    #f = open("/Users/macbook/PycharmProjects/conad/conad/conadlinks.txt")
    #start_urls = [url.strip() for url in f.readlines()]
    #f.close()
    with open('/Users/macbook/PycharmProjects/conad/conad/conadlinks.txt') as file:
        start_urls = [line.strip() for line in file]

    def start_request(self):
        request = Request(url=self.start_urls, callback=self.parse)
        yield request

    def parse(self, response):
        yield {
            'address': response.css('.address-oswald::text').extract(),
            'phone': response.css('span.phone::text').extract(),
        }

But I keep getting this error:

2021-12-08 13:27:48 [scrapy.core.engine] ERROR: Error while obtaining start requests
Traceback (most recent call last):
  File "/Users/macbook/PycharmProjects/conad/venv/lib/python3.9/site-packages/scrapy/core/engine.py", line 127, in _next_request
    request = next(slot.start_requests)
  File "/Users/macbook/PycharmProjects/conad/conad/middlewares.py", line 52, in process_start_requests
    for r in start_requests:
  File "/Users/macbook/PycharmProjects/conad/venv/lib/python3.9/site-packages/scrapy/spiders/__init__.py", line 83, in start_requests
    yield Request(url, dont_filter=True)
  File "/Users/macbook/PycharmProjects/conad/venv/lib/python3.9/site-packages/scrapy/http/request/__init__.py", line 25, in __init__
    self._set_url(url)
  File "/Users/macbook/PycharmProjects/conad/venv/lib/python3.9/site-packages/scrapy/http/request/__init__.py", line 62, in _set_url
    raise ValueError('Missing scheme in request url: %s' % self._url)
ValueError: Missing scheme in request url: %7B%5Curtf1%5Cansi%5Cansicpg1252%5Cocoartf2580
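(For what it's worth, URL-decoding the value at the end of the traceback shows it is not a URL at all but the start of an RTF header, so the file appears to have been saved as rich text rather than plain text. A quick check, using only Python's standard library:)

from urllib.parse import unquote

# Decode the percent-encoded value from the error message.
print(unquote("%7B%5Curtf1%5Cansi%5Cansicpg1252%5Cocoartf2580"))
# -> {\urtf1\ansi\ansicpg1252\ocoartf2580  (an RTF control sequence, not a URL)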

Any idea? Thanks.

We can override the start_urls logic in the spider's start_requests() method.

Here is a simple way to extract the data:

import scrapy

class ConadstoresSpider(scrapy.Spider):
    name = 'conadstores'

    def start_requests(self):
        # read file data (you can use different logic to extract URLs from text files)
        a_file = open("/Users/macbook/PycharmProjects/conad/conad/conadlinks.txt")
        file_contents = a_file.read()
        contents_split = file_contents.splitlines()
        # iterate over the urls extracted from the text file
        for url in contents_split:
            # send a request to each extracted URL
            yield scrapy.Request(url)

    def parse(self, response, **kwargs):
        yield {
            'address': response.css('.address-oswald::text').extract(),
            'phone': response.css('span.phone::text').extract(),
        }
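Assuming a standard Scrapy project layout, you can then run the spider and export the scraped items with Scrapy's CLI, for example:

scrapy crawl conadstores -o results.json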

You can use different file-reading logic, but make sure it yields valid URLs.
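For instance, here is one variation (a minimal sketch, assuming the file really is plain UTF-8 text with one URL per line): it streams the file line by line, which avoids holding all ~100K URLs in memory at once, and skips blank or malformed lines that would otherwise raise the same "Missing scheme in request url" ValueError:

import scrapy

class ConadstoresSpider(scrapy.Spider):
    name = 'conadstores'

    def start_requests(self):
        # Stream the file instead of reading it all into memory at once.
        with open("/Users/macbook/PycharmProjects/conad/conad/conadlinks.txt", encoding="utf-8") as f:
            for line in f:
                url = line.strip()
                # Only yield lines that look like absolute http(s) URLs;
                # anything else would raise "Missing scheme in request url".
                if url.startswith(("http://", "https://")):
                    yield scrapy.Request(url, callback=self.parse)

    def parse(self, response, **kwargs):
        yield {
            'address': response.css('.address-oswald::text').extract(),
            'phone': response.css('span.phone::text').extract(),
        }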
