因此,我有一段代码可以启动4个 Selenium Chrome 驱动程序,并从网页上的一个元素中抓取数据。代码可以简化为以下内容:
import json
import multiprocessing as mp
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
class scraper():
    """Scrapes a list of URLs using a single Chrome driver instance.

    Each instance owns one slice of the overall URL list plus an
    identifying number that is only used for progress logging.
    """

    def __init__(self, list_of_urls, process_num):
        # URLs this scraper is responsible for fetching.
        self.urls = list_of_urls
        # Identifier printed with each progress message.
        self.process_num = process_num

    def scrape_urls(self):
        """Visit every URL and collect the text of its "InterestingData" element.

        Returns:
            list[str]: one entry per URL, in input order.
        """
        # NOTE(review): `driver_dir` must be defined at module level by the
        # surrounding script — confirm it points at the chromedriver binary.
        driver = webdriver.Chrome(driver_dir)
        try:
            data = []
            for url in self.urls:
                driver.get(url)
                # Wait up to 7 seconds for the element to appear.
                element = WebDriverWait(driver, timeout=7).until(
                    lambda d: d.find_element(by=By.CLASS_NAME, value="InterestingData")
                )
                data.append(element.text)
                print("Scraper # ", self.process_num, " got data from: ", url)
            return data
        finally:
            # Always terminate the browser process, even on a timeout or any
            # other exception — the original leaked one Chrome process per
            # scraper because it never called quit().
            driver.quit()
if __name__ == '__main__':
    import math  # the original called math.ceil() without importing math

    # Load the list of URLs to scrape (JSON array of strings).
    with open('array_of_urls', 'r') as infile:
        urls = json.load(infile)

    number_of_processes = 4
    length_of_urls = len(urls)
    # Size of each scraper's slice, rounded up so every URL is covered.
    partition_into = math.ceil(length_of_urls / number_of_processes)

    scrapers = []
    for num in range(number_of_processes):
        start = num * partition_into
        end = start + partition_into
        # Python slicing clamps `end` to the list length automatically; the
        # original's `end = length_of_urls - 1` off-by-one silently dropped
        # the final URL from the last partition.
        scrapers.append(scraper(urls[start:end], num))

    with mp.Pool(processes=number_of_processes) as pool:
        async_results = [
            pool.apply_async(scrapers[num].scrape_urls)
            for num in range(number_of_processes)
        ]
        pool.close()
        pool.join()
        # .get() re-raises any exception raised inside a worker; the original
        # never collected the AsyncResults, so worker failures were silently
        # discarded and the scraped data was never retrieved.
        result_array = [res.get() for res in async_results]
我遇到的问题是,5-10分钟后,其中一个爬虫就会停止,唯一能唤醒它的就是手动刷新浏览器上的页面。如果我把它放一个小时左右,四个中的三个会停下来,只有一个在跑。它们不会报错或打印任何东西,只是停止运行。我在两台不同的笔记本电脑上试过,但它们都有相同的问题。我还尝试过用4个不同的mp.Process()运行scrape_urls来实现这一点,结果也是一样。有没有其他人遇到过这个问题,或者是我在这里做错了什么?
首先,Selenium已经在创建一个进程,因此使用多线程而不是多处理要好得多,因为每个线程都会启动一个进程。此外,在driver = webdriver.Chrome(driver_dir)
语句之后的scrape_urls
中,函数的其余部分应包含在try/finally语句中,其中finally块包含driver.quit()
,以确保无论是否出现异常,驱动程序进程都会终止。现在,您让所有驱动程序进程都处于运行状态。
您还可以考虑使用以下技术来创建一个大小为4(或更小,取决于要处理的URL数量)的线程池,但池中的每个线程都会自动重用分配给它的驱动程序,该驱动程序保存在线程本地存储中。您可能希望更改用于创建驱动程序的选项(当前为"无头"模式):
import json
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from multiprocessing.pool import ThreadPool
import threading
import gc
# Per-thread storage: each pool thread keeps its own Driver wrapper here.
threadLocal = threading.local()


class Driver:
    """Thread-local wrapper around a headless Chrome WebDriver.

    Each worker thread obtains exactly one browser via ``create_driver``;
    the browser process is shut down when the wrapper is garbage-collected.
    """

    def __init__(self):
        chrome_options = webdriver.ChromeOptions()
        chrome_options.add_argument("--headless")
        chrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])
        self.driver = webdriver.Chrome(options=chrome_options)

    def __del__(self):
        # Terminate the browser process once this wrapper is collected.
        self.driver.quit()
        print('The driver has been "quitted".')

    @classmethod
    def create_driver(cls):
        """Return the calling thread's driver, creating it on first use."""
        wrapper = getattr(threadLocal, 'the_driver', None)
        if wrapper is None:
            print('Creating new driver.')
            wrapper = cls()
            threadLocal.the_driver = wrapper
        raw_driver = wrapper.driver
        # Drop the local reference so that only the thread-local storage
        # keeps the wrapper (and thus the browser) alive.
        wrapper = None
        return raw_driver
def scraper(url):
    """Fetch one URL and return the text of its "InterestingData" element.

    Reuses the calling thread's cached Chrome driver via Driver.create_driver().
    """
    browser = Driver.create_driver()
    browser.get(url)
    waiter = WebDriverWait(browser, timeout=7)
    element = waiter.until(
        lambda d: d.find_element(by=By.CLASS_NAME, value="InterestingData")
    )
    print("got data from: ", url)
    return element.text
# Load the JSON array of URLs to scrape.
with open('array_of_urls', 'r') as infile:
    urls = json.load(infile)

# Never spin up more threads than there are URLs to fetch.
number_of_processes = min(4, len(urls))

with ThreadPool(processes=number_of_processes) as pool:
    result_array = pool.map(scraper, urls)
    # Drop the module-level reference so the thread-local Driver wrappers
    # become unreachable before the pool's threads are destroyed:
    del threadLocal
    # Force a collection so each Driver.__del__ runs and quits its browser:
    gc.collect()
    pool.close()
    pool.join()