print('Page Loaded')
# Every <tr> of the inline list; each row becomes one CSV line.
myelem = driver.find_elements_by_xpath(".//tr[contains(@class, 'inlineListRow')]")
# Python 3: csv needs a text-mode handle opened with newline='' --
# 'wb' makes csv.writer raise TypeError (it writes str, not bytes).
with open("systext.csv", "w", newline="") as f:
    writer = csv.writer(f)
    for myinfo in myelem:
        itemize = myinfo.find_elements_by_xpath(".//td[contains(@class, 'inlineListBodyCell')]")
        # NOTE(review): each .text access is a separate WebDriver round-trip;
        # at 13 cols x 1800 rows that is ~23k requests -- the likely cause of
        # the 10+ minute runtime, not the CSV writing.
        # The original pasted code repeated this loop twice back-to-back,
        # appending to the same list, so every row was written twice (the
        # second copy with doubled cells); the duplicate is removed here.
        writer.writerow([peritem.text for peritem in itemize])
我试图从网站中提取信息,然后将其写入 CSV。我先获取所有元素,再获取每个元素的子元素。
我得到的代码有效
问题是:完成我的循环需要 10+ 分钟,这是正常的吗?还是我的代码对这个任务来说效率不高?
数据为 13 列 x 1800 行 总大小为 400kb
我不知道
整个代码,但它应该是这样的:
# NOTE(review): assumes `driver` is a live Selenium WebDriver with the target
# page already loaded -- confirm against the surrounding (unshown) script.
print('Page Loaded')
# All matching table rows; each <tr> is later written as one CSV row.
myelem = driver.find_elements_by_xpath(".//tr[contains(@class, 'inlineListRow')]")
def iterate(w, info):
    """Write one table row to the CSV writer *w*.

    *info* is a Selenium row element (a ``<tr>``); the text of each
    matching ``<td>`` cell becomes one column.

    Declared as a plain function: the original ``async def`` version was
    invoked without ``await``, so the coroutine was created but its body
    never actually executed -- nothing was ever written to the file.
    """
    cells = info.find_elements_by_xpath(".//td[contains(@class, 'inlineListBodyCell')]")
    # One .text call per cell is a WebDriver round-trip; this line is the
    # hot spot for large tables.
    w.writerow([cell.text for cell in cells])
with open("systext.csv","wb") as f:
writer = csv.writer(f)
for myinfo in myelem:
iterate(writer, myinfo)