My function writes the contents of a DataFrame to a CSV file.
import pandas as pd

def writeToCSV(outDf, defFile, toFile, retainFlag=True, delim='\t', quotechar='"'):
    # read the expected column names from the schema definition file
    headers = []
    with open(defFile, 'r') as fid:
        for line in fid:
            headers.append(line.replace('\r', '').split('\n')[0].split('\t')[0])
    # keep only the columns that appear in the schema, in schema order
    df = pd.DataFrame([], columns=headers)
    for header in outDf.columns.values:
        if header in headers:
            df[header] = outDf[header]
    df.to_csv(toFile, sep=delim, quotechar=quotechar, index=False, encoding='utf-8')
How can I parallelize this process? Currently I am using the following code:
def writeToSchemaParallel(outDf, defFile, toFile, retainFlag=True, delim='\t', quotechar='"'):
    logInfo('Start writingtoSchema in parallel...', 'track')
    headers = []
    with open(defFile, 'r') as fid:
        for line in fid:
            headers.append(line.replace('\r', '').split('\n')[0].split('\t')[0])
    df = pd.DataFrame([], columns=headers)
    for header in outDf.columns.values:
        if header in headers:
            df[header] = outDf[header]
    out_Names = Queue()  # unused; merging is done via the chunk files below
    # at most half the cores, and no more than one core per ~200k rows
    cores = min(int(multiprocessing.cpu_count() / 2), int(len(outDf) / 200000) + 1)
    # cores = 4
    logInfo(str(cores) + ' cores are used...', 'track')
    # split the data for parallel computation
    outDf = splitDf(df, cores)
    logInfo('splitDf called and df divided...', 'track')
    Filenames = []
    procs = []
    fname = toFile.split("_Opera_output")[0]
    # process the chunks in parallel
    for i in range(0, cores):
        filename = fname + "_" + str(i) + ".tsv"
        proc = Process(target=writeToSchema,
                       args=(outDf[i], defFile, filename, retainFlag, delim, quotechar, i))
        procs.append(proc)
        proc.start()
        print 'processing ' + str(i)
        Filenames.append(filename)
    # combine all returned chunks
    # outDf = out_Names.get()
    # for i in range(1, cores):
    #     outDf = outDf.append(out_q.get(), ignore_index=True)
    for proc in procs:
        proc.join()
    logInfo('Now we merge files...', 'track')
    print Filenames
    with open(toFile, 'w') as outfile:
        for fname in Filenames:
            with open(fname) as infile:
                for line in infile:
                    outfile.write(line)
But it does not work and gives the following error:
2017-12-17 16:02:55,078 - track - ERROR: Traceback (most recent call last):
2017-12-17 16:02:55,078 - track - ERROR:   File "C:/Users/sudhir.tiwari/Documents/AMEX2/Workspace/Backup/Trunk/code/runMapping.py", line 257, in <module>
2017-12-17 16:02:55,089 - track - ERROR:     writeToSchemaParallel(outDf, defFile, toFile, retainFlag, delim='\t', quotechar='"')
2017-12-17 16:02:55,153 - track - ERROR:   File "C:\Users\sudhir.tiwari\Documents\AMEX2\Workspace\Backup\Trunk\code\utils.py", line 510, in writeToSchemaParallel
2017-12-17 16:02:55,163 - track - ERROR:     with open(fname) as infile:
2017-12-17 16:02:55,198 - track - ERROR: IOError: [Errno 2] No such file or directory: 'C:/Users/sudhir.tiwari/Documents/AMEX2/Workspace/Input/work/Schindler_20171130/Schindler_20171130_0.tsv'
It never writes the chunk files, so nothing is found at the expected location. I am using multiprocessing to write the DataFrame out as multiple files and then merge everything: the DF is split into n parts, each part is written by its own process, and the resulting files are concatenated.
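For reference, splitDf divides the frame into n roughly equal parts. A minimal equivalent is sketched below, together with a hypothetical checkWorkers helper that would confirm each child actually produced its file before the merge (both are illustrative sketches, not the helpers actually used above):

import os
import numpy as np

def splitDf(df, n):
    # assumed implementation: divide the frame into n roughly equal row chunks
    return np.array_split(df, n)

def checkWorkers(procs, filenames):
    # hypothetical check to run after join(): a non-zero exitcode means a
    # child died before writing its chunk, which would explain the IOError
    for proc, filename in zip(procs, filenames):
        if proc.exitcode != 0 or not os.path.exists(filename):
            raise RuntimeError('worker did not produce ' + filename)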
Using multiprocessing will take more time than the default way (saving directly) because of the synchronization between processes. Use Process together with a Lock to make the parallel writes safe. Below is a sample POC.
import pandas as pd
import numpy as np
from multiprocessing import Lock, Process
from time import time

def writefile(df, l):
    # the lock serializes access to the shared output file
    l.acquire()
    df.to_csv('dataframe-multiprocessing.csv', index=False, mode='a', header=False)
    l.release()

if __name__ == '__main__':
    a = np.random.randint(1, 1000, 10000000)
    b = np.random.randint(1, 1000, 10000000)
    c = np.random.randint(1, 1000, 10000000)
    df = pd.DataFrame(data={'a': a, 'b': b, 'c': c})

    print('Iterative way:')
    print()
    new = time()
    df.to_csv('dataframe-conventional.csv', index=False, mode='a', header=False)
    print(time() - new, 'seconds')
    print()

    print('Multiprocessing way:')
    print()
    new = time()
    l = Lock()
    p = Process(target=writefile, args=(df, l))
    p.start()
    p.join()
    print(time() - new, 'seconds')
    print()

    df1 = pd.read_csv('dataframe-conventional.csv')
    df2 = pd.read_csv('dataframe-multiprocessing.csv')
    print('If both file same or not:')
    print(df1.equals(df2))
Result:
C:\Users\Ariff\Documents\GitHub\testing-code>python pandas_multip.py
Iterative way:

18.323541402816772 seconds

Multiprocessing way:

20.14128303527832 seconds

If both file same or not:
True
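Note that the POC above starts only a single worker, so it mainly measures process start-up overhead. A sketch of the same Lock pattern with several workers, each appending its own chunk (the chunking via np.array_split and all names here are my own additions, not from the original post):

import numpy as np
import pandas as pd
from multiprocessing import Lock, Process

def write_chunk(chunk, lock, path):
    # each worker appends its chunk while holding the lock, so rows from
    # different processes never interleave within the file
    lock.acquire()
    try:
        chunk.to_csv(path, index=False, mode='a', header=False)
    finally:
        lock.release()

if __name__ == '__main__':
    df = pd.DataFrame({'a': np.random.randint(1, 1000, 1000000)})
    lock = Lock()
    # note: mode='a' appends, so remove out.csv first for a clean run
    procs = [Process(target=write_chunk, args=(chunk, lock, 'out.csv'))
             for chunk in np.array_split(df, 4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()

The order of chunks in the output depends on which worker acquires the lock first, so this only suits cases where row order does not matter.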
When using to_csv, you can monitor your CPU usage. Whenever your CPU sits idle during the write, you can use multiprocessing to speed it up.
A simple piece of code looks like this:
import numpy as np
import pandas as pd
from joblib import Parallel, delayed

def write_csv(df, filename):
    df.to_csv(filename)

df = pd.DataFrame({'c': ['a' * 100] * 100_000_000})

# split the frame into N roughly equal chunks and write them concurrently
N = 8
parts = np.array_split(df, N)
Parallel(n_jobs=N)(delayed(write_csv)(part, f'part_{i}')
                   for i, part in enumerate(parts))
This takes 35 s on my machine, whereas a plain df.to_csv takes 3 min.
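If you need a single file like df.to_csv would produce, the parts can be concatenated afterwards. A minimal sketch, assuming the part_{i} naming used above and that each part was written with its own header ('combined.csv' is a name I chose):

import shutil

N = 8  # same number of parts as above

with open('combined.csv', 'w') as outfile:
    for i in range(N):
        with open(f'part_{i}') as infile:
            if i > 0:
                infile.readline()  # skip the repeated header; keep only the first
            shutil.copyfileobj(infile, outfile)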