Increasing multiprocessing time



I defined a multiprocessing script to improve image analysis. It seems to work well, but I ran several tests to determine the optimal number of processes, which involves varying that number. Since there is some scatter in the results, I added a loop to repeat each test one hundred times. Over the course of that loop, however, the run time increases considerably. What might be the origin of my problem? Do I have to flush the memory? It does not appear to be saturated.

A piece of my code:

from multiprocessing import Process, current_process
import multiprocessing 
import glob
import matplotlib.pyplot as plt
from skimage import io
import time
import sys
import numpy as np
import numpy.ma as ma
import gc
import os
from PIL import Image
from skimage import exposure
import cv2 

Path_input = "E:\test\raw\"
Path_output = "E:\test\"
Img_list = glob.glob((Path_input + 'Test_*.tif' ))[:]
size_y,size_x = io.imread(Img_list[0]).shape
#Function for the multi process
def Ajustement(x):
    #image reading
    img = plt.imread(Img_list[x])
    #create a CLAHE object
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    cl1 = clahe.apply(img.astype(np.uint16))
    cv2.imwrite(Path_output + 'Ajusted' + "%05d" % x + '.tif', cl1)
    return 'Ajustement OK!'

#create strings list of process
cpu_max = 10
list_process = ['Process_' + str(n + 1) for n in range(cpu_max)]
get_timer = time.perf_counter  # time.clock was removed in Python 3.8; perf_counter works on every platform
time_store = []
time_process = []
if __name__ == '__main__':
    range_adjusted = np.arange(0,len(Img_list),cpu_max)
    for m in range(100): #loop to obtain a mean time for the process
        print(m)
        timer = get_timer()  # time measuring starts now
        for i in range_adjusted:
            o = 0
            for item in list_process[:cpu_max]: #process creation
                globals()[item] = Process(name='worker1', target=Ajustement, args=(i+o,))
                o=o+1               
            o = 0
            for item in list_process[:cpu_max]: #process start
                globals()[item].start() 
                o=o+1               
            o = 0
            for item in list_process[:cpu_max]: #process join
                globals()[item].join()  
                o=o+1   
            if i == range_adjusted.max():
                print("Normalization and Equalization finished")
                timer = get_timer() - timer  # get delta time as soon as it finishes
                time_store.append(timer)
                time_process.append(timer/cpu_max)
                np.savetxt(Path_output + 'time_tot_normalization.txt',time_store)
                np.savetxt(Path_output + 'time_process_normalization.txt',time_process)
                print("tTotal: {:.2f} seconds".format(timer))
                print("tAvg. per process: {:.2f} seconds".format(timer/cpu_max))

I think this was due to a memory leak. Indeed, after I added a gc.collect() call after each loop iteration, the problem was solved.
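A minimal sketch of where such a call could go, assuming it belongs at the end of each iteration of the outer timing loop; worker here is a hypothetical stand-in for the real Ajustement function:

from multiprocessing import Process
import gc
import time

def worker(x):
    pass  # hypothetical stand-in for the per-image work done by Ajustement

if __name__ == '__main__':
    cpu_max = 10
    timings = []
    for m in range(100):  # repeated timing runs
        start = time.perf_counter()
        procs = [Process(target=worker, args=(i,)) for i in range(cpu_max)]
        for p in procs:
            p.start()
        for p in procs:
            p.join()
        timings.append(time.perf_counter() - start)
        gc.collect()  # release image buffers and finished Process objects before the next run

Collecting explicitly after every run keeps objects created during one run from piling up into the next run's measurement.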
