python中的蒙特卡罗模拟:使用ipyparallel的并行化比串行化花费更长的时间



hei,

我正在对散射介质中的光子输运进行蒙特卡罗模拟。我正试图将其并行化,但与串行模拟相比,很难观察到运行时间的任何性能改进

蒙特卡罗代码可以在下面找到。Photon类包含用于计算单个光子的传输和散射的各种方法,而RunPhotonPackage类针对给定厚度L的散射介质运行由N个光子组成的一组模拟。这些是目前我仅有的两个输入参数:

import matplotlib.pyplot as plt
import numpy as np
from numpy.random import random as rand

NPHOTONS = 100000 # Nb photons
PI  = np.pi
EPS = 1.e-6
L = 100. # scattering layer thickness
class Photon():
    """A single photon undergoing transport and Henyey-Greenstein scattering.

    State (direction ``k`` and position ``x, y, z``) is stored per instance.
    The original version kept ``k`` on the class and mutated it in place, so
    every live photon shared one direction vector -- a latent bug that only
    worked because photons were simulated strictly one at a time.
    """

    mut = 0.02       # extinction coefficient (inverse mean free path)
    k = [0, 0, 1]    # class-level default direction, kept for back-compat

    def __init__(self, ko, pos):
        # Copy the direction so each photon owns its own mutable vector.
        self.k = list(ko)
        self.x = pos[0]
        self.y = pos[1]
        self.z = pos[2]

    def move(self):
        """Advance the photon one sampled free path; return the new z."""
        ksi = rand()  # scalar draw (the original rand(1) made 1-element arrays)
        s = -np.log(1 - ksi) / Photon.mut  # exponentially distributed step
        self.x += s * self.k[0]
        self.y += s * self.k[1]
        self.z += s * self.k[2]
        return self.z

    def exittop(self):
        # Stub kept for interface compatibility; intentionally does nothing.
        pass

    def exitbase(self):
        # Stub kept for interface compatibility; intentionally does nothing.
        pass

    def HG(self, g):
        """Sample cos(theta) from the Henyey-Greenstein phase function."""
        if g == 0:
            # Isotropic limit: the closed-form inversion divides by g.
            return 2.0 * rand() - 1.0
        rand_teta = rand()
        return 0.5 * (1 + g**2 - ((1 - g**2) / (1 - g + 2. * g * rand_teta))**2) / g

    def scatter(self):
        """Draw a new propagation direction after a scattering event."""
        # Azimuth first, then polar angle -- same draw order as before.
        phi = 2 * PI * rand()
        costeta = self.HG(0.85)
        sinteta = (1 - costeta**2)**0.5

        sinphi = np.sin(phi)
        cosphi = np.cos(phi)
        kx, ky, kz = self.k
        temp = (1 - kz**2)**0.5
        if np.abs(temp) > EPS:
            # General rotation of (kx, ky, kz) by (theta, phi).
            mux = sinteta*(kx*kz*cosphi - ky*sinphi)/temp + kx*costeta
            muy = sinteta*(ky*kz*cosphi + kx*sinphi)/temp + ky*costeta
            muz = -sinteta*cosphi*temp + kz*costeta
        else:
            # Direction nearly parallel to z: use the degenerate rotation.
            mux = sinteta*cosphi
            muy = sinteta*sinphi
            muz = costeta if kz >= 0 else -costeta

        # Update this photon's direction only (no shared class state).
        self.k = [mux, muy, muz]

class RunPhotonPackage():
    """Run a package of NPHOTONS photons through a slab of thickness L."""

    def __init__(self, L, NPHOTONS):
        self.L = L                  # scattering layer thickness
        self.NPHOTONS = NPHOTONS    # number of photons to launch

    def RunPhoton(self):
        """Propagate every photon until it leaves the slab.

        Returns a (3, NPHOTONS) array holding the final x, y, z of each
        photon at the moment it exited the layer.
        """
        Dist_Pos = np.zeros((3, self.NPHOTONS))
        # [0, 0, 1] is already unit length; the per-iteration
        # np.linalg.norm division was loop-invariant dead work.
        k_init_norm = np.array([0., 0., 1.])
        for i in range(self.NPHOTONS):
            # Each photon gets its own copy of the direction, because
            # Photon.scatter() mutates the vector it was handed.
            newPhoton = Photon(k_init_norm.copy(), [0, 0, 0])
            newZpos = 0.
            # Move and scatter while the photon stays inside 0 <= z <= L.
            while 0. <= newZpos <= self.L:
                newZpos = newPhoton.move()
                newPhoton.scatter()  # mutates direction in place; returns None
            Dist_Pos[0, i] = newPhoton.x
            Dist_Pos[1, i] = newPhoton.y
            Dist_Pos[2, i] = newPhoton.z

        return Dist_Pos

我运行以下串行代码来记录不同层厚度长度和给定光子数的位置直方图。

import time

tic = time.time()
dictresult = {}
# Sweep the layer thickness; keep the final-position array for each L,
# keyed by the thickness formatted as a string.
for L in np.arange(10, 100, 10):
    print('L={0} m'.format(L))
    Dist_Pos = RunPhotonPackage(L, 10000).RunPhoton()
    key = '{0}'.format(L)
    dictresult[key] = Dist_Pos
toc = time.time()
print('sec Elapsed: {0}s'.format(toc - tic))

它的运行耗时为:

sec Elapsed: 26.425330162s

当我尝试使用ipyparallel:并行化代码时

import ipyparallel

# Connect to the running cluster (started with: ipcluster start -n 4).
clients = ipyparallel.Client()
clients.ids
dview = clients[:]
# Make numpy and the rand alias available on every engine.
dview.execute('import numpy as np')
dview.execute('from numpy.random import random as rand')
dview['PI'] = np.pi
dview['EPS'] = 1.e-6
# NOTE(review): pushing classes through a dict is fragile -- engines may
# fail to reconstruct them; having each engine import a module that
# defines them is the reliable approach.
dview.push({"Photon": Photon, "RunPhotonPackage": RunPhotonPackage})

def RunPhotonPara(L):
    """Run one 10000-photon package for layer thickness L (executes on an engine)."""
    LayerL = RunPhotonPackage(L, 10000)
    dPos = LayerL.RunPhoton()
    return dPos

tic = time.time()
dictresultpara = []
for L in np.arange(10, 100, 10):
    print('L={0}'.format(L))
    value = dview.apply_async(RunPhotonPara, L)
    dictresultpara.append(value)
# Wait ONCE, after all tasks are submitted. Waiting inside the loop
# blocked until each task finished before submitting the next, which
# serialized the work and made the "parallel" run slower than serial.
clients.wait(dictresultpara)
toc = time.time()
print('sec Elapsed: {0}s'.format(toc - tic))

它的运行耗时为:

sec Elapsed: 55.4289810658s

所以时间增加了一倍多!!!我在ubuntu 32位上运行这个程序,有四个核心,并使用ipcluster start -n 4在localhost上启动一个控制器和四个引擎。我原以为并行代码的运行时间大约是串行代码的1/4,但显然不是。

为什么会这样,以及如何纠正?

谢谢你的建议。

Greg

我做了一些更改以简化您的示例。串行版本在我的Mac上运行大约18秒,而带有4个引擎的并行版本运行大约一半的时间。考虑到任务的持续时间不均衡,这似乎是合理的。

按照之前的设置方式,引擎中出现了错误,因此任务很快就返回了。通过字典来传递类似乎是不够的。相反,代码现在在每个引擎上导入定义这些类的模块。请注意,我只是为这个例子临时修改(hack)了sys.path;在生产环境中,您应该以更合适的方式处理这个问题。

我认为你不应该在循环内部调用wait()。此外,map_async()方法似乎比apply_async()更方便。

要运行此示例,请创建一个目录,将以下代码复制到该目录中名为"photon.py"的文件中,并在其中创建一个空的"__init__.py"。修改代码中插入sys.path的那一行,使其指向您的新目录。然后切换到该目录并运行"python photon.py":

# photon.py
import ipyparallel
import numpy as np
from numpy.random import random as rand
import time
NPHOTONS = 100000 # Nb photons
PI  = np.pi
EPS = 1.e-6
L = 100. # scattering layer thickness
class Photon():
    """A single photon undergoing transport and Henyey-Greenstein scattering.

    State (direction ``k`` and position ``x, y, z``) is stored per instance.
    The original version kept ``k`` on the class and mutated it in place, so
    every live photon shared one direction vector -- a latent bug that only
    worked because photons were simulated strictly one at a time.
    """

    mut = 0.02       # extinction coefficient (inverse mean free path)
    k = [0, 0, 1]    # class-level default direction, kept for back-compat

    def __init__(self, ko, pos):
        # Copy the direction so each photon owns its own mutable vector.
        self.k = list(ko)
        self.x = pos[0]
        self.y = pos[1]
        self.z = pos[2]

    def move(self):
        """Advance the photon one sampled free path; return the new z."""
        ksi = rand()  # scalar draw (the original rand(1) made 1-element arrays)
        s = -np.log(1 - ksi) / Photon.mut  # exponentially distributed step
        self.x += s * self.k[0]
        self.y += s * self.k[1]
        self.z += s * self.k[2]
        return self.z

    def exittop(self):
        # Stub kept for interface compatibility; intentionally does nothing.
        pass

    def exitbase(self):
        # Stub kept for interface compatibility; intentionally does nothing.
        pass

    def HG(self, g):
        """Sample cos(theta) from the Henyey-Greenstein phase function."""
        if g == 0:
            # Isotropic limit: the closed-form inversion divides by g.
            return 2.0 * rand() - 1.0
        rand_teta = rand()
        return 0.5 * (1 + g**2 - ((1 - g**2) / (1 - g + 2. * g * rand_teta))**2) / g

    def scatter(self):
        """Draw a new propagation direction after a scattering event."""
        # Azimuth first, then polar angle -- same draw order as before.
        phi = 2 * PI * rand()
        costeta = self.HG(0.85)
        sinteta = (1 - costeta**2)**0.5

        sinphi = np.sin(phi)
        cosphi = np.cos(phi)
        kx, ky, kz = self.k
        temp = (1 - kz**2)**0.5
        if np.abs(temp) > EPS:
            # General rotation of (kx, ky, kz) by (theta, phi).
            mux = sinteta*(kx*kz*cosphi - ky*sinphi)/temp + kx*costeta
            muy = sinteta*(ky*kz*cosphi + kx*sinphi)/temp + ky*costeta
            muz = -sinteta*cosphi*temp + kz*costeta
        else:
            # Direction nearly parallel to z: use the degenerate rotation.
            mux = sinteta*cosphi
            muy = sinteta*sinphi
            muz = costeta if kz >= 0 else -costeta

        # Update this photon's direction only (no shared class state).
        self.k = [mux, muy, muz]

class RunPhotonPackage():
    """Run a package of NPHOTONS photons through a slab of thickness L."""

    def __init__(self, L, NPHOTONS):
        self.L = L                  # scattering layer thickness
        self.NPHOTONS = NPHOTONS    # number of photons to launch

    def RunPhoton(self):
        """Propagate every photon until it leaves the slab.

        Returns a (3, NPHOTONS) array holding the final x, y, z of each
        photon at the moment it exited the layer.
        """
        Dist_Pos = np.zeros((3, self.NPHOTONS))
        # [0, 0, 1] is already unit length; the per-iteration
        # np.linalg.norm division was loop-invariant dead work.
        k_init_norm = np.array([0., 0., 1.])
        for i in range(self.NPHOTONS):
            # Each photon gets its own copy of the direction, because
            # Photon.scatter() mutates the vector it was handed.
            newPhoton = Photon(k_init_norm.copy(), [0, 0, 0])
            newZpos = 0.
            # Move and scatter while the photon stays inside 0 <= z <= L.
            while 0. <= newZpos <= self.L:
                newZpos = newPhoton.move()
                newPhoton.scatter()  # mutates direction in place; returns None
            Dist_Pos[0, i] = newPhoton.x
            Dist_Pos[1, i] = newPhoton.y
            Dist_Pos[2, i] = newPhoton.z
        return Dist_Pos
def RunPhoton(L):
    """Convenience wrapper: run a 10000-photon package for thickness L."""
    print('L={0}'.format(L))
    package = RunPhotonPackage(L, 10000)
    return package.RunPhoton()
def serialTest(values):
    """Run RunPhoton over `values` one at a time and report elapsed time."""
    # print() calls for Python 3 compatibility (the original Python-2-only
    # print statements were inconsistent with the rest of the file).
    print("Running serially...")
    tic = time.time()
    # list() forces evaluation: map() is lazy on Python 3.
    results = list(map(RunPhoton, values))
    print(results)
    toc = time.time()
    print('sec Elapsed: {0}s'.format(toc - tic))
def parallelTest(values):
    """Fan RunPhoton over `values` across all engines; report elapsed time.

    Uses print() for Python 3 compatibility, matching the rest of the file.
    """
    print("Running in parallel...")
    client = ipyparallel.Client()
    view = client[:]
    view.execute('import sys')
    # CHANGE THIS PATH TO REFER TO WHEREVER YOU PUT THIS CODE
    view.execute('sys.path.insert(0, "/Users/rjp/ipp")')
    # Each engine imports the module so the classes exist there by name
    # (more robust than pushing class objects through a dict).
    view.execute('from photon import *')
    tic = time.time()
    # map_async submits every task at once; get() blocks until all finish.
    asyncResults = view.map_async(RunPhoton, values)
    print(asyncResults.get())
    toc = time.time()
    print('sec Elapsed: {0}s'.format(toc - tic))

if __name__ == "__main__":
    values = np.arange(10, 100, 10)
    serialTest(values)
    parallelTest(values)

最新更新