Sending a linked list over MPI

I have seen this question asked many times, but none of the answers I found solved my problem. I want to send a linked list, in Fortran, to another process via MPI. I have done something similar before, where the derived data type in the linked list was as follows:

type a
 integer :: varA
 type(a), pointer :: next => null()
 real :: varB
end type a
The way I did it then was to create an MPI datatype containing all the varA values together and receive it as an array of integers, and then do the same for varB.
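
A minimal sketch of that field-by-field approach, assuming the type a above, an initialized MPI environment, and that the routine is CONTAINed in the main program (so the type and the MPI module are host-associated); the names send_fields, head, n and dest are placeholders of my own choosing:

 SUBROUTINE send_fields(head, n, dest)
  IMPLICIT NONE
  TYPE(a), POINTER :: head                     ! first node of the list
  INTEGER :: n, dest                           ! number of nodes, destination rank
  TYPE(a), POINTER :: p
  INTEGER, DIMENSION(:), ALLOCATABLE :: allA
  REAL, DIMENSION(:), ALLOCATABLE :: allB
  INTEGER :: i, ierr
  ALLOCATE(allA(n), allB(n))
  p => head
  DO i = 1, n                                  ! gather each field into a
    allA(i) = p%varA                           ! contiguous scratch array
    allB(i) = p%varB
    p => p%next
  END DO
  CALL MPI_SEND(allA, n, MPI_INTEGER, dest, 0, MPI_COMM_WORLD, ierr)
  CALL MPI_SEND(allB, n, MPI_REAL, dest, 1, MPI_COMM_WORLD, ierr)
  DEALLOCATE(allA, allB)
 END SUBROUTINE send_fields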

What I want to do now is create the linked list and then pack all the varA and varB values together to form a single MPI datatype. The code that does this is given below.

PROGRAM TEST
 USE MPI
 IMPLICIT NONE
 TYPE a
  INTEGER:: b
  REAL :: e
  TYPE(a), POINTER :: nextPacketInList => NULL()     
 END TYPE 
 TYPE PacketComm
    INTEGER :: numPacketsToComm  
    TYPE(a), POINTER :: PacketListHeadPtr => NULL()
    TYPE(a), POINTER :: PacketListTailPtr => NULL()
 END TYPE PacketComm
 TYPE(PacketComm), DIMENSION(:), ALLOCATABLE :: PacketCommArray
 INTEGER :: packPacketDataType !New data type 
 INTEGER :: ierr, size, rank, dest, ind
 integer :: b
 real :: e
 CALL MPI_INIT(ierr)
 CALL MPI_COMM_SIZE(MPI_COMM_WORLD, size, ierr)
 CALL MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)
 IF(.NOT. ALLOCATED(PacketCommArray)) THEN
   ALLOCATE(PacketCommArray(0:size-1), STAT=ierr)
   DO ind=0, size-1
     PacketCommArray(ind)%numPacketsToComm = 0
   END DO
 ENDIF
 b = 2
 e = 4
 dest = 1
 CALL addPacketToList(b, e, dest)
 b = 3
 e = 5
 dest = 1
 CALL addPacketToList(b, e, dest)
 dest = 1
 CALL packPacketList(dest)
 IF(rank == 0) THEN
  dest = 1
  CALL sendPacketList(dest)
 ELSE
  CALL recvPacketList()
 ENDIF
 CALL MPI_FINALIZE(ierr)
 CONTAINS
SUBROUTINE addPacketToList(b, e, rank)
 IMPLICIT NONE
 INTEGER :: b, rank, ierr
 REAL :: e
 TYPE(a), POINTER :: head
 IF(.NOT. ASSOCIATED(PacketCommArray(rank)%PacketListHeadPtr)) THEN
   ALLOCATE(PacketCommArray(rank)%PacketListHeadPtr, STAT=ierr)
   PacketCommArray(rank)%PacketListHeadPtr%b = b
   PacketCommArray(rank)%PacketListHeadPtr%e = e
   PacketCommArray(rank)%PacketListHeadPtr%nextPacketInList => NULL()
   PacketCommArray(rank)%PacketListTailPtr => PacketCommArray(rank)%PacketListHeadPtr
   PacketCommArray(rank)%numPacketsToComm = PacketCommArray(rank)%numPacketsToComm+1
 ELSE
   ALLOCATE(PacketCommArray(rank)%PacketListTailPtr%nextPacketInList, STAT=ierr)
    PacketCommArray(rank)%PacketListTailPtr => PacketCommArray(rank)%PacketListTailPtr%nextPacketInList
   PacketCommArray(rank)%PacketListTailPtr%b = b
   PacketCommArray(rank)%PacketListTailPtr%e = e
   PacketCommArray(rank)%PacketListTailPtr%nextPacketInList => NULL()
   PacketCommArray(rank)%numPacketsToComm = PacketCommArray(rank)%numPacketsToComm+1
 ENDIF
END SUBROUTINE addPacketToList
SUBROUTINE packPacketList(rank)
  IMPLICIT NONE
  INTEGER :: rank
  INTEGER :: numListNodes
  INTEGER(kind=MPI_ADDRESS_KIND), DIMENSION(:), ALLOCATABLE :: listNodeAddr
  INTEGER(kind=MPI_ADDRESS_KIND), DIMENSION(:), ALLOCATABLE :: listNodeDispl
  INTEGER, DIMENSION(:), ALLOCATABLE :: listNodeTypes
  INTEGER, DIMENSION(:), ALLOCATABLE :: listNodeCount
 TYPE(a), POINTER :: head
 INTEGER :: numNode
 head => PacketCommArray(rank)%PacketListHeadPtr
 numListNodes = PacketCommArray(rank)%numPacketsToComm
 PRINT *, ' Number of nodes to allocate for rank ', rank , ' is ', numListNodes
 ALLOCATE(listNodeTypes(2*numListNodes), stat=ierr)
 ALLOCATE(listNodeCount(2*numListNodes), stat=ierr)
 DO numNode=1, 2*numListNodes, 2
   listNodeTypes(numNode) = MPI_INTEGER
   listNodeTypes(numNode+1) = MPI_REAL
 END DO
 DO numNode=1, 2*numListNodes, 2
   listNodeCount(numNode) = 1
   listNodeCount(numNode+1) = 1
 END DO
 ALLOCATE(listNodeAddr(2*numListNodes), stat=ierr)
 ALLOCATE(listNodeDispl(2*numListNodes), stat=ierr)
 numNode = 1
 DO WHILE(ASSOCIATED(head))
  CALL MPI_GET_ADDRESS(head%b, listNodeAddr(numNode), ierr)
  CALL MPI_GET_ADDRESS(head%e, listNodeAddr(numNode+1), ierr)
  numNode = numNode + 2
  head => head%nextPacketInList
 END DO
 DO numNode=1, UBOUND(listNodeAddr,1)
  listNodeDispl(numNode) = listNodeAddr(numNode) - listNodeAddr(1)
 END DO
 CALL MPI_TYPE_CREATE_STRUCT(UBOUND(listNodeAddr,1), listNodeCount, listNodeDispl, listNodeTypes, packPacketDataType, ierr)
 CALL MPI_TYPE_COMMIT(packPacketDataType, ierr)
END SUBROUTINE packPacketList
SUBROUTINE sendPacketList(rank)
 IMPLICIT NONE
 INTEGER :: rank, ierr, numNodes
 TYPE(a), POINTER :: head
 head => PacketCommArray(rank)%PacketListHeadPtr
 numNodes = PacketCommArray(rank)%numPacketsToComm
 CALL MPI_SSEND(head%b, 1, packPacketDataType, rank, 0, MPI_COMM_WORLD, ierr)
END SUBROUTINE sendPacketList
SUBROUTINE recvPacketList
 IMPLICIT NONE
 TYPE(a), POINTER :: head
 TYPE(a), DIMENSION(:), ALLOCATABLE :: RecvPacketCommArray
 INTEGER, DIMENSION(:), ALLOCATABLE :: recvB
 INTEGER :: numNodes, ierr, numNode
 INTEGER, DIMENSION(MPI_STATUS_SIZE):: status
 head => PacketCommArray(rank)%PacketListHeadPtr
 numNodes = PacketCommArray(rank)%numPacketsToComm
 ALLOCATE(RecvPacketCommArray(numNodes), stat=ierr)
 ALLOCATE(recvB(numNodes), stat=ierr)
 CALL MPI_RECV(RecvPacketCommArray, 1, packPacketDataType, 0, 0, MPI_COMM_WORLD, status, ierr)
 DO numNode=1, numNodes
    PRINT *, ' value in b', RecvPacketCommArray(numNode)%b
    PRINT *, ' value in e', RecvPacketCommArray(numNode)%e
 END DO
END SUBROUTINE recvPacketList
END PROGRAM TEST

Basically, I create a linked list with two nodes containing the following data:

Node 1: b = 2, e = 4

Node 2: b = 3, e = 5

When I run this code on two cores, this is what I get on core 1:

value in b           2
value in e   4.000000
value in b           0
value in e  0.0000000E+00

So my code seems to send the data in the first node of the linked list correctly, but not the second. Could someone please tell me whether what I am trying to do is feasible, and what is wrong with the code? I know I could send all the values of b in all the nodes together, and then send all the values of e together. But my derived data type will probably end up containing more variables (including arrays), and I want to be able to send all the data in one go instead of using multiple sends.

Thanks

This is not easy code for me to read, but it seems you expect the receive buffer to receive contiguous data, which is not the case. The odd type you construct by computing address offsets will not match the receive buffer. To illustrate this, here is a simple example (it was written quickly; do not take it as an example of good code):

program example
use mpi
implicit none
integer :: nprocs, myrank
integer :: buf(4)
integer :: n_elements
integer :: len_element(2)
integer(MPI_ADDRESS_KIND) :: disp_element(2)
integer :: type_element(2)
integer :: newtype
integer :: istat(MPI_STATUS_SIZE)
integer :: ierr
call mpi_init(ierr)
call mpi_comm_size(mpi_comm_world, nprocs, ierr)
call mpi_comm_rank(mpi_comm_world, myrank, ierr)
! simple example illustrating mpi_type_create_struct
! take an integer array buf(4):
! on rank 0: [ 7, 2, 6, 4 ]
! on rank 1: [ 1, 1, 1, 1 ]
! and we create a struct to send only elements 1 and 3
! so that on rank 1 we'll get [7, 1, 6, 1]
if (myrank == 0) then
  buf = [7, 2, 6, 4]
else
  buf = 1
end if
n_elements = 2
len_element = 1
disp_element(1) = 0  ! byte offset of buf(1)
disp_element(2) = 8  ! byte offset of buf(3), assuming 4-byte default integers
type_element = MPI_INTEGER
call mpi_type_create_struct(n_elements, len_element, disp_element, type_element, newtype, ierr)
call mpi_type_commit(newtype, ierr)
write(6,'(1x,a,i2,1x,a,4i2)') 'SEND| rank ', myrank, 'buf = ', buf
if (myrank == 0) then
  call mpi_send (buf, 1, newtype, 1, 13, MPI_COMM_WORLD, ierr)
else
  call mpi_recv (buf, 1, newtype, 0, 13, MPI_COMM_WORLD, istat, ierr)
  !the below call does not scatter the received integers, try to see the difference
  !call mpi_recv (buf, 2, MPI_INTEGER, 0, 13, MPI_COMM_WORLD, istat, ierr)
end if
write(6,'(1x,a,i2,1x,a,4i2)') 'RECV| rank ', myrank, 'buf = ', buf
end program
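
For reference, running this on two ranks should print something like the following (the interleaving of lines across ranks may vary):

 SEND| rank  0 buf =  7 2 6 4
 SEND| rank  1 buf =  1 1 1 1
 RECV| rank  0 buf =  7 2 6 4
 RECV| rank  1 buf =  7 1 6 1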

I hope this makes it clear that the receive buffer must accommodate whatever offsets are baked into the constructed type, and that nothing is received contiguously.
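
For the linked-list case itself, one standard way around this is to let each process describe its own memory: the receiver allocates and links its own nodes first, both sides build a struct type from absolute addresses (which are displacements relative to MPI_BOTTOM), and both sides pass MPI_BOTTOM as the buffer. A minimal sketch under those assumptions, written as if it were CONTAINed next to the other subroutines in the question (the helper name build_list_type is mine):

 SUBROUTINE build_list_type(head, numNodes, newtype)
  IMPLICIT NONE
  TYPE(a), POINTER :: head
  INTEGER :: numNodes, newtype
  TYPE(a), POINTER :: p
  INTEGER(kind=MPI_ADDRESS_KIND), DIMENSION(2*numNodes) :: disp
  INTEGER, DIMENSION(2*numNodes) :: blockLen, types
  INTEGER :: i, ierr
  p => head
  DO i = 1, 2*numNodes, 2
    CALL MPI_GET_ADDRESS(p%b, disp(i), ierr)    ! absolute addresses, i.e.
    CALL MPI_GET_ADDRESS(p%e, disp(i+1), ierr)  ! displacements from MPI_BOTTOM
    types(i) = MPI_INTEGER
    types(i+1) = MPI_REAL
    blockLen(i:i+1) = 1
    p => p%nextPacketInList
  END DO
  CALL MPI_TYPE_CREATE_STRUCT(2*numNodes, blockLen, disp, types, newtype, ierr)
  CALL MPI_TYPE_COMMIT(newtype, ierr)
 END SUBROUTINE build_list_type

Each rank then calls build_list_type on its own, already allocated list and communicates with MPI_BOTTOM as the buffer, e.g. CALL MPI_SSEND(MPI_BOTTOM, 1, myType, 1, 0, MPI_COMM_WORLD, ierr) on the sender and the matching CALL MPI_RECV(MPI_BOTTOM, 1, myType, 0, 0, MPI_COMM_WORLD, status, ierr) on the receiver, so the data lands directly in the receiver's own nodes.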

EDIT: updated the code to illustrate a receive with a different type that does not scatter the data: with the plain MPI_INTEGER receive (count 2), the two values land contiguously in buf(1) and buf(2), giving [7, 6, 1, 1] on rank 1 instead of [7, 1, 6, 1].
