MPI Scatterv error: There is no specific subroutine for the generic 'mpi_scatterv'



I am trying to run this code, and the compiler reports the error "There is no specific subroutine for the generic 'mpi_scatterv'":

program mpiscatterv
!implicit none
use mpi
real, dimension (:,:), allocatable :: r, rcv_buf
integer :: ierr, my_id, n_proc, rcv_id, snd_id, counter
integer, dimension (:), allocatable :: sendcounts, displs, rcv_count
integer, parameter :: master = 0
integer :: i,j,k
integer :: n = 0
integer :: ios_read = 0
integer :: rem ! remaining data 
integer :: div
integer :: summ = 0


open (unit=99, file ='datatest1.dat',iostat=ios_read)
if (ios_read /=0) then
        print*, 'could not be opened'
end if
!open (unit=99, file='rawdata2.dat',iostat=ios_read)
do
  read (99, *, iostat=ios_read) i,x,y
    if (ios_read > 0) then
        print*,'something is wrong'
        print*,ios_read
        stop
   else if (ios_read < 0) then
        print*, 'end of file is reached'
        exit
   else
        n = n+1
   end if
end do
rewind(99)
open(unit=98, file='rawdata2.dat')
allocate(r(2,n))
do i=1,n
read(99,*, iostat=ios_read)j,x,y
r(1,j)= x
r(2,j)= y
write (98,*) x, y
end do
close (99)
close (98)
call mpi_init(ierr)
call mpi_comm_rank(mpi_comm_world, my_id, ierr)
call mpi_comm_size(mpi_comm_world, n_proc, ierr)
rem = mod(2*n,n_proc)
allocate (sendcounts(n_proc))
allocate (displs(n_proc))
allocate (rcv_count(n_proc))
allocate (rcv_buf(2,n_proc))

counter = 1
do while (counter<=n_proc)
sendcounts(counter) = int(2*n/n_proc)
  if (rem > 0) then
      sendcounts(counter)=int(2*n/n_proc)+2
    rem = rem-2
  end if
rcv_count=sendcounts
displs(counter)=summ
summ=summ+sendcounts(counter)
counter = counter + 1
end do
counter = 1
if (my_id==0) then
   do while (counter<n_proc)
     print*,sendcounts, displs
     counter = counter + 1
   end do
end if

call MPI_Scatterv(r,sendcounts,displs,mpi_real,rcv_buf,rcv_count,mpi_real,0,mpi_comm_world,ierr)
call mpi_finalize(ierr)
end program

I have an array r of data that I want to scatter. r has 2 columns and n rows. I am using Scatterv because the data cannot be divided evenly among n_proc processes. When I try to compile it I get the error above; as far as I can tell I have followed the limited guidance I could find on the internet.

There are several problems with your code. I tried to reproduce your error by compiling your code (main.f90) with gfortran v6.3 and OpenMPI v3.1.4:

mpifort main.f90                                                                         
test3.f90:85:106:                                                                                                                           
                                                                                                                                            
 call MPI_Scatterv(r(1,:),sendcounts,displs,mpi_real,rcv_buf(1,:),rcv_count,mpi_real,0,mpi_comm_world,ierr)                                 
                                                                                                          1                                 
Error: There is no specific subroutine for the generic ‘mpi_scatterv’ at (1)

On the OpenMPI website you can see that MPI_Scatterv expects the following:

Input Parameters
sendbuf
    Address of send buffer (choice, significant only at root). 
sendcounts
    Integer array (of length group size) specifying the number of elements to send to each processor. 
displs
    Integer array (of length group size). Entry i specifies the displacement (relative to sendbuf) from which to take the outgoing data to process i. 
sendtype
    Datatype of send buffer elements (handle). 
recvcount
    Number of elements in receive buffer (integer). 
recvtype
    Datatype of receive buffer elements (handle). 
root
    Rank of sending process (integer). 
comm
    Communicator (handle). 

The problem is that recvcount (rcv_count in your case) should be just a single integer, not an integer array.
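
The Fortran binding shown on the same man page makes the shape of each argument explicit: SENDCOUNTS and DISPLS are integer arrays of length group size, while RECVCOUNT is a plain INTEGER.

MPI_SCATTERV(SENDBUF, SENDCOUNTS, DISPLS, SENDTYPE, RECVBUF,
        RECVCOUNT, RECVTYPE, ROOT, COMM, IERROR)
    <type>   SENDBUF(*), RECVBUF(*)
    INTEGER  SENDCOUNTS(*), DISPLS(*), SENDTYPE
    INTEGER  RECVCOUNT, RECVTYPE, ROOT, COMM, IERROR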

There are a few other issues I should point out that you should also fix:

  • As others have suggested, you should uncomment implicit none.
  • x and y are never declared, so they are undefined.
  • You should really avoid sending n-dimensional arrays around in MPI.
  • Your rcv_buf almost certainly has the wrong dimensions; I think it should hold at least n/n_proc entries (see the sketch after this list).
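
As a minimal, self-contained sketch of these points, the program below scatters whole (x,y) pairs from a flat 1-D buffer, sets rcv_count as a scalar on every rank, and sizes the local receive storage from that count. The program name, the dummy send buffer, and the pair count n = 10 are illustrative assumptions; it is not a drop-in replacement for your program.

program scatterv_sketch
  use mpi
  implicit none
  integer :: ierr, my_id, n_proc, i, summ, rcv_count
  integer, allocatable :: sendcounts(:), displs(:)
  real,    allocatable :: sendbuf(:), local(:), rcv_buf(:,:)
  integer, parameter :: n = 10                     ! number of (x,y) pairs (illustrative)

  call mpi_init(ierr)
  call mpi_comm_rank(mpi_comm_world, my_id, ierr)
  call mpi_comm_size(mpi_comm_world, n_proc, ierr)

  ! Distribute whole pairs: every count is a multiple of 2 and they sum to 2*n.
  allocate (sendcounts(n_proc), displs(n_proc))
  summ = 0
  do i = 1, n_proc
    sendcounts(i) = 2*(n/n_proc)
    if (i <= mod(n, n_proc)) sendcounts(i) = sendcounts(i) + 2
    displs(i) = summ
    summ = summ + sendcounts(i)
  end do

  ! recvcount is a scalar: each rank passes its own entry of sendcounts.
  rcv_count = sendcounts(my_id+1)
  allocate (local(rcv_count))

  allocate (sendbuf(2*n))                          ! contents only matter on the root
  if (my_id == 0) sendbuf = [(real(i), i = 1, 2*n)]

  ! Scatter flat 1-D buffers, then reshape locally into a (2, npairs) array.
  call mpi_scatterv(sendbuf, sendcounts, displs, mpi_real, &
                    local, rcv_count, mpi_real, 0, mpi_comm_world, ierr)
  rcv_buf = reshape(local, [2, rcv_count/2])
  print *, 'rank', my_id, 'received', rcv_count/2, 'pairs'

  call mpi_finalize(ierr)
end program scatterv_sketch

Something like mpifort sketch.f90 -o sketch followed by mpirun -np 3 ./sketch should compile and run it.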

The code below compiles, but you will need to check it carefully. It may not work.

program mpiscatterv
  use mpi
  implicit none
  real, dimension (:,:), allocatable :: r, rcv_buf
  integer :: ierr, my_id, n_proc, rcv_id, snd_id, counter
  integer, dimension (:), allocatable :: sendcounts, displs
  integer, parameter :: master = 0
  integer :: i,j,k, rcv_count
  real    :: x, y
  integer :: n = 0
  integer :: ios_read = 0
  integer :: rem ! remaining data 
  integer :: div
  integer :: summ = 0
  open (unit=99, file ='datatest1.dat',iostat=ios_read)
  if (ios_read /=0) then
    print*, 'could not be opened'
  end if
  !open (unit=99, file='rawdata2.dat',iostat=ios_read)
  do
    read (99, *, iostat=ios_read) i,x,y
    if (ios_read > 0) then
      print*,'something is wrong'
      print*,ios_read
      stop
    else if (ios_read < 0) then
      print*, 'end of file is reached'
      exit
    else
      n = n+1
    end if
  end do
  rewind(99)
  open(unit=98, file='rawdata2.dat')
  allocate(r(2,n))
  do i=1,n
    read(99,*, iostat=ios_read)j,x,y
    r(1,j)= x
    r(2,j)= y
    write (98,*) x, y
  end do
  close (99)
  close (98)
  call mpi_init(ierr)
  call mpi_comm_rank(mpi_comm_world, my_id, ierr)
  call mpi_comm_size(mpi_comm_world, n_proc, ierr)
  rem = mod(2*n,n_proc)
  allocate (sendcounts(n_proc))
  allocate (displs(n_proc))
  allocate (rcv_buf(2,n/n_proc))

  counter = 1
  do while (counter<=n_proc)
    sendcounts(counter) = int(2*n/n_proc)
    if (rem > 0) then
      sendcounts(counter)=int(2*n/n_proc)+2
      rem = rem-2
    end if
    displs(counter)=summ
    summ=summ+sendcounts(counter)
    counter = counter + 1
  end do
  counter = 1
  if (my_id==0) then
    do while (counter<n_proc)
      print*,sendcounts, displs
      counter = counter + 1
    end do
  end if

  call MPI_Scatterv(r(1,:),sendcounts,displs,mpi_real,rcv_buf(1,:),rcv_count,mpi_real,0,mpi_comm_world,ierr)
  call mpi_finalize(ierr)
end program
