Merge pull request #5044 from hzhou/2101_coll_large
coll: internally use MPI_Aint for count parameters

Approved-by: Ken Raffenetti <raffenet@mcs.anl.gov>
hzhou authored Feb 12, 2021
2 parents dfb569f + 0801f66 commit 578ea61
Showing 239 changed files with 1,800 additions and 1,914 deletions.
25 changes: 18 additions & 7 deletions maint/local_python/binding_c.py
@@ -885,10 +885,20 @@ def push_impl_decl(func, impl_name=None):
impl_name = re.sub(r'^MPIX?_', 'MPIR_', func['name']) + "_impl"
if func['impl_param_list']:
params = ', '.join(func['impl_param_list'])
if func['dir'] == 'coll' and not RE.match(r'MPI_I', func['name']):
params = params + ", MPIR_Errflag_t *errflag"
if func['dir'] == 'coll':
# All collective impl functions use MPI_Aint counts
params = re.sub(r' int (count|sendcount|recvcount),', r' MPI_Aint \1,', params)
# blocking collectives use an extra errflag
if not RE.match(r'MPI_(I|Neighbor)', func['name']):
params = params + ", MPIR_Errflag_t *errflag"
else:
params="void"

if func['dir'] == 'coll':
# collectives also dump MPIR_Xxx(...)
mpir_name = re.sub(r'^MPIX?_', 'MPIR_', func['name'])
G.impl_declares.append("int %s(%s);" % (mpir_name, params))
# dump MPIR_Xxx_impl(...)
G.impl_declares.append("int %s(%s);" % (impl_name, params))

def dump_CHECKENUM(var, errname, t, type="ENUM"):
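For reference, the count-widening step in push_impl_decl above can be exercised on its own. A minimal sketch; the parameter string is a hand-written stand-in for what the generator builds from func['impl_param_list'], not actual generator output:

import re

# Hand-written example of an impl parameter list (illustration only).
params = ("const void *sendbuf, int sendcount, MPI_Datatype sendtype, "
          "void *recvbuf, int recvcount, MPI_Datatype recvtype, MPIR_Comm *comm_ptr")

# Same substitution as in push_impl_decl: widen the count parameters to MPI_Aint.
params = re.sub(r' int (count|sendcount|recvcount),', r' MPI_Aint \1,', params)

# Blocking (non-I*, non-Neighbor) collectives then get the extra errflag parameter.
params = params + ", MPIR_Errflag_t *errflag"

print(params)
# sendcount and recvcount are now declared MPI_Aint, and the list ends with
# "MPIR_Errflag_t *errflag".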
@@ -907,23 +917,24 @@ def dump_CHECKENUM(var, errname, t, type="ENUM"):

def dump_body_coll(func):
# collectives call MPIR_Xxx
RE.match(r'MPI_(\w+)', func['name'])
name = RE.m.group(1)
mpir_name = re.sub(r'^MPIX?_', 'MPIR_', func['name'])

args = ", ".join(func['impl_arg_list'])
if name.startswith('I'):
if RE.match(r'mpi_i', func['name'], re.IGNORECASE):
# non-blocking collectives
G.out.append("MPIR_Request *request_ptr = NULL;")
dump_line_with_break("mpi_errno = MPIR_%s(%s);" % (name, args))
dump_line_with_break("mpi_errno = %s(%s);" % (mpir_name, args))
dump_error_check("")
G.out.append("if (!request_ptr) {")
G.out.append(" request_ptr = MPIR_Request_create_complete(MPIR_REQUEST_KIND__COLL);")
G.out.append("}")
G.out.append("*request = request_ptr->handle;")
elif RE.match(r'mpi_neighbor_', func['name'], re.IGNORECASE):
dump_line_with_break("mpi_errno = %s(%s);" % (mpir_name, args))
else:
# blocking collectives
G.out.append("MPIR_Errflag_t errflag = MPIR_ERR_NONE;")
dump_line_with_break("mpi_errno = MPIR_%s(%s, &errflag);" % (name, args))
dump_line_with_break("mpi_errno = %s(%s, &errflag);" % (mpir_name, args))

def dump_body_topo_fns(func, method):
comm_ptr = func['_has_comm'] + "_ptr"
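The branching in dump_body_coll above decides which internal routine the generated binding calls and whether an errflag is passed. A standalone sketch of that rule; the sample names and argument strings are illustrative, not taken from the generator:

import re

def coll_call_line(name, args):
    # Mirror dump_body_coll: derive the MPIR_ name, then pick the call form.
    mpir_name = re.sub(r'^MPIX?_', 'MPIR_', name)
    if re.match(r'mpi_i', name, re.IGNORECASE):
        # nonblocking collectives: no errflag, completion goes through a request
        return "mpi_errno = %s(%s);" % (mpir_name, args)
    elif re.match(r'mpi_neighbor_', name, re.IGNORECASE):
        # neighbor collectives: blocking, but also no errflag
        return "mpi_errno = %s(%s);" % (mpir_name, args)
    else:
        # blocking collectives: pass the local errflag by address
        return "mpi_errno = %s(%s, &errflag);" % (mpir_name, args)

print(coll_call_line("MPI_Bcast", "buffer, count, datatype, root, comm_ptr"))
# mpi_errno = MPIR_Bcast(buffer, count, datatype, root, comm_ptr, &errflag);
print(coll_call_line("MPI_Ibcast", "buffer, count, datatype, root, comm_ptr, &request_ptr"))
# mpi_errno = MPIR_Ibcast(buffer, count, datatype, root, comm_ptr, &request_ptr);
print(coll_call_line("MPI_Neighbor_allgather", "..."))
# mpi_errno = MPIR_Neighbor_allgather(...);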
48 changes: 10 additions & 38 deletions src/binding/c/coll_api.txt
@@ -169,55 +169,27 @@ MPI_Iscatterv:
.desc: Scatters a buffer in parts to all processes in a communicator in a nonblocking way

MPI_Neighbor_allgather:
.desc: In this function, each process i gathers data items from each process j if an edge (j,i) exists in the topology graph, and each process i sends the same data items to all processes j where an edge (i,j) exists. The send buffer is sent to each neighboring process and the l-th block in the receive buffer is received from the l-th neighbor.
{
mpi_errno = MPIR_Neighbor_allgather(sendbuf, sendcount, sendtype, recvbuf,
recvcount, recvtype, comm_ptr);
if (mpi_errno) {
goto fn_fail;
}
}
.desc: Gathers data from all neighboring processes and distributes the combined data to all neighboring processes
/*
Notes:
In this function, each process i gathers data items from each process j if an edge (j,i) exists in the topology graph, and each process i sends the same data items to all processes j where an edge (i,j) exists. The send buffer is sent to each neighboring process and the l-th block in the receive buffer is received from the l-th neighbor.
*/
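The edge semantics in the note above can be sketched with a small non-MPI model; the topology and block contents below are made up for illustration:

# Toy model (not MPI code): process i receives one block from every j with an
# edge (j,i), and sends its single send buffer along every outgoing edge (i,j).
edges = {(0, 1), (1, 2), (2, 0), (0, 2)}   # hypothetical directed topology
sendbuf = {i: "block_from_%d" % i for i in range(3)}

recvbuf = {i: [sendbuf[j] for j in range(3) if (j, i) in edges] for i in range(3)}
print(recvbuf[2])   # incoming edges from 0 and 1: ['block_from_0', 'block_from_1']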

MPI_Neighbor_allgatherv:
.desc: The vector variant of MPI_Neighbor_allgather.
{
mpi_errno = MPIR_Neighbor_allgatherv(sendbuf, sendcount, sendtype, recvbuf,
recvcounts, displs, recvtype, comm_ptr);
if (mpi_errno) {
goto fn_fail;
}
}

MPI_Neighbor_alltoall:
.desc: In this function, each process i receives data items from each process j if an edge (j,i) exists in the topology graph or Cartesian topology. Similarly, each process i sends data items to all processes j where an edge (i,j) exists. This call is more general than MPI_NEIGHBOR_ALLGATHER in that different data items can be sent to each neighbor. The k-th block in send buffer is sent to the k-th neighboring process and the l-th block in the receive buffer is received from the l-th neighbor.
{
mpi_errno = MPIR_Neighbor_alltoall(sendbuf, sendcount, sendtype, recvbuf,
recvcount, recvtype, comm_ptr);
if (mpi_errno) {
goto fn_fail;
}
}
.desc: Sends and receives data from all neighboring processes
/*
Notes:
In this function, each process i receives data items from each process j if an edge (j,i) exists in the topology graph or Cartesian topology. Similarly, each process i sends data items to all processes j where an edge (i,j) exists. This call is more general than MPI_NEIGHBOR_ALLGATHER in that different data items can be sent to each neighbor. The k-th block in send buffer is sent to the k-th neighboring process and the l-th block in the receive buffer is received from the l-th neighbor.
*/
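Likewise, a non-MPI sketch of how this differs from MPI_NEIGHBOR_ALLGATHER: each process sends a different block to each outgoing neighbor (here keyed by destination rank for simplicity rather than by block index; topology and data are again made up):

# Toy model (not MPI code) of the neighbor alltoall exchange pattern.
edges = {(0, 1), (1, 2), (2, 0), (0, 2)}   # same hypothetical topology
out_neighbors = {i: sorted(j for j in range(3) if (i, j) in edges) for i in range(3)}
sendbuf = {i: {j: "from_%d_to_%d" % (i, j) for j in out_neighbors[i]} for i in range(3)}

recvbuf = {i: [sendbuf[j][i] for j in range(3) if (j, i) in edges] for i in range(3)}
print(recvbuf[2])   # ['from_0_to_2', 'from_1_to_2']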

MPI_Neighbor_alltoallv:
.desc: The vector variant of MPI_Neighbor_alltoall allows sending/receiving different numbers of elements to and from each neighbor.
{
mpi_errno = MPIR_Neighbor_alltoallv(sendbuf, sendcounts, sdispls, sendtype,
recvbuf, recvcounts, rdispls, recvtype, comm_ptr);
if (mpi_errno) {
goto fn_fail;
}
}

MPI_Neighbor_alltoallw:
.desc: Like MPI_Neighbor_alltoallv but it allows one to send and receive with different types to and from each neighbor.
{
mpi_errno = MPIR_Neighbor_alltoallw_impl(sendbuf, sendcounts, sdispls,
sendtypes, recvbuf, recvcounts,
rdispls, recvtypes, comm_ptr);
if (mpi_errno) {
goto fn_fail;
}
}

MPI_Reduce:
.desc: Reduces values on all processes to a single value
(diffs for the remaining changed files not shown)
