coll: internally use MPI_Aint for count parameters #5044

Merged 4 commits on Feb 12, 2021
25 changes: 18 additions & 7 deletions maint/local_python/binding_c.py
@@ -885,10 +885,20 @@ def push_impl_decl(func, impl_name=None):
         impl_name = re.sub(r'^MPIX?_', 'MPIR_', func['name']) + "_impl"
     if func['impl_param_list']:
         params = ', '.join(func['impl_param_list'])
-        if func['dir'] == 'coll' and not RE.match(r'MPI_I', func['name']):
-            params = params + ", MPIR_Errflag_t *errflag"
+        if func['dir'] == 'coll':
+            # All collective impl functions use MPI_Aint counts
+            params = re.sub(r' int (count|sendcount|recvcount),', r' MPI_Aint \1,', params)
+            # blocking collectives use an extra errflag
+            if not RE.match(r'MPI_(I|Neighbor)', func['name']):
+                params = params + ", MPIR_Errflag_t *errflag"
     else:
         params = "void"
 
+    if func['dir'] == 'coll':
+        # collectives also dump MPIR_Xxx(...)
+        mpir_name = re.sub(r'^MPIX?_', 'MPIR_', func['name'])
+        G.impl_declares.append("int %s(%s);" % (mpir_name, params))
+    # dump MPIR_Xxx_impl(...)
     G.impl_declares.append("int %s(%s);" % (impl_name, params))
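The count-widening above is a purely textual substitution on the impl parameter list. A minimal standalone sketch of what that re.sub does, using a made-up MPI_Bcast-style parameter string (not taken from the generator's actual data):

import re

# Hypothetical impl parameter list, as push_impl_decl might see it
params = "void *buffer, int count, MPI_Datatype datatype, int root, MPIR_Comm *comm_ptr"

# The substitution from the hunk above: widen count parameters to MPI_Aint.
# Other int parameters, e.g. root, are deliberately left alone.
params = re.sub(r' int (count|sendcount|recvcount),', r' MPI_Aint \1,', params)

print(params)
# void *buffer, MPI_Aint count, MPI_Datatype datatype, int root, MPIR_Comm *comm_ptr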

@@ -907,23 +917,24 @@ def dump_CHECKENUM(var, errname, t, type="ENUM"):

 def dump_body_coll(func):
     # collectives call MPIR_Xxx
-    RE.match(r'MPI_(\w+)', func['name'])
-    name = RE.m.group(1)
+    mpir_name = re.sub(r'^MPIX?_', 'MPIR_', func['name'])
 
     args = ", ".join(func['impl_arg_list'])
-    if name.startswith('I'):
+    if RE.match(r'mpi_i', func['name'], re.IGNORECASE):
         # non-blocking collectives
         G.out.append("MPIR_Request *request_ptr = NULL;")
-        dump_line_with_break("mpi_errno = MPIR_%s(%s);" % (name, args))
+        dump_line_with_break("mpi_errno = %s(%s);" % (mpir_name, args))
         dump_error_check("")
         G.out.append("if (!request_ptr) {")
         G.out.append("    request_ptr = MPIR_Request_create_complete(MPIR_REQUEST_KIND__COLL);")
         G.out.append("}")
         G.out.append("*request = request_ptr->handle;")
+    elif RE.match(r'mpi_neighbor_', func['name'], re.IGNORECASE):
+        dump_line_with_break("mpi_errno = %s(%s);" % (mpir_name, args))
     else:
         # blocking collectives
         G.out.append("MPIR_Errflag_t errflag = MPIR_ERR_NONE;")
-        dump_line_with_break("mpi_errno = MPIR_%s(%s, &errflag);" % (name, args))
+        dump_line_with_break("mpi_errno = %s(%s, &errflag);" % (mpir_name, args))
 
 def dump_body_topo_fns(func, method):
     comm_ptr = func['_has_comm'] + "_ptr"
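For orientation, the three branches of dump_body_coll differ only in the MPIR call line they emit. A rough standalone sketch of that dispatch (the argument strings are illustrative; the real generator appends to G.out and handles line breaking and error checks):

import re

def sketch_mpir_call(name, args):
    # Sketch of the branch logic above: which MPIR call line gets generated.
    mpir_name = re.sub(r'^MPIX?_', 'MPIR_', name)
    if re.match(r'mpi_i', name, re.IGNORECASE):
        # non-blocking collectives: request-based, no errflag
        return "mpi_errno = %s(%s);" % (mpir_name, args)
    elif re.match(r'mpi_neighbor_', name, re.IGNORECASE):
        # neighbor collectives: no errflag either
        return "mpi_errno = %s(%s);" % (mpir_name, args)
    else:
        # blocking collectives: carry an errflag
        return "mpi_errno = %s(%s, &errflag);" % (mpir_name, args)

print(sketch_mpir_call("MPI_Bcast", "buffer, count, datatype, root, comm_ptr"))
# mpi_errno = MPIR_Bcast(buffer, count, datatype, root, comm_ptr, &errflag);
print(sketch_mpir_call("MPI_Ibcast", "buffer, count, datatype, root, comm_ptr, &request_ptr"))
# mpi_errno = MPIR_Ibcast(buffer, count, datatype, root, comm_ptr, &request_ptr);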
48 changes: 10 additions & 38 deletions src/binding/c/coll_api.txt
@@ -169,55 +169,27 @@ MPI_Iscatterv:
     .desc: Scatters a buffer in parts to all processes in a communicator in a nonblocking way
 
 MPI_Neighbor_allgather:
-    .desc: In this function, each process i gathers data items from each process j if an edge (j,i) exists in the topology graph, and each process i sends the same data items to all processes j where an edge (i,j) exists. The send buffer is sent to each neighboring process and the l-th block in the receive buffer is received from the l-th neighbor.
-{
-    mpi_errno = MPIR_Neighbor_allgather(sendbuf, sendcount, sendtype, recvbuf,
-                                        recvcount, recvtype, comm_ptr);
-    if (mpi_errno) {
-        goto fn_fail;
-    }
-}
+    .desc: Gathers data from all neighboring processes and distributes the combined data to all neighboring processes
+/*
+    Notes:
+    In this function, each process i gathers data items from each process j if an edge (j,i) exists in the topology graph, and each process i sends the same data items to all processes j where an edge (i,j) exists. The send buffer is sent to each neighboring process and the l-th block in the receive buffer is received from the l-th neighbor.
+*/
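As an aside, the semantics in the Notes are easiest to see on a concrete topology. A minimal sketch using mpi4py, which is an assumption of this example and not part of the PR:

from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
# Periodic 1-D Cartesian topology: each rank i has edges to ranks i-1 and i+1,
# so every process has exactly two neighbors (left, then right).
cart = comm.Create_cart(dims=[comm.Get_size()], periods=[True])

sendbuf = np.array([cart.Get_rank()], dtype='i')  # same data goes to every neighbor
recvbuf = np.empty(2, dtype='i')                  # block l is received from neighbor l

cart.Neighbor_allgather(sendbuf, recvbuf)
# With 4 ranks, rank 1 ends up with recvbuf == [0, 2].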

 MPI_Neighbor_allgatherv:
     .desc: The vector variant of MPI_Neighbor_allgather.
-{
-    mpi_errno = MPIR_Neighbor_allgatherv(sendbuf, sendcount, sendtype, recvbuf,
-                                         recvcounts, displs, recvtype, comm_ptr);
-    if (mpi_errno) {
-        goto fn_fail;
-    }
-}
 
 MPI_Neighbor_alltoall:
-    .desc: In this function, each process i receives data items from each process j if an edge (j,i) exists in the topology graph or Cartesian topology. Similarly, each process i sends data items to all processes j where an edge (i,j) exists. This call is more general than MPI_NEIGHBOR_ALLGATHER in that different data items can be sent to each neighbor. The k-th block in send buffer is sent to the k-th neighboring process and the l-th block in the receive buffer is received from the l-th neighbor.
-{
-    mpi_errno = MPIR_Neighbor_alltoall(sendbuf, sendcount, sendtype, recvbuf,
-                                       recvcount, recvtype, comm_ptr);
-    if (mpi_errno) {
-        goto fn_fail;
-    }
-}
+    .desc: Sends and receives data from all neighboring processes
+/*
+    Notes:
+    In this function, each process i receives data items from each process j if an edge (j,i) exists in the topology graph or Cartesian topology. Similarly, each process i sends data items to all processes j where an edge (i,j) exists. This call is more general than MPI_NEIGHBOR_ALLGATHER in that different data items can be sent to each neighbor. The k-th block in send buffer is sent to the k-th neighboring process and the l-th block in the receive buffer is received from the l-th neighbor.
+*/
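To contrast with MPI_Neighbor_allgather, here is the same ring but with a different block sent to each neighbor, again as an illustrative mpi4py sketch (mpi4py is an assumption, not part of the PR):

from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
cart = comm.Create_cart(dims=[comm.Get_size()], periods=[True])
rank = cart.Get_rank()

# Block k of sendbuf goes to neighbor k: block 0 to the left, block 1 to the right.
sendbuf = np.array([10 * rank, 10 * rank + 1], dtype='i')
recvbuf = np.empty(2, dtype='i')

cart.Neighbor_alltoall(sendbuf, recvbuf)
# recvbuf[0] holds the left neighbor's block 1; recvbuf[1] the right neighbor's block 0.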

 MPI_Neighbor_alltoallv:
     .desc: The vector variant of MPI_Neighbor_alltoall allows sending/receiving different numbers of elements to and from each neighbor.
-{
-    mpi_errno = MPIR_Neighbor_alltoallv(sendbuf, sendcounts, sdispls, sendtype,
-                                        recvbuf, recvcounts, rdispls, recvtype, comm_ptr);
-    if (mpi_errno) {
-        goto fn_fail;
-    }
-}
 
 MPI_Neighbor_alltoallw:
     .desc: Like MPI_Neighbor_alltoallv but it allows one to send and receive with different types to and from each neighbor.
-{
-    mpi_errno = MPIR_Neighbor_alltoallw_impl(sendbuf, sendcounts, sdispls,
-                                             sendtypes, recvbuf, recvcounts,
-                                             rdispls, recvtypes, comm_ptr);
-    if (mpi_errno) {
-        goto fn_fail;
-    }
-}
 
 MPI_Reduce:
     .desc: Reduces values on all processes to a single value