Merge pull request #2950 from Akshay-Venkatesh/managed-mem-1.4.x
UCP/UCT: CUDA Managed memory support
shamisp authored Oct 15, 2018
2 parents f96c4e1 + 1e47640 commit 007e885
Showing 7 changed files with 10 additions and 5 deletions.
2 changes: 1 addition & 1 deletion src/ucp/core/ucp_context.c
@@ -47,7 +47,7 @@ static const char * ucp_rndv_modes[] = {
 
 uct_memory_type_t ucm_to_uct_mem_type_map[] = {
     [UCM_MEM_TYPE_CUDA]         = UCT_MD_MEM_TYPE_CUDA,
-    [UCM_MEM_TYPE_CUDA_MANAGED] = UCT_MD_MEM_TYPE_HOST
+    [UCM_MEM_TYPE_CUDA_MANAGED] = UCT_MD_MEM_TYPE_CUDA_MANAGED
 };
 
 static ucs_config_field_t ucp_config_table[] = {
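
With this mapping fixed, memory-type events coming from UCM report managed allocations as UCT_MD_MEM_TYPE_CUDA_MANAGED instead of downgrading them to host memory. A minimal sketch of the table lookup, assuming a UCM memory-event handler (event_mem_type is a placeholder name, not UCX code):

/* Hypothetical use of the translation table inside a UCM event handler. */
ucm_mem_type_t    event_mem_type = UCM_MEM_TYPE_CUDA_MANAGED;
uct_memory_type_t uct_mem_type   = ucm_to_uct_mem_type_map[event_mem_type];
/* Before this commit the result was UCT_MD_MEM_TYPE_HOST; now it is
 * UCT_MD_MEM_TYPE_CUDA_MANAGED. */
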
1 change: 1 addition & 0 deletions src/ucp/core/ucp_mm.h
@@ -175,5 +175,6 @@ ucp_memh2uct(ucp_mem_h memh, ucp_md_index_t md_idx)
 })
 
 #define UCP_MEM_IS_HOST(_mem_type) ((_mem_type) == UCT_MD_MEM_TYPE_HOST)
+#define UCP_MEM_IS_CUDA_MANAGED(_mem_type) ((_mem_type) == UCT_MD_MEM_TYPE_CUDA_MANAGED)
 
 #endif
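
The new predicate lets UCP data paths test for CUDA managed memory, which the CPU can dereference directly. The hunks below use it alongside UCP_MEM_IS_HOST; a hedged sketch of how the two checks could be folded into a single helper (this macro is hypothetical and not part of the commit):

/* Hypothetical convenience macro, not introduced by this commit:
 * memory the CPU can dereference directly, so pack/unpack may use memcpy. */
#define UCP_MEM_IS_ACCESSIBLE_FROM_CPU(_mem_type) \
    (UCP_MEM_IS_HOST(_mem_type) || UCP_MEM_IS_CUDA_MANAGED(_mem_type))
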
3 changes: 2 additions & 1 deletion src/ucp/core/ucp_request.inl
@@ -428,7 +428,8 @@ ucp_request_recv_data_unpack(ucp_request_t *req, const void *data,
 
     switch (req->recv.datatype & UCP_DATATYPE_CLASS_MASK) {
     case UCP_DATATYPE_CONTIG:
-        if (ucs_likely(UCP_MEM_IS_HOST(req->recv.mem_type))) {
+        if ((ucs_likely(UCP_MEM_IS_HOST(req->recv.mem_type))) ||
+            (ucs_likely(UCP_MEM_IS_CUDA_MANAGED(req->recv.mem_type)))) {
             UCS_PROFILE_NAMED_CALL("memcpy_recv", memcpy, req->recv.buffer + offset,
                                    data, length);
         } else {
3 changes: 2 additions & 1 deletion src/ucp/dt/dt.c
@@ -102,7 +102,8 @@ size_t ucp_dt_pack(ucp_worker_h worker, ucp_datatype_t datatype,
 
     switch (datatype & UCP_DATATYPE_CLASS_MASK) {
     case UCP_DATATYPE_CONTIG:
-        if (ucs_likely(UCP_MEM_IS_HOST(mem_type))) {
+        if ((ucs_likely(UCP_MEM_IS_HOST(mem_type))) ||
+            (ucs_likely(UCP_MEM_IS_CUDA_MANAGED(mem_type)))) {
             UCS_PROFILE_CALL(memcpy, dest, src + state->offset, length);
         } else {
             ucp_mem_type_pack(worker, dest, src + state->offset, length, mem_type);
3 changes: 2 additions & 1 deletion src/ucp/dt/dt.inl
@@ -56,7 +56,8 @@ ucp_dt_unpack_only(ucp_worker_h worker, void *buffer, size_t count,
             ucs_unlikely(length > (buffer_size = ucp_contig_dt_length(datatype, count)))) {
             goto err_truncated;
         }
-        if (ucs_likely(UCP_MEM_IS_HOST(mem_type))) {
+        if (ucs_likely(UCP_MEM_IS_HOST(mem_type)) ||
+            (ucs_likely(UCP_MEM_IS_CUDA_MANAGED(mem_type)))) {
             UCS_PROFILE_NAMED_CALL("memcpy_recv", memcpy, buffer, data, length);
         } else {
             ucp_mem_type_unpack(worker, buffer, data, length, mem_type);
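
The three data-path hunks above (receive-side unpack in ucp_request.inl, pack in dt.c, unpack-only in dt.inl) apply the same rule: host and CUDA managed buffers are copied with a plain memcpy, while other memory types go through the staged ucp_mem_type_pack/ucp_mem_type_unpack helpers. A condensed sketch of that dispatch, using placeholder names (copy_to_user_buffer, dst, src, len) rather than actual UCX identifiers:

/* Illustrative sketch of the shared dispatch pattern; names are placeholders. */
static void copy_to_user_buffer(ucp_worker_h worker, void *dst, const void *src,
                                size_t len, uct_memory_type_t mem_type)
{
    if (UCP_MEM_IS_HOST(mem_type) || UCP_MEM_IS_CUDA_MANAGED(mem_type)) {
        /* Managed memory is migrated on demand by the CUDA driver, so the
         * CPU can address it directly and a plain memcpy is sufficient. */
        memcpy(dst, src, len);
    } else {
        /* Device-only memory: stage the copy through the memory-type helpers. */
        ucp_mem_type_unpack(worker, dst, src, len, mem_type);
    }
}
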
1 change: 1 addition & 0 deletions src/uct/api/uct.h
@@ -414,6 +414,7 @@ enum {
 typedef enum {
     UCT_MD_MEM_TYPE_HOST = 0,     /**< Default system memory */
     UCT_MD_MEM_TYPE_CUDA,         /**< NVIDIA CUDA memory */
+    UCT_MD_MEM_TYPE_CUDA_MANAGED, /**< NVIDIA CUDA managed (or unified) memory */
     UCT_MD_MEM_TYPE_LAST
 } uct_memory_type_t;
 
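
Adding UCT_MD_MEM_TYPE_CUDA_MANAGED gives managed allocations a first-class UCT memory type. How a memory domain classifies a pointer as managed is not part of this diff; one common approach with the CUDA driver API is to query CU_POINTER_ATTRIBUTE_IS_MANAGED, sketched below (detect_mem_type is a hypothetical helper, not UCX code):

#include <stdint.h>
#include <cuda.h>
#include <uct/api/uct.h>

/* Hypothetical classification helper: ask the CUDA driver whether ptr
 * belongs to a managed (unified) allocation. */
static uct_memory_type_t detect_mem_type(const void *ptr)
{
    unsigned int is_managed = 0;
    CUresult res = cuPointerGetAttribute(&is_managed,
                                         CU_POINTER_ATTRIBUTE_IS_MANAGED,
                                         (CUdeviceptr)(uintptr_t)ptr);
    if ((res == CUDA_SUCCESS) && is_managed) {
        return UCT_MD_MEM_TYPE_CUDA_MANAGED;
    }
    /* Fall back to host; a real memory domain would also check for plain
     * device memory before defaulting. */
    return UCT_MD_MEM_TYPE_HOST;
}
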
2 changes: 1 addition & 1 deletion test/gtest/uct/test_md.cc
@@ -24,7 +24,7 @@ extern "C" {
 #include <cuda_runtime.h>
 #endif
 
-std::string const test_md::mem_types[] = {"host", "cuda"};
+std::string const test_md::mem_types[] = {"host", "cuda", "cuda-managed"};
 
 void* test_md::alloc_thread(void *arg)
 {
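
With "cuda-managed" in the test matrix, the memory-domain gtest now covers the new type. The allocation side is not shown in this diff; a managed test buffer would typically come from cudaMallocManaged, roughly as below (alloc_managed is illustrative, not the test's actual helper):

#include <cuda_runtime.h>

// Illustrative allocation of a CUDA managed buffer for a "cuda-managed"
// test case; error handling is reduced to a boolean for brevity.
static bool alloc_managed(void **ptr, size_t size)
{
    cudaError_t err = cudaMallocManaged(ptr, size, cudaMemAttachGlobal);
    return err == cudaSuccess;
}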
