Throw rmm::out_of_memory when we know for sure (#894)
When RMM fails to allocate a buffer, it currently throws a `rmm::bad_alloc` exception, which a user might want to catch, spill some GPU buffers, and try again. But that exception covers all error conditions, so catching it blindly may hide other, more serious CUDA errors and make the code hard to debug. This change adds a more specific `rmm::out_of_memory` exception and throws it only when we are certain we are running out of memory, so that it can be caught to trigger spilling.
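
As an illustration of the intended usage (not part of this change), a caller can catch only the new exception to spill and retry; `spill_some_buffers()` and `allocate_with_spill()` below are hypothetical application-side names, shown only as a sketch:

```cpp
#include <rmm/cuda_stream_view.hpp>
#include <rmm/detail/error.hpp>
#include <rmm/mr/device/per_device_resource.hpp>

#include <cstddef>

// Hypothetical application hook that frees cached GPU buffers elsewhere.
void spill_some_buffers();

void* allocate_with_spill(std::size_t bytes, rmm::cuda_stream_view stream)
{
  auto* mr = rmm::mr::get_current_device_resource();
  try {
    return mr->allocate(bytes, stream);
  } catch (rmm::out_of_memory const&) {
    // Only a genuine out-of-memory condition lands here; other failures
    // (rmm::bad_alloc, rmm::cuda_error) propagate to the caller unchanged.
    spill_some_buffers();
    return mr->allocate(bytes, stream);
  }
}
```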

Authors:
  - Rong Ou (https://github.com/rongou)

Approvers:
  - Mark Harris (https://github.com/harrism)
  - Jake Hemstad (https://github.com/jrhemstad)

URL: #894
rongou authored Oct 26, 2021
1 parent c781527 commit fcf92c5
Showing 13 changed files with 53 additions and 12 deletions.
35 changes: 35 additions & 0 deletions include/rmm/detail/error.hpp
@@ -60,6 +60,16 @@ class bad_alloc : public std::bad_alloc {
std::string _what;
};

/**
* @brief Exception thrown when RMM runs out of memory
*
* This error should only be thrown when we know for sure a resource is out of memory.
*/
class out_of_memory : public bad_alloc {
public:
using bad_alloc::bad_alloc;
};

/**
* @brief Exception thrown when attempting to access outside of a defined range
*
@@ -167,6 +177,31 @@ class out_of_range : public std::out_of_range {
} while (0)
#define RMM_CUDA_TRY_1(_call) RMM_CUDA_TRY_2(_call, rmm::cuda_error)

/**
* @brief Error checking macro for CUDA memory allocation calls.
*
* Invokes a CUDA memory allocation function call. If the call does not return
* `cudaSuccess`, invokes cudaGetLastError() to clear the error and throws an
* exception detailing the CUDA error that occurred
*
* Defaults to throwing `rmm::bad_alloc`, but when `cudaErrorMemoryAllocation` is returned,
* `rmm::out_of_memory` is thrown instead.
*/
#define RMM_CUDA_TRY_ALLOC(_call) \
do { \
cudaError_t const error = (_call); \
if (cudaSuccess != error) { \
cudaGetLastError(); \
auto const msg = std::string{"CUDA error at: "} + __FILE__ + ":" + RMM_STRINGIFY(__LINE__) + \
": " + cudaGetErrorName(error) + " " + cudaGetErrorString(error); \
if (cudaErrorMemoryAllocation == error) { \
throw rmm::out_of_memory{msg}; \
} else { \
throw rmm::bad_alloc{msg}; \
} \
} \
} while (0)

/**
* @brief Error checking macro similar to `assert` for CUDA runtime API calls
*
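
Since `rmm::out_of_memory` derives from `rmm::bad_alloc` (which itself derives from `std::bad_alloc`), existing handlers that catch the broader types continue to work. A minimal sketch of that relationship, assuming only the header changed above:

```cpp
#include <rmm/detail/error.hpp>

#include <iostream>

int main()
{
  try {
    throw rmm::out_of_memory{"maximum pool size exceeded"};
  } catch (rmm::bad_alloc const& e) {
    // Pre-existing code that catches rmm::bad_alloc (or std::bad_alloc)
    // still sees the new, more specific exception.
    std::cout << e.what() << '\n';
  }
  return 0;
}
```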
2 changes: 1 addition & 1 deletion include/rmm/mr/device/arena_memory_resource.hpp
@@ -154,7 +154,7 @@ class arena_memory_resource final : public device_memory_resource {
pointer = arena.allocate(bytes);
if (pointer == nullptr) {
if (dump_log_on_failure_) { dump_memory_log(bytes); }
RMM_FAIL("Maximum pool size exceeded", rmm::bad_alloc);
RMM_FAIL("Maximum pool size exceeded", rmm::out_of_memory);
}
}

3 changes: 1 addition & 2 deletions include/rmm/mr/device/cuda_async_memory_resource.hpp
@@ -148,8 +148,7 @@ class cuda_async_memory_resource final : public device_memory_resource {
void* ptr{nullptr};
#ifdef RMM_CUDA_MALLOC_ASYNC_SUPPORT
if (bytes > 0) {
RMM_CUDA_TRY(cudaMallocFromPoolAsync(&ptr, bytes, pool_handle(), stream.value()),
rmm::bad_alloc);
RMM_CUDA_TRY_ALLOC(cudaMallocFromPoolAsync(&ptr, bytes, pool_handle(), stream.value()));
}
#else
(void)bytes;
2 changes: 1 addition & 1 deletion include/rmm/mr/device/cuda_memory_resource.hpp
@@ -67,7 +67,7 @@ class cuda_memory_resource final : public device_memory_resource {
void* do_allocate(std::size_t bytes, cuda_stream_view) override
{
void* ptr{nullptr};
RMM_CUDA_TRY(cudaMalloc(&ptr, bytes), rmm::bad_alloc);
RMM_CUDA_TRY_ALLOC(cudaMalloc(&ptr, bytes));
return ptr;
}

2 changes: 1 addition & 1 deletion include/rmm/mr/device/detail/stream_ordered_memory_resource.hpp
@@ -208,7 +208,7 @@ class stream_ordered_memory_resource : public crtp<PoolResource>, public device_

size = rmm::detail::align_up(size, rmm::detail::CUDA_ALLOCATION_ALIGNMENT);
RMM_EXPECTS(size <= this->underlying().get_maximum_allocation_size(),
rmm::bad_alloc,
rmm::out_of_memory,
"Maximum allocation size exceeded");
auto const block = this->underlying().get_block(size, stream_event);

2 changes: 1 addition & 1 deletion include/rmm/mr/device/limiting_resource_adaptor.hpp
@@ -140,7 +140,7 @@ class limiting_resource_adaptor final : public device_memory_resource {
}

allocated_bytes_ -= proposed_size;
RMM_FAIL("Exceeded memory limit", rmm::bad_alloc);
RMM_FAIL("Exceeded memory limit", rmm::out_of_memory);
}

/**
2 changes: 1 addition & 1 deletion include/rmm/mr/device/managed_memory_resource.hpp
@@ -71,7 +71,7 @@ class managed_memory_resource final : public device_memory_resource {
if (bytes == 0) { return nullptr; }

void* ptr{nullptr};
RMM_CUDA_TRY(cudaMallocManaged(&ptr, bytes), rmm::bad_alloc);
RMM_CUDA_TRY_ALLOC(cudaMallocManaged(&ptr, bytes));
return ptr;
}

2 changes: 1 addition & 1 deletion include/rmm/mr/device/pool_memory_resource.hpp
@@ -180,7 +180,7 @@ class pool_memory_resource final
RMM_LOG_ERROR("[A][Stream {}][Upstream {}B][FAILURE maximum pool size exceeded]",
fmt::ptr(stream.value()),
min_size);
RMM_FAIL("Maximum pool size exceeded", rmm::bad_alloc);
RMM_FAIL("Maximum pool size exceeded", rmm::out_of_memory);
}

/**
4 changes: 2 additions & 2 deletions tests/mr/device/limiting_mr_tests.cpp
@@ -38,7 +38,7 @@ TEST(LimitingTest, TooBig)
{
auto const max_size{5_MiB};
Limiting_adaptor mr{rmm::mr::get_current_device_resource(), max_size};
EXPECT_THROW(mr.allocate(max_size + 1), rmm::bad_alloc);
EXPECT_THROW(mr.allocate(max_size + 1), rmm::out_of_memory);
}

TEST(LimitingTest, UnderLimitDueToFrees)
@@ -83,7 +83,7 @@ TEST(LimitingTest, OverLimit)
EXPECT_EQ(mr.get_allocated_bytes(), allocated_bytes);
EXPECT_EQ(mr.get_allocation_limit() - mr.get_allocated_bytes(), max_size - allocated_bytes);
auto const size2{3_MiB};
EXPECT_THROW(mr.allocate(size2), rmm::bad_alloc);
EXPECT_THROW(mr.allocate(size2), rmm::out_of_memory);
EXPECT_EQ(mr.get_allocated_bytes(), allocated_bytes);
EXPECT_EQ(mr.get_allocation_limit() - mr.get_allocated_bytes(), max_size - allocated_bytes);
mr.deallocate(ptr1, 4_MiB);
3 changes: 3 additions & 0 deletions tests/mr/device/mr_multithreaded_tests.cpp
@@ -37,6 +37,9 @@ struct mr_test_mt : public mr_test {
INSTANTIATE_TEST_CASE_P(MultiThreadResourceTests,
mr_test_mt,
::testing::Values(mr_factory{"CUDA", &make_cuda},
#ifdef RMM_CUDA_MALLOC_ASYNC_SUPPORT
mr_factory{"CUDA_Async", &make_cuda_async},
#endif
mr_factory{"Managed", &make_managed},
mr_factory{"Pool", &make_pool},
mr_factory{"Arena", &make_arena},
2 changes: 1 addition & 1 deletion tests/mr/device/mr_test.hpp
@@ -128,7 +128,7 @@ inline void test_various_allocations(rmm::mr::device_memory_resource* mr, cuda_s
// should fail to allocate too much
{
void* ptr{nullptr};
EXPECT_THROW(ptr = mr->allocate(1_PiB, stream), rmm::bad_alloc);
EXPECT_THROW(ptr = mr->allocate(1_PiB, stream), rmm::out_of_memory);
EXPECT_EQ(nullptr, ptr);
}
}
2 changes: 1 addition & 1 deletion tests/mr/device/pool_mr_tests.cpp
@@ -86,7 +86,7 @@ TEST(PoolTest, ForceGrowth)
EXPECT_NO_THROW(mr.allocate(1000));
EXPECT_NO_THROW(mr.allocate(4000));
EXPECT_NO_THROW(mr.allocate(500));
EXPECT_THROW(mr.allocate(2000), rmm::bad_alloc); // too much
EXPECT_THROW(mr.allocate(2000), rmm::out_of_memory); // too much
}

TEST(PoolTest, DeletedStream)
4 changes: 4 additions & 0 deletions tests/mr/device/thrust_allocator_tests.cu
@@ -37,8 +37,12 @@ TEST_P(allocator_test, first)
INSTANTIATE_TEST_CASE_P(ThrustAllocatorTests,
allocator_test,
::testing::Values(mr_factory{"CUDA", &make_cuda},
#ifdef RMM_CUDA_MALLOC_ASYNC_SUPPORT
mr_factory{"CUDA_Async", &make_cuda_async},
#endif
mr_factory{"Managed", &make_managed},
mr_factory{"Pool", &make_pool},
mr_factory{"Arena", &make_arena},
mr_factory{"Binning", &make_binning}),
[](auto const& info) { return info.param.name; });

