Skip to content

Commit

Permalink
address reviews
Browse files Browse the repository at this point in the history
  • Loading branch information
Matt711 committed Sep 24, 2024
1 parent dbc39b4 commit 0d0ea02
Show file tree
Hide file tree
Showing 11 changed files with 177 additions and 4 deletions.
1 change: 1 addition & 0 deletions python/rmm/rmm/_lib/cuda_stream.pxd
Original file line number Diff line number Diff line change
Expand Up @@ -12,4 +12,5 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from rmm.librmm.cuda_stream cimport cuda_stream
from rmm.pylibrmm.cuda_stream cimport CudaStream
15 changes: 15 additions & 0 deletions python/rmm/rmm/_lib/cuda_stream_pool.pxd
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
# Copyright (c) 2021-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from rmm.librmm.cuda_stream_pool cimport cuda_stream_pool
15 changes: 15 additions & 0 deletions python/rmm/rmm/_lib/cuda_stream_view.pxd
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
# Copyright (c) 2020-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from rmm.librmm.cuda_stream_view cimport cuda_stream_view
6 changes: 6 additions & 0 deletions python/rmm/rmm/_lib/device_buffer.pxd
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from rmm.librmm.device_buffer cimport (
cuda_device_id,
device_buffer,
get_current_cuda_device,
prefetch,
)
from rmm.pylibrmm.device_buffer cimport (
DeviceBuffer,
copy_device_to_ptr,
Expand Down
15 changes: 15 additions & 0 deletions python/rmm/rmm/_lib/device_uvector.pxd
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
# Copyright (c) 2021-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from rmm.librmm.device_uvector cimport device_uvector
75 changes: 75 additions & 0 deletions python/rmm/rmm/_lib/logger.pxd
Original file line number Diff line number Diff line change
@@ -0,0 +1,75 @@
# Copyright (c) 2023-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# NOTE(review): ``rmm._lib`` is a backward-compatibility shim layer after the
# split into ``rmm.librmm`` (raw C++ declarations) and ``rmm.pylibrmm``
# (Python wrappers).  Like every other ``rmm/_lib/*.pxd`` file in this
# change, this module should only re-export names from their new homes.
#
# The ``cdef extern`` blocks that previously followed these cimports
# re-declared ``logging_level``, ``spdlog_logger`` and ``logger`` — the same
# names cimported below — which is a redeclaration within a single .pxd, and
# also referenced ``bool`` without a ``libcpp`` cimport.  They are removed
# here; the canonical extern declarations live in ``rmm.librmm.logger``.
from rmm.librmm.logger cimport logger, logging_level, spdlog_logger
from rmm.pylibrmm.logger cimport (
    _validate_level_type,
    flush_logger,
    get_flush_level,
    get_logging_level,
    set_flush_level,
    set_logging_level,
    should_log,
)
27 changes: 27 additions & 0 deletions python/rmm/rmm/_lib/memory_resource.pxd
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,33 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from rmm.librmm.memory_resource cimport (
CppExcept,
allocate_callback_t,
allocation_handle_type,
available_device_memory,
binning_memory_resource,
callback_memory_resource,
cuda_async_memory_resource,
cuda_memory_resource,
deallocate_callback_t,
device_memory_resource,
failure_callback_resource_adaptor,
failure_callback_t,
fixed_size_memory_resource,
limiting_resource_adaptor,
logging_resource_adaptor,
managed_memory_resource,
percent_of_free_device_memory,
pool_memory_resource,
prefetch_resource_adaptor,
sam_headroom_memory_resource,
statistics_resource_adaptor,
system_memory_resource,
throw_cpp_except,
tracking_resource_adaptor,
translate_python_except_to_cpp,
)
from rmm.pylibrmm.memory_resource cimport (
BinningMemoryResource,
CallbackMemoryResource,
Expand Down
21 changes: 21 additions & 0 deletions python/rmm/rmm/_lib/per_device_resource.pxd
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
# Copyright (c) 2019-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from rmm.librmm.per_device_resource cimport (
cuda_device_id,
get_current_device_resource,
get_per_device_resource,
set_current_device_resource,
set_per_device_resource,
)
2 changes: 1 addition & 1 deletion python/rmm/rmm/allocators/torch.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@
# allocator .so relative to the current file because the current file
# is pure Python and will therefore be in the source directory.
# Instead, we search relative to an arbitrary file in the compiled
# package. We use the cpp._logger module because it is small.
# package. We use the librmm._logger module because it is small.
from rmm.librmm import _logger

sofile = pathlib.Path(_logger.__file__).parent / "_torch_allocator.so"
Expand Down
2 changes: 1 addition & 1 deletion python/rmm/rmm/librmm/_logger.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -12,4 +12,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from ._logger cimport logging_level # no-cython-lint
from rmm.librmm._logger cimport logging_level # no-cython-lint
2 changes: 0 additions & 2 deletions python/rmm/rmm/librmm/memory_resource.pxd
Original file line number Diff line number Diff line change
Expand Up @@ -79,8 +79,6 @@ cdef extern from *:
void throw_cpp_except(CppExcept) nogil


# NOTE: Keep extern declarations in .pyx file as much as possible to avoid
# leaking dependencies when importing RMM Cython .pxd files
cdef extern from "rmm/mr/device/cuda_memory_resource.hpp" \
namespace "rmm::mr" nogil:
cdef cppclass cuda_memory_resource(device_memory_resource):
Expand Down

0 comments on commit 0d0ea02

Please sign in to comment.