Skip to content

Commit

Permalink
Working interface and CMakeLists.txt file.
Browse files Browse the repository at this point in the history
  • Loading branch information
matinraayai committed Apr 24, 2022
1 parent c91fd2b commit 0c7b6a7
Show file tree
Hide file tree
Showing 2 changed files with 179 additions and 1 deletion.
40 changes: 39 additions & 1 deletion src/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -11,10 +11,18 @@ project(mcx)
# zlib is required (linked below as ZLIB::ZLIB); CUDA is mandatory for the GPU kernels.
find_package(ZLIB REQUIRED)
find_package(CUDA QUIET REQUIRED)

# OpenMP is linked into the targets below (OpenMP::OpenMP_CXX).
find_package(OpenMP REQUIRED)

# Build the bundled zmat subproject; it is linked below as target "zmat".
add_subdirectory(zmat)

option(BUILD_MEX "Build mex" ON)

# Optional Python bindings: use the bundled pybind11 and require Python >= 3.8.
# NOTE(review): BUILD_PYTHON has no option() declaration here — presumably set by
# the caller / parent scope; confirm or add option(BUILD_PYTHON "..." OFF).
if(BUILD_PYTHON)
add_subdirectory(pybind11)
find_package(PythonLibs 3.8 REQUIRED)
include_directories(${PYTHON_INCLUDE_DIRS})
endif()

# MATLAB is only needed when building the mex interface (guarded again below).
if(BUILD_MEX)
find_package(Matlab)
endif()
Expand All @@ -23,7 +31,7 @@ endif()
# NVCC flags for all CUDA targets in this file:
#   -g -lineinfo          : debug info with source-line correlation for profilers
#   -Xcompiler -Wall/-fopenmp : forwarded to the host compiler (warnings + OpenMP)
#   -arch=sm_35           : minimum GPU architecture (raised from sm_30 in this commit)
#   -DUSE_ATOMIC/-DSAVE_DETECTORS : compile-time feature switches for the kernels
#   -Xcompiler -fPIC      : objects are linked into shared modules (mex / Python)
set(
CUDA_NVCC_FLAGS
${CUDA_NVCC_FLAGS};
-g -lineinfo -Xcompiler -Wall -Xcompiler -fopenmp -O3 -arch=sm_35
-DMCX_TARGET_NAME="Fermi MCX" -DUSE_ATOMIC -use_fast_math
-DSAVE_DETECTORS -Xcompiler -fPIC
)
Expand Down Expand Up @@ -77,6 +85,36 @@ target_link_libraries(
ZLIB::ZLIB
)

# Python extension module: compiles the core MCX sources plus the pybind11
# wrapper (pymcx.cpp) into a loadable module named "pymcx".
if (BUILD_PYTHON)
cuda_add_library(pymcx MODULE
    mcx_core.cu
    mcx_core.h
    mcx_utils.c
    mcx_utils.h
    mcx_shapes.c
    mcx_shapes.h
    mcx_bench.c
    mcx_bench.h
    mcx_mie.cpp
    mcx_mie.h
    tictoc.c
    tictoc.h
    cjson/cJSON.c
    cjson/cJSON.h
    ubj/ubj.h
    ubj/ubjw.c
    pymcx.cpp
    )
# pybind11_add_module(example example.cpp)
# cuda_add_library cannot use pybind11_add_module, so the pybind11 interface
# targets (module/lto/windows_extras) are linked explicitly instead.
target_link_libraries(pymcx OpenMP::OpenMP_CXX zmat ZLIB::ZLIB pybind11::module pybind11::lto pybind11::windows_extras)

# Apply the platform-correct extension suffix and strip symbols in release builds.
pybind11_extension(pymcx)
pybind11_strip(pymcx)

# Hide non-exported symbols, as pybind11_add_module would have done.
set_target_properties(pymcx PROPERTIES CXX_VISIBILITY_PRESET "hidden"
                                        CUDA_VISIBILITY_PRESET "hidden")
endif()

# Build mex file
if(BUILD_MEX AND Matlab_FOUND)
matlab_add_mex(
Expand Down
140 changes: 140 additions & 0 deletions src/pymcx.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,140 @@
#include "pybind11/include/pybind11/pybind11.h"
#include "pybind11/include/pybind11/numpy.h"
#include <iostream>
#include <string>
#include "mcx_utils.h"
#include "mcx_core.h"

namespace py = pybind11;


/**
 * Run an MCX simulation configured from a Python dictionary.
 *
 * Recognized keys: "nphoton", "nblocksize", "nthread" (integers) and "vol"
 * (a 3-D NumPy array convertible to unsigned int). Unknown keys are ignored.
 *
 * @param userCfg  Python dict of simulation settings.
 */
void pyMcxInterface(const py::dict& userCfg) {
    Config mcxConfig;            /** mcxconfig: structure to store all simulation parameters */
    GPUInfo* gpuInfo = nullptr;  /** gpuinfo: structure to store GPU information */
    unsigned int activeDev = 0;  /** activedev: count of total active GPUs to be used */
    /** To start an MCX simulation, we first create a simulation configuration and set all
     * elements to its default settings.
     */
    mcx_initcfg(&mcxConfig);
    /** mcxConfig.vol only BORROWS the array's buffer, so the converted array must outlive
     * mcx_run_simulation(). Keeping it at function scope fixes a dangling pointer in the
     * previous version, where the converted array was a temporary destroyed at the end of
     * the "vol" parsing branch.
     */
    py::array_t<unsigned int, py::array::f_style | py::array::forcecast> volume;
    for (auto item : userCfg) {
        // TODO: Add error checking for key
        std::string itemKey = std::string(py::str(item.first));
        if (itemKey == "nphoton") {
            mcxConfig.nphoton = py::reinterpret_borrow<py::int_>(item.second);
        }
        else if (itemKey == "nblocksize") {
            mcxConfig.nblocksize = py::reinterpret_borrow<py::int_>(item.second);
        }
        else if (itemKey == "nthread") {
            mcxConfig.nthread = py::reinterpret_borrow<py::int_>(item.second);
        }
        else if (itemKey == "vol") {
            /** forcecast converts any compatible dtype/layout into a Fortran-ordered
             * unsigned-int array in one step (the old c_style/f_style two-branch logic
             * produced an f_style copy on both paths anyway).
             */
            volume = py::array_t<unsigned int, py::array::f_style | py::array::forcecast>::ensure(item.second);
            if (!volume) {
                mcx_error(-1, "'vol' could not be converted to an unsigned int array\n", __FILE__, __LINE__);
            }
            auto bufferInfo = volume.request();
            if (bufferInfo.ndim != 3) {
                mcx_error(-1, "'vol' must be a 3-D array\n", __FILE__, __LINE__);
            }
            mcxConfig.vol = static_cast<unsigned int*>(bufferInfo.ptr);
            mcxConfig.dim = {static_cast<unsigned int>(bufferInfo.shape.at(0)),
                             static_cast<unsigned int>(bufferInfo.shape.at(1)),
                             static_cast<unsigned int>(bufferInfo.shape.at(2))};
            // (debug dump of every voxel removed -- it printed the whole volume to stdout)
        }
    }
    /** The next step, we identify gpu number and query all GPU info */
    if (!(activeDev = mcx_list_gpu(&mcxConfig, &gpuInfo))) {
        mcx_error(-1, "No GPU device found\n", __FILE__, __LINE__);
    }

    /**
       This line runs the main MCX simulation for each GPU inside each thread
     */
    mcx_run_simulation(&mcxConfig, gpuInfo);

    /**
       Once simulation is complete, we clean up the allocated memory in config and gpuinfo, and exit
     */
    mcx_cleargpuinfo(&gpuInfo);
    // NOTE(review): confirm mcx_clearcfg does not free() cfg->vol -- the buffer is
    // owned by the NumPy array "volume", not by malloc.
    mcx_clearcfg(&mcxConfig);
    // return a pointer to the MCX output, wrapped in a std::vector
}

/// Python module definition: exposes the MCX simulation entry point as pymcx.mcx().
PYBIND11_MODULE(pymcx, mod) {
    // Shown by help(pymcx) on the Python side.
    mod.doc() = "Monte Carlo eXtreme Python Interface www.mcx.space";
    // Single entry point: runs a full simulation from a configuration dict.
    mod.def("mcx", &pyMcxInterface, "Runs MCX");
}


//int main (int argc, char *argv[]) {
// /*! structure to store all simulation parameters
// */
// Config mcxconfig; /** mcxconfig: structure to store all simulation parameters */
// GPUInfo *gpuinfo=NULL; /** gpuinfo: structure to store GPU information */
// unsigned int activedev=0; /** activedev: count of total active GPUs to be used */
//
// /**
// To start an MCX simulation, we first create a simulation configuration and
// set all elements to its default settings.
// */
// mcx_initcfg(&mcxconfig);
//
// /**
// Then, we parse the full command line parameters and set user specified settings
// */
// mcx_parsecmd(argc,argv,&mcxconfig);
//
// /** The next step, we identify gpu number and query all GPU info */
// if(!(activedev=mcx_list_gpu(&mcxconfig,&gpuinfo))){
// mcx_error(-1,"No GPU device found\n",__FILE__,__LINE__);
// }
//
//#ifdef _OPENMP
// /**
// Now we are ready to launch one thread for each involked GPU to run the simulation
// */
// omp_set_num_threads(activedev);
// #pragma omp parallel
// {
//#endif
//
// /**
// This line runs the main MCX simulation for each GPU inside each thread
// */
// mcx_run_simulation(&mcxconfig,gpuinfo);
//
//#ifdef _OPENMP
// }
//#endif
//
///**
// Once simulation is complete, we clean up the allocated memory in config and gpuinfo, and exit
// */
//mcx_cleargpuinfo(&gpuinfo);
//mcx_clearcfg(&mcxconfig);
//return 0;
//}

0 comments on commit 0c7b6a7

Please sign in to comment.