diff --git a/CMakeLists.txt b/CMakeLists.txt
index 3c01f53..65fc184 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -94,6 +94,18 @@ if(OS_WIN)
   )
 endif()
 
+# rpath
+
+set(CMAKE_MACOSX_RPATH 1)
+set(MYNTEYE_CMAKE_RPATH "")
+if(WITH_OPENCV)
+  list(APPEND MYNTEYE_CMAKE_RPATH ${OpenCV_LIB_PATH})
+endif()
+if(MYNTEYE_CMAKE_RPATH)
+  message(STATUS "RPATH: ${MYNTEYE_CMAKE_RPATH}")
+  set(CMAKE_INSTALL_RPATH "${MYNTEYE_CMAKE_RPATH}")
+endif()
+
 # targets
 
 add_definitions(-DMYNTEYE_EXPORTS)
diff --git a/cmake/DetectOpenCV.cmake b/cmake/DetectOpenCV.cmake
index 34a5779..d166f50 100644
--- a/cmake/DetectOpenCV.cmake
+++ b/cmake/DetectOpenCV.cmake
@@ -41,6 +41,25 @@ if(${__index} GREATER -1)
   set(WITH_OPENCV_WORLD TRUE)
 endif()
 
+if(NOT OpenCV_LIB_PATH)
+  list(LENGTH OpenCV_INCLUDE_DIRS __length)
+  if(${__length} GREATER 0)
+    list(GET OpenCV_INCLUDE_DIRS 0 __include_dir)
+    string(REGEX REPLACE "include.*$" "lib" __lib_dir "${__include_dir}")
+    find_library(__opencv_lib
+      NAMES opencv_core3 opencv_core opencv_world
+      PATHS "${__lib_dir}" "${__lib_dir}/x86_64-linux-gnu"
+      NO_DEFAULT_PATH)
+    #message(STATUS "__opencv_lib: ${__opencv_lib}")
+    if(__opencv_lib)
+      get_filename_component(OpenCV_LIB_PATH "${__opencv_lib}" DIRECTORY)
+    else()
+      set(OpenCV_LIB_PATH "${__lib_dir}")
+    endif()
+    #message(STATUS "OpenCV_LIB_PATH: ${OpenCV_LIB_PATH}")
+  endif()
+endif()
+
 if(MSVC OR MSYS OR MINGW)
   get_filename_component(OpenCV_LIB_SEARCH_PATH "${OpenCV_LIB_PATH}/../bin" ABSOLUTE)
 else()
diff --git a/src/mynteye/api/processor/depth_processor.cc b/src/mynteye/api/processor/depth_processor.cc
index e9ceea6..e6e5746 100644
--- a/src/mynteye/api/processor/depth_processor.cc
+++ b/src/mynteye/api/processor/depth_processor.cc
@@ -41,13 +41,23 @@ Object *DepthProcessor::OnCreateOutput() {
 bool DepthProcessor::OnProcess(
     Object *const in, Object *const out, Processor *const parent) {
   MYNTEYE_UNUSED(parent)
-  // const ObjMat *input = Object::Cast<ObjMat>(in);
-  // ObjMat *output = Object::Cast<ObjMat>(out);
-  // cv::Mat channels[3 /*input->value.channels()*/];
-  // cv::split(input->value, channels);
-  // channels[2].convertTo(output->value, CV_16UC1);
-  // output->id = input->id;
-  // output->data = input->data;
+  const ObjMat *input = Object::Cast<ObjMat>(in);
+  ObjMat *output = Object::Cast<ObjMat>(out);
+  int rows = input->value.rows;
+  int cols = input->value.cols;
+  float T = 0.08;
+  float f = 0.01;
+  cv::Mat depth_mat = cv::Mat::zeros(rows, cols, CV_32F);
+  for (int i = 0; i < rows; i++) {
+    for (int j = 0; j < cols; j++) {
+      float disparity_value = input->value.at<float>(i, j);
+      float depth = T * f / disparity_value;
+      depth_mat.at<float>(i, j) = depth;
+    }
+  }
+  output->value = depth_mat;
+  output->id = input->id;
+  output->data = input->data;
   return true;
 }
 
diff --git a/src/mynteye/api/processor/points_processor.cc b/src/mynteye/api/processor/points_processor.cc
index 81102d5..3bb9524 100644
--- a/src/mynteye/api/processor/points_processor.cc
+++ b/src/mynteye/api/processor/points_processor.cc
@@ -14,6 +14,8 @@
 #include "mynteye/api/processor/points_processor.h"
 
 #include <utility>
+#include <limits>
+#include <vector>
 
 #include <opencv2/calib3d/calib3d.hpp>
 
@@ -21,6 +23,33 @@
 
 MYNTEYE_BEGIN_NAMESPACE
 
+namespace {
+// Encapsulate differences between processing float and uint16_t depths
+template<typename T> struct DepthTraits {};
+
+template<>
+struct DepthTraits<uint16_t> {
+  static inline bool valid(uint16_t depth) { return depth != 0; }
+  static inline float toMeters(uint16_t depth) { return depth * 0.001f; }  // originally mm
+  static inline uint16_t fromMeters(float depth) { return (depth * 1000.0f) + 0.5f; }
+  static inline void initializeBuffer(std::vector<uint8_t>& buffer) {}  // Do nothing - already zero-filled
+};
+
+template<>
+struct DepthTraits<float> {
+  static inline bool valid(float depth) { return std::isfinite(depth); }
+  static inline float toMeters(float depth) { return depth; }
+  static inline float fromMeters(float depth) { return depth; }
+
+  static inline void initializeBuffer(std::vector<uint8_t>& buffer) {
+    float* start = reinterpret_cast<float*>(&buffer[0]);
+    float* end = reinterpret_cast<float*>(&buffer[0] + buffer.size());
+    std::fill(start, end, std::numeric_limits<float>::quiet_NaN());
+  }
+};
+
+};  // namespace
+
 const char PointsProcessor::NAME[] = "PointsProcessor";
 
 PointsProcessor::PointsProcessor(std::int32_t proc_period)
@@ -42,6 +71,46 @@ Object *PointsProcessor::OnCreateOutput() {
 
 bool PointsProcessor::OnProcess(
     Object *const in, Object *const out, Processor *const parent) {
+  MYNTEYE_UNUSED(parent)
+
+  float fx = 3.6797709792391299e+02;
+  float fy = 3.6808712539453859e+02;
+  float cx = 3.7414963027144353e+02;
+  float cy = 2.3125000326472903e+02;
+
+  // Use correct principal point from calibration
+  float center_x = cx;
+  float center_y = cy;
+
+  // Combine unit conversion (if necessary) with scaling by focal length for computing (X,Y)
+  double unit_scaling = DepthTraits<float>::toMeters(static_cast<float>(1));
+  float constant_x = unit_scaling / fx;
+  float constant_y = unit_scaling / fy;
+  // float bad_point = std::numeric_limits<float>::quiet_NaN();
+
+  const ObjMat *input = Object::Cast<ObjMat>(in);
+  ObjMat *output = Object::Cast<ObjMat>(out);
+  output->value.create(input->value.size(), CV_MAKETYPE(CV_32F, 3));
+
+  int height = static_cast<int>(output->value.rows);
+  int width = static_cast<int>(output->value.cols);
+  for (int v = 0; v < height; ++v) {
+
+    cv::Vec3f *dptr = output->value.ptr<cv::Vec3f>(v);
+    for (int u = 0; u < width; ++u) {
+      float depth = input->value.at<float>(v, u);
+
+      // Missing points denoted by NaNs
+      if (!DepthTraits<float>::valid(depth)) {
+        continue;
+      }
+
+      dptr[u][0] = (u - center_x) * depth * constant_x;
+      dptr[u][1] = (v - center_y) * depth * constant_y;
+      dptr[u][2] = DepthTraits<float>::toMeters(depth);
+    }
+  }
+
   return true;
 }
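
Note (not part of the patch): the PointsProcessor change above back-projects each depth sample through the standard pinhole model, X = (u - cx) * Z / fx, Y = (v - cy) * Z / fy, Z = depth in meters. The small standalone sketch below reproduces that computation for a single pixel so the hard-coded intrinsics can be sanity-checked in isolation; the intrinsic values are the ones the patch hard-codes, and the helper name backProject() is illustrative only, not an SDK API.

    #include <opencv2/core.hpp>

    // Minimal sketch: back-project one pixel (u, v) with depth given in meters,
    // using the pinhole intrinsics hard-coded in points_processor.cc above.
    static cv::Vec3f backProject(int u, int v, float depth_m) {
      const float fx = 3.6797709792391299e+02f;
      const float fy = 3.6808712539453859e+02f;
      const float cx = 3.7414963027144353e+02f;
      const float cy = 2.3125000326472903e+02f;
      return cv::Vec3f((u - cx) * depth_m / fx,  // X, right of the optical axis
                       (v - cy) * depth_m / fy,  // Y, below the optical axis
                       depth_m);                 // Z, along the optical axis
    }

    // Usage: a pixel near the principal point maps to roughly (0, 0, Z).
    // cv::Vec3f p = backProject(374, 231, 1.0f);  // ~ (0, 0, 1)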