diff --git a/samples/tutorials/util/pc_viewer.cc b/samples/tutorials/util/pc_viewer.cc
index 08bcce6..bec96dc 100644
--- a/samples/tutorials/util/pc_viewer.cc
+++ b/samples/tutorials/util/pc_viewer.cc
@@ -84,7 +84,6 @@ void PCViewer::ConvertMatToPointCloud(
   for (int i = 0; i < xyz.rows; i++) {
     for (int j = 0; j < xyz.cols; j++) {
       auto &&p = xyz.at<cv::Point3f>(i, j);
-      if (std::abs(p.z) > 9999) continue;
       if (std::isfinite(p.x) && std::isfinite(p.y) && std::isfinite(p.z)) {
         // LOG(INFO) << "[" << i << "," << j << "] x: " << p.x << ", y: " << p.y
         //     << ", z: " << p.z;
diff --git a/src/mynteye/api/processor/disparity_processor.cc b/src/mynteye/api/processor/disparity_processor.cc
index c60de02..b6c37a3 100644
--- a/src/mynteye/api/processor/disparity_processor.cc
+++ b/src/mynteye/api/processor/disparity_processor.cc
@@ -26,34 +26,37 @@ const char DisparityProcessor::NAME[] = "DisparityProcessor";
 DisparityProcessor::DisparityProcessor(std::int32_t proc_period)
     : Processor(std::move(proc_period)) {
   VLOG(2) << __func__ << ": proc_period=" << proc_period;
-
-  int blockSize_ = 15;       // 15
-  int numDisparities_ = 64;  // 64
+  int sgbmWinSize = 3;
+  int numberOfDisparities = 64;
 #ifdef WITH_OPENCV2
-  bm_ = cv::Ptr<cv::StereoBM>(
-      new cv::StereoBM(
-          cv::StereoBM::BASIC_PRESET,
-          numDisparities_,
-          blockSize_));
+  // StereoSGBM
+  //   http://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html?#stereosgbm
+  sgbm_ = cv::Ptr<cv::StereoSGBM>(
+      new cv::StereoSGBM(
+          0,                               // minDisparity
+          numberOfDisparities,             // numDisparities
+          sgbmWinSize,                     // SADWindowSize
+          8 * sgbmWinSize * sgbmWinSize,   // P1
+          32 * sgbmWinSize * sgbmWinSize,  // P2
+          1,                               // disp12MaxDiff
+          63,                              // preFilterCap
+          10,                              // uniquenessRatio
+          100,                             // speckleWindowSize
+          32,                              // speckleRange
+          false));                         // fullDP
 #else
-  int minDisparity_ = 0;         // 0
-  int preFilterSize_ = 9;        // 9
-  int preFilterCap_ = 31;        // 31
-  int uniquenessRatio_ = 15;     // 15
-  int textureThreshold_ = 10;    // 10
-  int speckleWindowSize_ = 100;  // 100
-  int speckleRange_ = 4;         // 4
-  bm_ = cv::StereoBM::create(16, 9);
-  bm_->setBlockSize(blockSize_);
-  bm_->setMinDisparity(minDisparity_);
-  bm_->setNumDisparities(numDisparities_);
-  bm_->setPreFilterSize(preFilterSize_);
-  bm_->setPreFilterCap(preFilterCap_);
-  bm_->setUniquenessRatio(uniquenessRatio_);
-  bm_->setTextureThreshold(textureThreshold_);
-  bm_->setSpeckleWindowSize(speckleWindowSize_);
-  bm_->setSpeckleRange(speckleRange_);
+  sgbm_ = cv::StereoSGBM::create(0, 16, 3);
+  sgbm_->setPreFilterCap(63);
+  sgbm_->setBlockSize(sgbmWinSize);
+  sgbm_->setP1(8 * sgbmWinSize * sgbmWinSize);
+  sgbm_->setP2(32 * sgbmWinSize * sgbmWinSize);
+  sgbm_->setMinDisparity(0);
+  sgbm_->setNumDisparities(numberOfDisparities);
+  sgbm_->setUniquenessRatio(10);
+  sgbm_->setSpeckleWindowSize(100);
+  sgbm_->setSpeckleRange(32);
+  sgbm_->setDisp12MaxDiff(1);
 #endif
 }
@@ -77,11 +80,27 @@ bool DisparityProcessor::OnProcess(
   cv::Mat disparity;
 #ifdef WITH_OPENCV2
-  (*bm_)(input->first, input->second, disparity);
+  // StereoSGBM::operator()
+  //   http://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#stereosgbm-operator
+  // Output disparity map. It is a 16-bit signed single-channel image of the
+  // same size as the input image.
+  // It contains disparity values scaled by 16. So, to get the floating-point
+  // disparity map,
+  // you need to divide each disp element by 16.
+  (*sgbm_)(input->first, input->second, disparity);
 #else
-  bm_->compute(input->first, input->second, disparity);
+  // compute()
+  //   http://docs.opencv.org/master/d2/d6e/classcv_1_1StereoMatcher.html
+  // Output disparity map. It has the same size as the input images.
+  // Some algorithms, like StereoBM or StereoSGBM compute 16-bit fixed-point
+  // disparity map
+  // (where each disparity value has 4 fractional bits),
+  // whereas other algorithms output 32-bit floating-point disparity map.
+  sgbm_->compute(input->first, input->second, disparity);
 #endif
-  disparity.convertTo(output->value, CV_32F, 1./16);
+  output->value = disparity / 16 + 1;
+  output->id = input->first_id;
+  output->data = input->first_data;
 
   return true;
 }
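Reviewer note (not part of the patch): the comments above quote the OpenCV documentation on the 16-bit fixed-point output of StereoSGBM. A minimal standalone sketch of the usual conversion back to a floating-point disparity map, which is what the removed convertTo(..., CV_32F, 1./16) call did; it assumes OpenCV 3, and the file names and parameter values are placeholders, not values from this repository.

#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>

int main() {
  // Hypothetical rectified input pair; any same-size 8-bit grayscale images work.
  cv::Mat left = cv::imread("left_rect.png", cv::IMREAD_GRAYSCALE);
  cv::Mat right = cv::imread("right_rect.png", cv::IMREAD_GRAYSCALE);

  int win_size = 3;
  cv::Ptr<cv::StereoSGBM> sgbm = cv::StereoSGBM::create(0, 64, win_size);
  sgbm->setP1(8 * win_size * win_size);
  sgbm->setP2(32 * win_size * win_size);

  cv::Mat disp16s, disp32f;
  sgbm->compute(left, right, disp16s);           // CV_16S, disparity values scaled by 16
  disp16s.convertTo(disp32f, CV_32F, 1.0 / 16);  // true disparity in pixels
  return 0;
}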
diff --git a/src/mynteye/api/processor/disparity_processor.h b/src/mynteye/api/processor/disparity_processor.h
index 0ec43b6..370dca3 100644
--- a/src/mynteye/api/processor/disparity_processor.h
+++ b/src/mynteye/api/processor/disparity_processor.h
@@ -17,13 +17,11 @@
 #include <string>
 
-#include <opencv2/core/core.hpp>
-
 #include "mynteye/api/processor.h"
 
 namespace cv {
 
-class StereoBM;
+class StereoSGBM;
 
 }  // namespace cv
@@ -44,7 +42,7 @@ class DisparityProcessor : public Processor {
       Object *const in, Object *const out, Processor *const parent) override;
 
  private:
-  cv::Ptr<cv::StereoBM> bm_;
+  cv::Ptr<cv::StereoSGBM> sgbm_;
 };
 
 MYNTEYE_END_NAMESPACE
diff --git a/src/mynteye/api/processor/points_processor.cc b/src/mynteye/api/processor/points_processor.cc
index 9b40e85..7d2b6d4 100644
--- a/src/mynteye/api/processor/points_processor.cc
+++ b/src/mynteye/api/processor/points_processor.cc
@@ -45,38 +45,9 @@ bool PointsProcessor::OnProcess(
   MYNTEYE_UNUSED(parent)
   const ObjMat *input = Object::Cast<ObjMat>(in);
   ObjMat *output = Object::Cast<ObjMat>(out);
-
-  cv::Mat disparity = input->value;
-  output->value.create(disparity.size(), CV_MAKETYPE(CV_32FC3, 3));
-  cv::Mat _3dImage = output->value;
-
-  const float bigZ = 10000.f;
-  cv::Matx44d Q;
-  Q_.convertTo(Q, CV_64F);
-
-  int x, cols = disparity.cols;
-  CV_Assert(cols >= 0);
-
-  double minDisparity = FLT_MAX;
-
-  cv::minMaxIdx(disparity, &minDisparity, 0, 0, 0);
-
-  for (int y = 0; y < disparity.rows; y++) {
-    float *sptr = disparity.ptr<float>(y);
-    cv::Vec3f *dptr = _3dImage.ptr<cv::Vec3f>(y);
-
-    for (x = 0; x < cols; x++) {
-      double d = sptr[x];
-      cv::Vec4d homg_pt = Q * cv::Vec4d(x, y, d, 1.0);
-      dptr[x] = cv::Vec3d(homg_pt.val);
-      dptr[x] /= homg_pt[3];
-
-      if (fabs(d - minDisparity) <= FLT_EPSILON) {
-        dptr[x][2] = bigZ;
-      }
-    }
-  }
-
+  cv::reprojectImageTo3D(input->value, output->value, Q_, true);
+  output->id = input->id;
+  output->data = input->data;
   return true;
 }
diff --git a/src/mynteye/api/processor/rectify_processor.cc b/src/mynteye/api/processor/rectify_processor.cc
index 2b14e12..9c570f0 100644
--- a/src/mynteye/api/processor/rectify_processor.cc
+++ b/src/mynteye/api/processor/rectify_processor.cc
@@ -72,16 +72,12 @@ void RectifyProcessor::InitParams(
       in_right.cy, 0, 0, 1);
   cv::Mat D1(1, 5, CV_64F, in_left.coeffs);
   cv::Mat D2(1, 5, CV_64F, in_right.coeffs);
-  /*
   cv::Mat R =
       (cv::Mat_<double>(3, 3) << ex_right_to_left.rotation[0][0],
        ex_right_to_left.rotation[0][1], ex_right_to_left.rotation[0][2],
        ex_right_to_left.rotation[1][0], ex_right_to_left.rotation[1][1],
        ex_right_to_left.rotation[1][2], ex_right_to_left.rotation[2][0],
        ex_right_to_left.rotation[2][1], ex_right_to_left.rotation[2][2]);
-  */
-  cv::Mat R =
-      (cv::Mat_<double>(3, 3) << 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0);
   cv::Mat T(3, 1, CV_64F, ex_right_to_left.translation);
 
   VLOG(2) << "InitParams size: " << size;
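Reviewer note (not part of the patch): the points_processor change above replaces the hand-rolled Q-matrix loop with cv::reprojectImageTo3D, which applies the 4x4 reprojection matrix Q from stereoRectify to every (x, y, disparity) pixel. A minimal sketch of that call; it assumes OpenCV 3, and the helper name and its disparity/Q inputs are illustrative only.

#include <opencv2/calib3d/calib3d.hpp>

// Hypothetical helper: disparity map (CV_32F or CV_16S) plus Q from stereoRectify
// -> per-pixel 3D points in the rectified left camera frame.
cv::Mat DisparityToPoints(const cv::Mat &disparity, const cv::Mat &Q) {
  cv::Mat xyz;  // CV_32FC3
  // handleMissingValues=true: pixels at the minimal disparity (outliers from
  // StereoMatcher::compute) get a very large Z (10000) instead of garbage,
  // which is why the |z| > 9999 guard in pc_viewer.cc became optional.
  cv::reprojectImageTo3D(disparity, xyz, Q, true);
  return xyz;
}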