feat(calib models): compile and process function

TinyOh 2019-01-06 23:05:22 +08:00
parent e91c5d53fd
commit 675f5ff97f
2 changed files with 237 additions and 12 deletions

View File

@@ -150,6 +150,7 @@ if(WITH_CAM_MODELS)
src/mynteye/api/camodocal/src/gpl/EigenQuaternionParameterization.cc
)
target_link_libraries(camodocal ${CERES_LIBRARIES})
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC")
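# -fPIC is required because the static camodocal archive built here is later
# linked into the shared ${MYNTEYE_NAME} library. Assuming WITH_CAM_MODELS is
# exposed as a CMake option, it would typically be enabled with:
#   cmake -DWITH_CAM_MODELS=ON ..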
endif()
## libmynteye
@@ -170,17 +171,6 @@ if(OS_WIN)
set(UVC_SRC src/mynteye/uvc/win/uvc-wmf.cc)
elseif(OS_MAC)
add_compile_options(-x objective-c++ -Wno-unused-command-line-argument -Wno-missing-method-return-type -Wno-sign-compare)
## INCLUDE_DIRECTORIES(src/mynteye/uvc/macosx)
## INCLUDE_DIRECTORIES(src/mynteye/uvc/macosx/VVUVCKit)
## aux_source_directory(src/mynteye/uvc/macosx/VVUVCKit/ MAC_VVUVCKIT_SRC_LIST)
## aux_source_directory(src/mynteye/uvc/macosx/USBBusProber/ MAC_USBBUSPROBER_SRC_LIST)
## add_library(usbBusProber SHARED ${MAC_USBBUSPROBER_SRC_LIST})
## set_target_properties(usbBusProber PROPERTIES FRAMEWORK TRUE )
## add_library(vvuvckit SHARED ${MAC_VVUVCKIT_SRC_LIST})
## set_target_properties(vvuvckit PROPERTIES FRAMEWORK TRUE )
## find_package(libuvc REQUIRED)
## set(UVC_LIB ${libuvc_LIBRARIES})
## include_directories(${libuvc_INCLUDE_DIRS})
INCLUDE_DIRECTORIES(src/mynteye/uvc/macosx/USBBusProber.framework/Headers)
INCLUDE_DIRECTORIES(src/mynteye/uvc/macosx/VVUVCKit.framework/Headers)
@@ -257,6 +247,10 @@ target_link_libraries(${MYNTEYE_NAME} ${MYNTEYE_LINKLIBS})
if(OS_MAC)
target_link_libraries( ${MYNTEYE_NAME} ${OSX_EXTRA_LIBS} )
endif()
if(WITH_CAM_MODELS)
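# camodocal was built above as a static archive, so it is linked by full path
# here; OUT_DIR is assumed to resolve to the SDK's _output directory.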
# link_directories("./_output/lib")
target_link_libraries(${MYNTEYE_NAME} "${OUT_DIR}/lib/libcamodocal.a")
endif()
target_link_threads(${MYNTEYE_NAME})
if(OS_WIN)

View File

@@ -17,12 +17,233 @@
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "mynteye/logger.h"
#include "mynteye/device/device.h"
// #define WITH_CAM_MODELS
#ifdef WITH_CAM_MODELS
#include <boost/algorithm/string.hpp>
#include <boost/filesystem.hpp>
#include <boost/program_options.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/opencv.hpp>
#include <camodocal/camera_models/Camera.h>
#include <camodocal/camera_models/CameraFactory.h>
#include <camodocal/camera_models/CataCamera.h>
#include <camodocal/camera_models/EquidistantCamera.h>
#include <camodocal/camera_models/PinholeCamera.h>
#include <camodocal/gpl/gpl.h>
std::string _left_camera_yaml;
std::string _right_camera_yaml;
std::map<std::string, camodocal::CameraPtr> _cam_odo_ptr;
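// Local re-implementation of cv::stereoRectify: identical to the stock OpenCV
// routine except that the image corners are undistorted through the camodocal
// camera models (liftProjective) instead of cvUndistortPoints, so it also
// works for equidistant (KANNALA_BRANDT) calibrations.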
void stereoRectify(const CvMat* K1, const CvMat* K2,
const CvMat* D1, const CvMat* D2, CvSize imageSize,
const CvMat* matR, const CvMat* matT,
CvMat* _R1, CvMat* _R2, CvMat* _P1, CvMat* _P2,
int flags = cv::CALIB_ZERO_DISPARITY, double alpha = -1,
CvSize newImgSize = cv::Size()) {
double _om[3], _t[3] = {0}, _uu[3]={0,0,0}, _r_r[3][3], _pp[3][4];
double _ww[3], _wr[3][3], _z[3] = {0,0,0}, _ri[3][3], _w3[3];
cv::Rect_<float> inner1, inner2, outer1, outer2;
CvMat om = cvMat(3, 1, CV_64F, _om);
CvMat t = cvMat(3, 1, CV_64F, _t);
CvMat uu = cvMat(3, 1, CV_64F, _uu);
CvMat r_r = cvMat(3, 3, CV_64F, _r_r);
CvMat pp = cvMat(3, 4, CV_64F, _pp);
CvMat ww = cvMat(3, 1, CV_64F, _ww); // temps
CvMat w3 = cvMat(3, 1, CV_64F, _w3); // temps
CvMat wR = cvMat(3, 3, CV_64F, _wr);
CvMat Z = cvMat(3, 1, CV_64F, _z);
CvMat Ri = cvMat(3, 3, CV_64F, _ri);
double nx = imageSize.width, ny = imageSize.height;
int i, k;
double nt, nw;
if( matR->rows == 3 && matR->cols == 3 )
cvRodrigues2(matR, &om); // get vector rotation
else
cvConvert(matR, &om); // it's already a rotation vector
cvConvertScale(&om, &om, -0.5); // get average rotation
cvRodrigues2(&om, &r_r); // rotate cameras to same orientation by averaging
cvMatMul(&r_r, matT, &t);
int idx = fabs(_t[0]) > fabs(_t[1]) ? 0 : 1;
// if idx == 0
// e1 = T / ||T||
// e2 = e1 x [0,0,1]
// if idx == 1
// e2 = T / ||T||
// e1 = e2 x [0,0,1]
// e3 = e1 x e2
_uu[2] = 1;
cvCrossProduct(&uu, &t, &ww);
nt = cvNorm(&t, 0, CV_L2);
nw = cvNorm(&ww, 0, CV_L2);
cvConvertScale(&ww, &ww, 1 / nw);
cvCrossProduct(&t, &ww, &w3);
nw = cvNorm(&w3, 0, CV_L2);
cvConvertScale(&w3, &w3, 1 / nw);
_uu[2] = 0;
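// Stack e1, e2, e3 as the rows of the rectifying rotation wR; the row order
// depends on whether the baseline is horizontal or vertical (idx).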
for (i = 0; i < 3; ++i)
{
_wr[idx][i] = -_t[i] / nt;
_wr[idx ^ 1][i] = -_ww[i];
_wr[2][i] = _w3[i] * (1 - 2 * idx); // if idx == 1 -> opposite direction
}
// apply to both views
cvGEMM(&wR, &r_r, 1, 0, 0, &Ri, CV_GEMM_B_T);
cvConvert( &Ri, _R1 );
cvGEMM(&wR, &r_r, 1, 0, 0, &Ri, 0);
cvConvert( &Ri, _R2 );
cvMatMul(&Ri, matT, &t);
// calculate projection/camera matrices
// these contain the relevant rectified image internal params (fx, fy=fx, cx, cy)
double fc_new = DBL_MAX;
CvPoint2D64f cc_new[2] = {{0,0}, {0,0}};
newImgSize = newImgSize.width * newImgSize.height != 0 ? newImgSize : imageSize;
const double ratio_x = (double)newImgSize.width / imageSize.width / 2;
const double ratio_y = (double)newImgSize.height / imageSize.height / 2;
const double ratio = idx == 1 ? ratio_x : ratio_y;
fc_new = (cvmGet(K1, idx ^ 1, idx ^ 1) + cvmGet(K2, idx ^ 1, idx ^ 1)) * ratio;
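// fc_new: mean of both cameras' focal lengths along the non-baseline axis,
// scaled by the output/input image size ratio.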
for( k = 0; k < 2; k++ )
{
CvPoint2D32f _pts[4];
CvPoint3D32f _pts_3[4];
CvMat pts = cvMat(1, 4, CV_32FC2, _pts);
CvMat pts_3 = cvMat(1, 4, CV_32FC3, _pts_3);
Eigen::Vector2d a;
Eigen::Vector3d b;
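// Lift the four image corners to normalized rays through the camodocal
// camera model; this replaces the cvUndistortPoints step of the stock
// cv::stereoRectify implementation.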
for( i = 0; i < 4; i++ )
{
int j = (i<2) ? 0 : 1;
a.x() = (float)((i % 2)*(nx));
a.y() = (float)(j*(ny));
if (0 == k){
_cam_odo_ptr["LEFT"]->liftProjective(a, b);
}
else{
_cam_odo_ptr["RIGHT"]->liftProjective(a, b);
}
_pts[i].x = b.x()/b.z();
_pts[i].y = b.y()/b.z();
}
cvConvertPointsHomogeneous( &pts, &pts_3 );
//Change camera matrix to have cc=[0,0] and fc = fc_new
double _a_tmp[3][3];
CvMat A_tmp = cvMat(3, 3, CV_64F, _a_tmp);
_a_tmp[0][0]=fc_new;
_a_tmp[1][1]=fc_new;
_a_tmp[0][2]=0.0;
_a_tmp[1][2]=0.0;
cvProjectPoints2( &pts_3, k == 0 ? _R1 : _R2, &Z, &A_tmp, 0, &pts );
CvScalar avg = cvAvg(&pts);
cc_new[k].x = (nx)/2 - avg.val[0];
cc_new[k].y = (ny)/2 - avg.val[1];
//cc_new[k].x = (nx)/2;
//cc_new[k].y = (ny)/2;
}
if( flags & cv::CALIB_ZERO_DISPARITY )
{
cc_new[0].x = cc_new[1].x = (cc_new[0].x + cc_new[1].x)*0.5;
cc_new[0].y = cc_new[1].y = (cc_new[0].y + cc_new[1].y)*0.5;
}
else if( idx == 0 ) // horizontal stereo
cc_new[0].y = cc_new[1].y = (cc_new[0].y + cc_new[1].y)*0.5;
else // vertical stereo
cc_new[0].x = cc_new[1].x = (cc_new[0].x + cc_new[1].x)*0.5;
cvZero( &pp );
_pp[0][0] = _pp[1][1] = fc_new;
_pp[0][2] = cc_new[0].x;
_pp[1][2] = cc_new[0].y;
_pp[2][2] = 1;
cvConvert(&pp, _P1);
_pp[0][2] = cc_new[1].x;
_pp[1][2] = cc_new[1].y;
_pp[idx][3] = _t[idx]*fc_new; // baseline * focal length
cvConvert(&pp, _P2);
alpha = MIN(alpha, 1.);
//icvGetRectangles( K1, D1, _R1, _P1, imageSize, inner1, outer1 );
//icvGetRectangles( K2, D2, _R2, _P2, imageSize, inner2, outer2 );
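// icvGetRectangles is OpenCV-internal and not available here, so the
// alpha-based free-scaling below stays disabled and s remains 1.0.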
{
newImgSize = newImgSize.width*newImgSize.height != 0 ? newImgSize : imageSize;
double cx1_0 = cc_new[0].x;
double cy1_0 = cc_new[0].y;
double cx2_0 = cc_new[1].x;
double cy2_0 = cc_new[1].y;
double cx1 = newImgSize.width*cx1_0/imageSize.width;
double cy1 = newImgSize.height*cy1_0/imageSize.height;
double cx2 = newImgSize.width*cx2_0/imageSize.width;
double cy2 = newImgSize.height*cy2_0/imageSize.height;
double s = 1.;
//if( alpha >= 0 )
//{
// double s0 = std::max(std::max(std::max((double)cx1/(cx1_0 - inner1.x), (double)cy1/(cy1_0 - inner1.y)),
// (double)(newImgSize.width - cx1)/(inner1.x + inner1.width - cx1_0)),
// (double)(newImgSize.height - cy1)/(inner1.y + inner1.height - cy1_0));
// s0 = std::max(std::max(std::max(std::max((double)cx2/(cx2_0 - inner2.x), (double)cy2/(cy2_0 - inner2.y)),
// (double)(newImgSize.width - cx2)/(inner2.x + inner2.width - cx2_0)),
// (double)(newImgSize.height - cy2)/(inner2.y + inner2.height - cy2_0)),
// s0);
// double s1 = std::min(std::min(std::min((double)cx1/(cx1_0 - outer1.x), (double)cy1/(cy1_0 - outer1.y)),
// (double)(newImgSize.width - cx1)/(outer1.x + outer1.width - cx1_0)),
// (double)(newImgSize.height - cy1)/(outer1.y + outer1.height - cy1_0));
// s1 = std::min(std::min(std::min(std::min((double)cx2/(cx2_0 - outer2.x), (double)cy2/(cy2_0 - outer2.y)),
// (double)(newImgSize.width - cx2)/(outer2.x + outer2.width - cx2_0)),
// (double)(newImgSize.height - cy2)/(outer2.y + outer2.height - cy2_0)),
// s1);
// s = s0*(1 - alpha) + s1*alpha;
//}
fc_new *= s;
cc_new[0] = cvPoint2D64f(cx1, cy1);
cc_new[1] = cvPoint2D64f(cx2, cy2);
cvmSet(_P1, 0, 0, fc_new);
cvmSet(_P1, 1, 1, fc_new);
cvmSet(_P1, 0, 2, cx1);
cvmSet(_P1, 1, 2, cy1);
cvmSet(_P2, 0, 0, fc_new);
cvmSet(_P2, 1, 1, fc_new);
cvmSet(_P2, 0, 2, cx2);
cvmSet(_P2, 1, 2, cy2);
cvmSet(_P2, idx, 3, s*cvmGet(_P2, idx, 3));
}
}
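// Hypothetical usage sketch (illustrative only; assumes _cam_odo_ptr["LEFT"]
// and _cam_odo_ptr["RIGHT"] are already populated, and an OpenCV version whose
// cv::Mat still converts implicitly to CvMat):
//   cv::Mat K1, K2, D1, D2, R, T;  // pinhole intrinsics/extrinsics of the pair
//   cv::Mat R1(3, 3, CV_64F), R2(3, 3, CV_64F);
//   cv::Mat P1(3, 4, CV_64F), P2(3, 4, CV_64F);
//   CvMat c_K1 = K1, c_K2 = K2, c_D1 = D1, c_D2 = D2, c_R = R, c_T = T;
//   CvMat c_R1 = R1, c_R2 = R2, c_P1 = P1, c_P2 = P2;
//   stereoRectify(&c_K1, &c_K2, &c_D1, &c_D2, cvSize(752, 480), &c_R, &c_T,
//                 &c_R1, &c_R2, &c_P1, &c_P2, cv::CALIB_ZERO_DISPARITY);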
#endif
MYNTEYE_BEGIN_NAMESPACE
const char RectifyProcessor::NAME[] = "RectifyProcessor";
RectifyProcessor::RectifyProcessor(
@@ -50,10 +271,18 @@ void RectifyProcessor::NotifyImageParamsChanged() {
device_->GetExtrinsics(Stream::RIGHT, Stream::LEFT));
} else if (in_left->calib_model() ==
CalibrationModel::KANNALA_BRANDT) {
#ifdef WITH_CAM_MODELS
_left_camera_yaml = "./camera_left.yaml";
_right_camera_yaml = "./camera_right.yaml";
_cam_odo_ptr["LEFT"] = camodocal::CameraFactory::instance()->generateCameraFromYamlFile(_left_camera_yaml);
_cam_odo_ptr["RIGHT"] = camodocal::CameraFactory::instance()->generateCameraFromYamlFile(_right_camera_yaml);
InitParams(
*std::dynamic_pointer_cast<IntrinsicsEquidistant>(in_left),
*std::dynamic_pointer_cast<IntrinsicsEquidistant>(in_right),
device_->GetExtrinsics(Stream::RIGHT, Stream::LEFT));
#else
VLOG(2) << __func__ << ": KANNALA_BRANDT macro switch is not on";
#endif
} else {
// todo
}
@@ -116,6 +345,8 @@ void RectifyProcessor::InitParams(
cv::initUndistortRectifyMap(M2, D2, R2, P2, size, CV_16SC2, map21, map22);
}
void RectifyProcessor::InitParams(
IntrinsicsEquidistant in_left,
IntrinsicsEquidistant in_right,