refactor(camodocal): remove ceres dependency

TinyOh committed 2019-01-24 17:20:34 +08:00
parent 0b71d05813
commit 54eae3e2d0
492 changed files with 29 additions and 304467 deletions
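The refactor drops Ceres from camodocal's headers: the Ceres-based utilities below are deleted outright, and the remaining use in EquidistantCamera.h is replaced by a local quaternion helper (see that hunk further down). As an illustrative sketch of the replacement direction only, not code from this commit, a Ceres-free equivalent of ceres::QuaternionToRotation can be written with Eigen, assuming the same (w, x, y, z) ordering:

#include <Eigen/Dense>

// Sketch only: Eigen-based stand-in for ceres::QuaternionToRotation.
// q is expected in (w, x, y, z) order, matching the Ceres convention.
template <typename T>
Eigen::Matrix<T, 3, 3> QuaternionToRotationMatrix(const T *const q) {
  Eigen::Quaternion<T> quat(q[0], q[1], q[2], q[3]);
  quat.normalize();  // guard against non-unit input
  return quat.toRotationMatrix();
}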

View File

@@ -1,399 +0,0 @@
#ifndef EIGENUTILS_H
#define EIGENUTILS_H
#include "Eigen/Dense"
#include <iostream>
#include "ceres/rotation.h"
namespace camodocal {
template <typename T>
T square(const T &m) {
return m * m;
}
// Returns the 3D cross product skew symmetric matrix of a given 3D vector
template <typename T>
Eigen::Matrix<T, 3, 3> skew(const Eigen::Matrix<T, 3, 1> &vec) {
return (Eigen::Matrix<T, 3, 3>() << T(0), -vec(2), vec(1), vec(2), T(0),
-vec(0), -vec(1), vec(0), T(0))
.finished();
}
template <typename Derived>
typename Eigen::MatrixBase<Derived>::PlainObject sqrtm(
const Eigen::MatrixBase<Derived> &A) {
Eigen::SelfAdjointEigenSolver<typename Derived::PlainObject> es(A);
return es.operatorSqrt();
}
template <typename T>
Eigen::Matrix<T, 3, 3> AngleAxisToRotationMatrix(
const Eigen::Matrix<T, 3, 1> &rvec) {
T angle = rvec.norm();
if (angle == T(0)) {
return Eigen::Matrix<T, 3, 3>::Identity();
}
Eigen::Matrix<T, 3, 1> axis;
axis = rvec.normalized();
Eigen::Matrix<T, 3, 3> rmat;
rmat = Eigen::AngleAxis<T>(angle, axis);
return rmat;
}
template <typename T>
Eigen::Quaternion<T> AngleAxisToQuaternion(const Eigen::Matrix<T, 3, 1> &rvec) {
Eigen::Matrix<T, 3, 3> rmat = AngleAxisToRotationMatrix<T>(rvec);
return Eigen::Quaternion<T>(rmat);
}
template <typename T>
void AngleAxisToQuaternion(const Eigen::Matrix<T, 3, 1> &rvec, T *q) {
Eigen::Quaternion<T> quat = AngleAxisToQuaternion<T>(rvec);
q[0] = quat.x();
q[1] = quat.y();
q[2] = quat.z();
q[3] = quat.w();
}
template <typename T>
Eigen::Matrix<T, 3, 1> RotationToAngleAxis(const Eigen::Matrix<T, 3, 3> &rmat) {
Eigen::AngleAxis<T> angleaxis;
angleaxis.fromRotationMatrix(rmat);
return angleaxis.angle() * angleaxis.axis();
}
template <typename T>
void QuaternionToAngleAxis(const T *const q, Eigen::Matrix<T, 3, 1> &rvec) {
Eigen::Quaternion<T> quat(q[3], q[0], q[1], q[2]);
Eigen::Matrix<T, 3, 3> rmat = quat.toRotationMatrix();
Eigen::AngleAxis<T> angleaxis;
angleaxis.fromRotationMatrix(rmat);
rvec = angleaxis.angle() * angleaxis.axis();
}
template <typename T>
Eigen::Matrix<T, 3, 3> QuaternionToRotation(const T *const q) {
T R[9];
ceres::QuaternionToRotation(q, R);
Eigen::Matrix<T, 3, 3> rmat;
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
rmat(i, j) = R[i * 3 + j];
}
}
return rmat;
}
template <typename T>
void QuaternionToRotation(const T *const q, T *rot) {
ceres::QuaternionToRotation(q, rot);
}
template <typename T>
Eigen::Matrix<T, 4, 4> QuaternionMultMatLeft(const Eigen::Quaternion<T> &q) {
return (Eigen::Matrix<T, 4, 4>() << q.w(), -q.z(), q.y(), q.x(), q.z(), q.w(),
-q.x(), q.y(), -q.y(), q.x(), q.w(), q.z(), -q.x(), -q.y(), -q.z(),
q.w())
.finished();
}
template <typename T>
Eigen::Matrix<T, 4, 4> QuaternionMultMatRight(const Eigen::Quaternion<T> &q) {
return (Eigen::Matrix<T, 4, 4>() << q.w(), q.z(), -q.y(), q.x(), -q.z(),
q.w(), q.x(), q.y(), q.y(), -q.x(), q.w(), q.z(), -q.x(), -q.y(),
-q.z(), q.w())
.finished();
}
/// @param theta - rotation about screw axis
/// @param d - projection of tvec on the rotation axis
/// @param l - screw axis direction
/// @param m - screw axis moment
template <typename T>
void AngleAxisAndTranslationToScrew(
const Eigen::Matrix<T, 3, 1> &rvec, const Eigen::Matrix<T, 3, 1> &tvec,
T &theta, T &d, Eigen::Matrix<T, 3, 1> &l, Eigen::Matrix<T, 3, 1> &m) {
theta = rvec.norm();
if (theta == 0) {
l.setZero();
m.setZero();
std::cout << "Warning: Undefined screw! Returned 0. " << std::endl;
return;  // screw is undefined; avoid dividing by tan(0) below
}
l = rvec.normalized();
Eigen::Matrix<T, 3, 1> t = tvec;
d = t.transpose() * l;
// point on screw axis - projection of origin on screw axis
Eigen::Matrix<T, 3, 1> c;
c = 0.5 * (t - d * l + (1.0 / tan(theta / 2.0) * l).cross(t));
// c and hence the screw axis is not defined if theta is either 0 or M_PI
m = c.cross(l);
}
template <typename T>
Eigen::Matrix<T, 3, 3> RPY2mat(T roll, T pitch, T yaw) {
Eigen::Matrix<T, 3, 3> m;
T cr = cos(roll);
T sr = sin(roll);
T cp = cos(pitch);
T sp = sin(pitch);
T cy = cos(yaw);
T sy = sin(yaw);
m(0, 0) = cy * cp;
m(0, 1) = cy * sp * sr - sy * cr;
m(0, 2) = cy * sp * cr + sy * sr;
m(1, 0) = sy * cp;
m(1, 1) = sy * sp * sr + cy * cr;
m(1, 2) = sy * sp * cr - cy * sr;
m(2, 0) = -sp;
m(2, 1) = cp * sr;
m(2, 2) = cp * cr;
return m;
}
template <typename T>
void mat2RPY(const Eigen::Matrix<T, 3, 3> &m, T &roll, T &pitch, T &yaw) {
roll = atan2(m(2, 1), m(2, 2));
pitch = atan2(-m(2, 0), sqrt(m(2, 1) * m(2, 1) + m(2, 2) * m(2, 2)));
yaw = atan2(m(1, 0), m(0, 0));
}
template <typename T>
Eigen::Matrix<T, 4, 4> homogeneousTransform(
const Eigen::Matrix<T, 3, 3> &R, const Eigen::Matrix<T, 3, 1> &t) {
Eigen::Matrix<T, 4, 4> H;
H.setIdentity();
H.block(0, 0, 3, 3) = R;
H.block(0, 3, 3, 1) = t;
return H;
}
template <typename T>
Eigen::Matrix<T, 4, 4> poseWithCartesianTranslation(
const T *const q, const T *const p) {
Eigen::Matrix<T, 4, 4> pose = Eigen::Matrix<T, 4, 4>::Identity();
T rotation[9];
ceres::QuaternionToRotation(q, rotation);
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
pose(i, j) = rotation[i * 3 + j];
}
}
pose(0, 3) = p[0];
pose(1, 3) = p[1];
pose(2, 3) = p[2];
return pose;
}
template <typename T>
Eigen::Matrix<T, 4, 4> poseWithSphericalTranslation(
const T *const q, const T *const p, const T scale = T(1.0)) {
Eigen::Matrix<T, 4, 4> pose = Eigen::Matrix<T, 4, 4>::Identity();
T rotation[9];
ceres::QuaternionToRotation(q, rotation);
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
pose(i, j) = rotation[i * 3 + j];
}
}
T theta = p[0];
T phi = p[1];
pose(0, 3) = sin(theta) * cos(phi) * scale;
pose(1, 3) = sin(theta) * sin(phi) * scale;
pose(2, 3) = cos(theta) * scale;
return pose;
}
// Returns the Sampson error of a given essential matrix and 2 image points
template <typename T>
T sampsonError(
const Eigen::Matrix<T, 3, 3> &E, const Eigen::Matrix<T, 3, 1> &p1,
const Eigen::Matrix<T, 3, 1> &p2) {
Eigen::Matrix<T, 3, 1> Ex1 = E * p1;
Eigen::Matrix<T, 3, 1> Etx2 = E.transpose() * p2;
T x2tEx1 = p2.dot(Ex1);
// compute Sampson error
T err = square(x2tEx1) / (square(Ex1(0, 0)) + square(Ex1(1, 0)) +
square(Etx2(0, 0)) + square(Etx2(1, 0)));
return err;
}
// Returns the Sampson error of a given rotation/translation and 2 image points
template <typename T>
T sampsonError(
const Eigen::Matrix<T, 3, 3> &R, const Eigen::Matrix<T, 3, 1> &t,
const Eigen::Matrix<T, 3, 1> &p1, const Eigen::Matrix<T, 3, 1> &p2) {
// construct essential matrix
Eigen::Matrix<T, 3, 3> E = skew(t) * R;
Eigen::Matrix<T, 3, 1> Ex1 = E * p1;
Eigen::Matrix<T, 3, 1> Etx2 = E.transpose() * p2;
T x2tEx1 = p2.dot(Ex1);
// compute Sampson error
T err = square(x2tEx1) / (square(Ex1(0, 0)) + square(Ex1(1, 0)) +
square(Etx2(0, 0)) + square(Etx2(1, 0)));
return err;
}
// Returns the Sampson error of a given rotation/translation and 2 image points
template <typename T>
T sampsonError(
const Eigen::Matrix<T, 4, 4> &H, const Eigen::Matrix<T, 3, 1> &p1,
const Eigen::Matrix<T, 3, 1> &p2) {
Eigen::Matrix<T, 3, 3> R = H.block(0, 0, 3, 3);
Eigen::Matrix<T, 3, 1> t = H.block(0, 3, 3, 1);
return sampsonError(R, t, p1, p2);
}
template <typename T>
Eigen::Matrix<T, 3, 1> transformPoint(
const Eigen::Matrix<T, 4, 4> &H, const Eigen::Matrix<T, 3, 1> &P) {
Eigen::Matrix<T, 3, 1> P_trans =
H.block(0, 0, 3, 3) * P + H.block(0, 3, 3, 1);
return P_trans;
}
template <typename T>
Eigen::Matrix<T, 4, 4> estimate3DRigidTransform(
const std::vector<Eigen::Matrix<T, 3, 1>,
Eigen::aligned_allocator<Eigen::Matrix<T, 3, 1> > >
&points1,
const std::vector<Eigen::Matrix<T, 3, 1>,
Eigen::aligned_allocator<Eigen::Matrix<T, 3, 1> > >
&points2) {
// compute centroids
Eigen::Matrix<T, 3, 1> c1, c2;
c1.setZero();
c2.setZero();
for (size_t i = 0; i < points1.size(); ++i) {
c1 += points1.at(i);
c2 += points2.at(i);
}
c1 /= points1.size();
c2 /= points1.size();
Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> X(3, points1.size());
Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> Y(3, points1.size());
for (size_t i = 0; i < points1.size(); ++i) {
X.col(i) = points1.at(i) - c1;
Y.col(i) = points2.at(i) - c2;
}
Eigen::Matrix<T, 3, 3> H = X * Y.transpose();
Eigen::JacobiSVD<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> > svd(
H, Eigen::ComputeFullU | Eigen::ComputeFullV);
Eigen::Matrix<T, 3, 3> U = svd.matrixU();
Eigen::Matrix<T, 3, 3> V = svd.matrixV();
if (U.determinant() * V.determinant() < 0.0) {
V.col(2) *= -1.0;
}
Eigen::Matrix<T, 3, 3> R = V * U.transpose();
Eigen::Matrix<T, 3, 1> t = c2 - R * c1;
return homogeneousTransform(R, t);
}
template <typename T>
Eigen::Matrix<T, 4, 4> estimate3DRigidSimilarityTransform(
const std::vector<Eigen::Matrix<T, 3, 1>,
Eigen::aligned_allocator<Eigen::Matrix<T, 3, 1> > >
&points1,
const std::vector<Eigen::Matrix<T, 3, 1>,
Eigen::aligned_allocator<Eigen::Matrix<T, 3, 1> > >
&points2) {
// compute centroids
Eigen::Matrix<T, 3, 1> c1, c2;
c1.setZero();
c2.setZero();
for (size_t i = 0; i < points1.size(); ++i) {
c1 += points1.at(i);
c2 += points2.at(i);
}
c1 /= points1.size();
c2 /= points1.size();
Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> X(3, points1.size());
Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> Y(3, points1.size());
for (size_t i = 0; i < points1.size(); ++i) {
X.col(i) = points1.at(i) - c1;
Y.col(i) = points2.at(i) - c2;
}
Eigen::Matrix<T, 3, 3> H = X * Y.transpose();
Eigen::JacobiSVD<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> > svd(
H, Eigen::ComputeFullU | Eigen::ComputeFullV);
Eigen::Matrix<T, 3, 3> U = svd.matrixU();
Eigen::Matrix<T, 3, 3> V = svd.matrixV();
if (U.determinant() * V.determinant() < 0.0) {
V.col(2) *= -1.0;
}
Eigen::Matrix<T, 3, 3> R = V * U.transpose();
std::vector<Eigen::Matrix<T, 3, 1>,
Eigen::aligned_allocator<Eigen::Matrix<T, 3, 1> > >
rotatedPoints1(points1.size());
for (size_t i = 0; i < points1.size(); ++i) {
rotatedPoints1.at(i) = R * (points1.at(i) - c1);
}
double sum_ss = 0.0, sum_tt = 0.0;
for (size_t i = 0; i < points1.size(); ++i) {
sum_ss += (points1.at(i) - c1).squaredNorm();
sum_tt += (points2.at(i) - c2).dot(rotatedPoints1.at(i));
}
double scale = sum_tt / sum_ss;
Eigen::Matrix<T, 3, 3> sR = scale * R;
Eigen::Matrix<T, 3, 1> t = c2 - sR * c1;
return homogeneousTransform(sR, t);
}
}
#endif
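A minimal usage sketch of the helpers above (variable values hypothetical; the rigid alignment assumes matched, non-empty point sets):

#include <vector>

void eigenUtilsExample() {
  // Rotation vector -> quaternion -> back to rotation vector.
  Eigen::Vector3d rvec(0.1, -0.2, 0.3);
  Eigen::Quaterniond q = camodocal::AngleAxisToQuaternion<double>(rvec);
  Eigen::Vector3d rvec_back =
      camodocal::RotationToAngleAxis<double>(q.toRotationMatrix());
  // Rigid alignment (Kabsch-style) of two matched 3D point sets.
  std::vector<Eigen::Vector3d, Eigen::aligned_allocator<Eigen::Vector3d> >
      pts1, pts2;
  // ... fill pts1 and pts2 with corresponding points before calling ...
  Eigen::Matrix4d H = camodocal::estimate3DRigidTransform(pts1, pts2);
  (void)rvec_back;
  (void)H;
}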

View File

@@ -1,78 +0,0 @@
#ifndef CAMERACALIBRATION_H
#define CAMERACALIBRATION_H
#include <opencv2/core/core.hpp>
#include "camodocal/camera_models/Camera.h"
namespace camodocal {
class CameraCalibration {
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
CameraCalibration();
CameraCalibration(
Camera::ModelType modelType, const std::string &cameraName,
const cv::Size &imageSize, const cv::Size &boardSize, float squareSize);
void clear(void);
void addChessboardData(const std::vector<cv::Point2f> &corners);
bool calibrate(void);
int sampleCount(void) const;
std::vector<std::vector<cv::Point2f> > &imagePoints(void);
const std::vector<std::vector<cv::Point2f> > &imagePoints(void) const;
std::vector<std::vector<cv::Point3f> > &scenePoints(void);
const std::vector<std::vector<cv::Point3f> > &scenePoints(void) const;
CameraPtr &camera(void);
const CameraConstPtr camera(void) const;
Eigen::Matrix2d &measurementCovariance(void);
const Eigen::Matrix2d &measurementCovariance(void) const;
cv::Mat &cameraPoses(void);
const cv::Mat &cameraPoses(void) const;
void drawResults(std::vector<cv::Mat> &images) const;
void writeParams(const std::string &filename) const;
bool writeChessboardData(const std::string &filename) const;
bool readChessboardData(const std::string &filename);
void setVerbose(bool verbose);
private:
bool calibrateHelper(
CameraPtr &camera, std::vector<cv::Mat> &rvecs,
std::vector<cv::Mat> &tvecs) const;
void optimize(
CameraPtr &camera, std::vector<cv::Mat> &rvecs,
std::vector<cv::Mat> &tvecs) const;
template <typename T>
void readData(std::ifstream &ifs, T &data) const;
template <typename T>
void writeData(std::ofstream &ofs, T data) const;
cv::Size m_boardSize;
float m_squareSize;
CameraPtr m_camera;
cv::Mat m_cameraPoses;
std::vector<std::vector<cv::Point2f> > m_imagePoints;
std::vector<std::vector<cv::Point3f> > m_scenePoints;
Eigen::Matrix2d m_measurementCovariance;
bool m_verbose;
};
}
#endif
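A hypothetical single-camera calibration loop against the interface above (the PINHOLE enumerator comes from Camera.h, which is not shown in this diff; board and image sizes are placeholders):

#include <vector>

void calibrateFromDetectedCorners(
    const std::vector<std::vector<cv::Point2f> > &cornersPerView) {
  camodocal::CameraCalibration calib(
      camodocal::Camera::PINHOLE, "camera", cv::Size(640, 480),
      cv::Size(8, 6), 0.03f /* square size in metres */);
  for (size_t i = 0; i < cornersPerView.size(); ++i) {
    calib.addChessboardData(cornersPerView.at(i));  // one detected board per view
  }
  if (calib.calibrate()) {
    calib.writeParams("camera_calib.yaml");
  }
}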

View File

@@ -1,53 +0,0 @@
#ifndef STEREOCAMERACALIBRATION_H
#define STEREOCAMERACALIBRATION_H
#include "CameraCalibration.h"
namespace camodocal {
class StereoCameraCalibration {
public:
StereoCameraCalibration(
Camera::ModelType modelType, const std::string &cameraLeftName,
const std::string &cameraRightName, const cv::Size &imageSize,
const cv::Size &boardSize, float squareSize);
void clear(void);
void addChessboardData(
const std::vector<cv::Point2f> &cornersLeft,
const std::vector<cv::Point2f> &cornersRight);
bool calibrate(void);
int sampleCount(void) const;
const std::vector<std::vector<cv::Point2f> > &imagePointsLeft(void) const;
const std::vector<std::vector<cv::Point2f> > &imagePointsRight(void) const;
const std::vector<std::vector<cv::Point3f> > &scenePoints(void) const;
CameraPtr &cameraLeft(void);
const CameraConstPtr cameraLeft(void) const;
CameraPtr &cameraRight(void);
const CameraConstPtr cameraRight(void) const;
void drawResults(
std::vector<cv::Mat> &imagesLeft,
std::vector<cv::Mat> &imagesRight) const;
void writeParams(const std::string &directory) const;
void setVerbose(bool verbose);
private:
CameraCalibration m_calibLeft;
CameraCalibration m_calibRight;
Eigen::Quaterniond m_q;
Eigen::Vector3d m_t;
bool m_verbose;
std::vector<double> stereo_error;
};
}
#endif

View File

@@ -1,29 +0,0 @@
#ifndef CAMERAFACTORY_H
#define CAMERAFACTORY_H
#include <boost/shared_ptr.hpp>
#include <opencv2/core/core.hpp>
#include "camodocal/camera_models/Camera.h"
namespace camodocal {
class CameraFactory {
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
CameraFactory();
static boost::shared_ptr<CameraFactory> instance(void);
CameraPtr generateCamera(
Camera::ModelType modelType, const std::string &cameraName,
cv::Size imageSize) const;
CameraPtr generateCameraFromYamlFile(const std::string &filename);
private:
static boost::shared_ptr<CameraFactory> m_instance;
};
}
#endif
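Typical use of the factory singleton, loading a model previously written by the calibration code (file name hypothetical):

camodocal::CameraPtr loadCamera() {
  // Reads the model type and intrinsics from the YAML file and builds the
  // matching Camera subclass.
  return camodocal::CameraFactory::instance()->generateCameraFromYamlFile(
      "camera_calib.yaml");
}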

View File

@@ -1,205 +0,0 @@
#ifndef CATACAMERA_H
#define CATACAMERA_H
#include <opencv2/core/core.hpp>
#include <string>
#include "Camera.h"
#include "ceres/rotation.h"
namespace camodocal {
/**
* C. Mei, and P. Rives, Single View Point Omnidirectional Camera Calibration
* from Planar Grids, ICRA 2007
*/
class CataCamera : public Camera {
public:
class Parameters : public Camera::Parameters {
public:
Parameters();
Parameters(
const std::string &cameraName, int w, int h, double xi, double k1,
double k2, double p1, double p2, double gamma1, double gamma2,
double u0, double v0);
double &xi(void);
double &k1(void);
double &k2(void);
double &p1(void);
double &p2(void);
double &gamma1(void);
double &gamma2(void);
double &u0(void);
double &v0(void);
double xi(void) const;
double k1(void) const;
double k2(void) const;
double p1(void) const;
double p2(void) const;
double gamma1(void) const;
double gamma2(void) const;
double u0(void) const;
double v0(void) const;
bool readFromYamlFile(const std::string &filename);
void writeToYamlFile(const std::string &filename) const;
Parameters &operator=(const Parameters &other);
friend std::ostream &operator<<(
std::ostream &out, const Parameters &params);
private:
double m_xi;
double m_k1;
double m_k2;
double m_p1;
double m_p2;
double m_gamma1;
double m_gamma2;
double m_u0;
double m_v0;
};
CataCamera();
/**
* \brief Constructor from the projection model parameters
*/
CataCamera(
const std::string &cameraName, int imageWidth, int imageHeight, double xi,
double k1, double k2, double p1, double p2, double gamma1, double gamma2,
double u0, double v0);
/**
* \brief Constructor from the projection model parameters
*/
CataCamera(const Parameters &params);
Camera::ModelType modelType(void) const;
const std::string &cameraName(void) const;
int imageWidth(void) const;
int imageHeight(void) const;
void estimateIntrinsics(
const cv::Size &boardSize,
const std::vector<std::vector<cv::Point3f> > &objectPoints,
const std::vector<std::vector<cv::Point2f> > &imagePoints);
// Lift points from the image plane to the sphere
void liftSphere(const Eigen::Vector2d &p, Eigen::Vector3d &P) const;
//%output P
// Lift points from the image plane to the projective space
void liftProjective(const Eigen::Vector2d &p, Eigen::Vector3d &P) const;
//%output P
// Projects 3D points to the image plane (Pi function)
void spaceToPlane(const Eigen::Vector3d &P, Eigen::Vector2d &p) const;
//%output p
// Projects 3D points to the image plane (Pi function)
// and calculates jacobian
void spaceToPlane(
const Eigen::Vector3d &P, Eigen::Vector2d &p,
Eigen::Matrix<double, 2, 3> &J) const;
//%output p
//%output J
void undistToPlane(const Eigen::Vector2d &p_u, Eigen::Vector2d &p) const;
//%output p
template <typename T>
static void spaceToPlane(
const T *const params, const T *const q, const T *const t,
const Eigen::Matrix<T, 3, 1> &P, Eigen::Matrix<T, 2, 1> &p);
void distortion(const Eigen::Vector2d &p_u, Eigen::Vector2d &d_u) const;
void distortion(
const Eigen::Vector2d &p_u, Eigen::Vector2d &d_u,
Eigen::Matrix2d &J) const;
void initUndistortMap(
cv::Mat &map1, cv::Mat &map2, double fScale = 1.0) const;
cv::Mat initUndistortRectifyMap(
cv::Mat &map1, cv::Mat &map2, float fx = -1.0f, float fy = -1.0f,
cv::Size imageSize = cv::Size(0, 0), float cx = -1.0f, float cy = -1.0f,
cv::Mat rmat = cv::Mat::eye(3, 3, CV_32F)) const;
int parameterCount(void) const;
const Parameters &getParameters(void) const;
void setParameters(const Parameters &parameters);
void readParameters(const std::vector<double> &parameterVec);
void writeParameters(std::vector<double> &parameterVec) const;
void writeParametersToYamlFile(const std::string &filename) const;
std::string parametersToString(void) const;
private:
Parameters mParameters;
double m_inv_K11, m_inv_K13, m_inv_K22, m_inv_K23;
bool m_noDistortion;
};
typedef boost::shared_ptr<CataCamera> CataCameraPtr;
typedef boost::shared_ptr<const CataCamera> CataCameraConstPtr;
template <typename T>
void CataCamera::spaceToPlane(
const T *const params, const T *const q, const T *const t,
const Eigen::Matrix<T, 3, 1> &P, Eigen::Matrix<T, 2, 1> &p) {
T P_w[3];
P_w[0] = T(P(0));
P_w[1] = T(P(1));
P_w[2] = T(P(2));
// Convert quaternion from Eigen convention (x, y, z, w)
// to Ceres convention (w, x, y, z)
T q_ceres[4] = {q[3], q[0], q[1], q[2]};
T P_c[3];
ceres::QuaternionRotatePoint(q_ceres, P_w, P_c);
P_c[0] += t[0];
P_c[1] += t[1];
P_c[2] += t[2];
// project 3D object point to the image plane
T xi = params[0];
T k1 = params[1];
T k2 = params[2];
T p1 = params[3];
T p2 = params[4];
T gamma1 = params[5];
T gamma2 = params[6];
T alpha = T(0); // cameraParams.alpha();
T u0 = params[7];
T v0 = params[8];
// Transform to model plane
T len = sqrt(P_c[0] * P_c[0] + P_c[1] * P_c[1] + P_c[2] * P_c[2]);
P_c[0] /= len;
P_c[1] /= len;
P_c[2] /= len;
T u = P_c[0] / (P_c[2] + xi);
T v = P_c[1] / (P_c[2] + xi);
T rho_sqr = u * u + v * v;
T L = T(1.0) + k1 * rho_sqr + k2 * rho_sqr * rho_sqr;
T du = T(2.0) * p1 * u * v + p2 * (rho_sqr + T(2.0) * u * u);
T dv = p1 * (rho_sqr + T(2.0) * v * v) + T(2.0) * p2 * u * v;
u = L * u + du;
v = L * v + dv;
p(0) = gamma1 * (u + alpha * v) + u0;
p(1) = gamma2 * v + v0;
}
}
#endif
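A small round-trip sketch with the projection interface above (pixel values hypothetical):

void cataRoundTrip(const camodocal::CataCamera &cam) {
  Eigen::Vector2d pixel(320.0, 240.0);
  Eigen::Vector3d ray;
  cam.liftProjective(pixel, ray);      // image point -> projective ray
  Eigen::Vector2d reprojected;
  cam.spaceToPlane(ray, reprojected);  // ray -> image point (~= pixel)
  (void)reprojected;
}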

View File

@@ -1,71 +0,0 @@
#ifndef COSTFUNCTIONFACTORY_H
#define COSTFUNCTIONFACTORY_H
#include <boost/shared_ptr.hpp>
#include <opencv2/core/core.hpp>
#include "camodocal/camera_models/Camera.h"
namespace ceres {
class CostFunction;
}
namespace camodocal {
enum {
CAMERA_INTRINSICS = 1 << 0,
CAMERA_POSE = 1 << 1,
POINT_3D = 1 << 2,
ODOMETRY_INTRINSICS = 1 << 3,
ODOMETRY_3D_POSE = 1 << 4,
ODOMETRY_6D_POSE = 1 << 5,
CAMERA_ODOMETRY_TRANSFORM = 1 << 6
};
class CostFunctionFactory {
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
CostFunctionFactory();
static boost::shared_ptr<CostFunctionFactory> instance(void);
ceres::CostFunction *generateCostFunction(
const CameraConstPtr &camera, const Eigen::Vector3d &observed_P,
const Eigen::Vector2d &observed_p, int flags) const;
ceres::CostFunction *generateCostFunction(
const CameraConstPtr &camera, const Eigen::Vector3d &observed_P,
const Eigen::Vector2d &observed_p,
const Eigen::Matrix2d &sqrtPrecisionMat, int flags) const;
ceres::CostFunction *generateCostFunction(
const CameraConstPtr &camera, const Eigen::Vector2d &observed_p,
int flags, bool optimize_cam_odo_z = true) const;
ceres::CostFunction *generateCostFunction(
const CameraConstPtr &camera, const Eigen::Vector2d &observed_p,
const Eigen::Matrix2d &sqrtPrecisionMat, int flags,
bool optimize_cam_odo_z = true) const;
ceres::CostFunction *generateCostFunction(
const CameraConstPtr &camera, const Eigen::Vector3d &odo_pos,
const Eigen::Vector3d &odo_att, const Eigen::Vector2d &observed_p,
int flags, bool optimize_cam_odo_z = true) const;
ceres::CostFunction *generateCostFunction(
const CameraConstPtr &camera, const Eigen::Quaterniond &cam_odo_q,
const Eigen::Vector3d &cam_odo_t, const Eigen::Vector3d &odo_pos,
const Eigen::Vector3d &odo_att, const Eigen::Vector2d &observed_p,
int flags) const;
ceres::CostFunction *generateCostFunction(
const CameraConstPtr &cameraLeft, const CameraConstPtr &cameraRight,
const Eigen::Vector3d &observed_P, const Eigen::Vector2d &observed_p_left,
const Eigen::Vector2d &observed_p_right) const;
private:
static boost::shared_ptr<CostFunctionFactory> m_instance;
};
}
#endif
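An illustrative call building a reprojection cost for one observation, combining the flag bits declared above (sketch only; ownership and residual wiring depend on the calling optimizer code):

void buildReprojectionCost(const camodocal::CameraConstPtr &camera,
                           const Eigen::Vector3d &observed_P,
                           const Eigen::Vector2d &observed_p) {
  ceres::CostFunction *cost =
      camodocal::CostFunctionFactory::instance()->generateCostFunction(
          camera, observed_P, observed_p,
          camodocal::CAMERA_INTRINSICS | camodocal::CAMERA_POSE);
  (void)cost;  // normally handed to ceres::Problem::AddResidualBlock
}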

View File

@@ -5,7 +5,6 @@
#include <string>
#include "Camera.h"
#include "ceres/rotation.h"
namespace camodocal {
@@ -14,6 +13,25 @@ namespace camodocal {
* for Conventional, Wide-Angle, and Fish-Eye Lenses, PAMI 2006
*/
template <typename T> inline
void QuaternionRotatePoint(const T q[4], const T pt[3], T result[3]) {
// 'scale' is 1 / norm(q).
const T scale = T(1) / sqrt(q[0] * q[0] +
q[1] * q[1] +
q[2] * q[2] +
q[3] * q[3]);
// Make unit-norm version of q.
const T unit[4] = {
scale * q[0],
scale * q[1],
scale * q[2],
scale * q[3],
};
UnitQuaternionRotatePoint(unit, pt, result);
}
class EquidistantCamera : public Camera {
public:
class Parameters : public Camera::Parameters {
@@ -181,7 +199,7 @@ void EquidistantCamera::spaceToPlane(
T q_ceres[4] = {q[3], q[0], q[1], q[2]};
T P_c[3];
ceres::QuaternionRotatePoint(q_ceres, P_w, P_c);
QuaternionRotatePoint(q_ceres, P_w, P_c);
P_c[0] += t[0];
P_c[1] += t[1];
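In the hunk above, the removed lines are the ceres/rotation.h include and the ceres::QuaternionRotatePoint call; the additions are the local QuaternionRotatePoint template and the call to it. That template delegates to a UnitQuaternionRotatePoint helper which is not visible in this excerpt and is presumably added elsewhere in the header. For reference, the standard unit-quaternion rotation it would need (the same formula Ceres implements; shown here only as a sketch) is:

template <typename T> inline
void UnitQuaternionRotatePoint(const T q[4], const T pt[3], T result[3]) {
  // q is a unit quaternion in (w, x, y, z) order; rotates pt into result.
  const T t2 =  q[0] * q[1];
  const T t3 =  q[0] * q[2];
  const T t4 =  q[0] * q[3];
  const T t5 = -q[1] * q[1];
  const T t6 =  q[1] * q[2];
  const T t7 =  q[1] * q[3];
  const T t8 = -q[2] * q[2];
  const T t9 =  q[2] * q[3];
  const T t1 = -q[3] * q[3];
  result[0] = T(2) * ((t8 + t1) * pt[0] + (t6 - t4) * pt[1] + (t3 + t7) * pt[2]) + pt[0];
  result[1] = T(2) * ((t4 + t6) * pt[0] + (t5 + t1) * pt[1] + (t9 - t2) * pt[2]) + pt[1];
  result[2] = T(2) * ((t7 - t3) * pt[0] + (t2 + t9) * pt[1] + (t5 + t8) * pt[2]) + pt[2];
}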

View File

@@ -1,191 +0,0 @@
#ifndef PINHOLECAMERA_H
#define PINHOLECAMERA_H
#include <opencv2/core/core.hpp>
#include <string>
#include "Camera.h"
#include "ceres/rotation.h"
namespace camodocal {
class PinholeCamera : public Camera {
public:
class Parameters : public Camera::Parameters {
public:
Parameters();
Parameters(
const std::string &cameraName, int w, int h, double k1, double k2,
double p1, double p2, double fx, double fy, double cx, double cy);
double &k1(void);
double &k2(void);
double &p1(void);
double &p2(void);
double &fx(void);
double &fy(void);
double &cx(void);
double &cy(void);
double xi(void) const;
double k1(void) const;
double k2(void) const;
double p1(void) const;
double p2(void) const;
double fx(void) const;
double fy(void) const;
double cx(void) const;
double cy(void) const;
bool readFromYamlFile(const std::string &filename);
void writeToYamlFile(const std::string &filename) const;
Parameters &operator=(const Parameters &other);
friend std::ostream &operator<<(
std::ostream &out, const Parameters &params);
private:
double m_k1;
double m_k2;
double m_p1;
double m_p2;
double m_fx;
double m_fy;
double m_cx;
double m_cy;
};
PinholeCamera();
/**
* \brief Constructor from the projection model parameters
*/
PinholeCamera(
const std::string &cameraName, int imageWidth, int imageHeight, double k1,
double k2, double p1, double p2, double fx, double fy, double cx,
double cy);
/**
* \brief Constructor from the projection model parameters
*/
PinholeCamera(const Parameters &params);
Camera::ModelType modelType(void) const;
const std::string &cameraName(void) const;
int imageWidth(void) const;
int imageHeight(void) const;
void estimateIntrinsics(
const cv::Size &boardSize,
const std::vector<std::vector<cv::Point3f> > &objectPoints,
const std::vector<std::vector<cv::Point2f> > &imagePoints);
// Lift points from the image plane to the sphere
virtual void liftSphere(const Eigen::Vector2d &p, Eigen::Vector3d &P) const;
//%output P
// Lift points from the image plane to the projective space
void liftProjective(const Eigen::Vector2d &p, Eigen::Vector3d &P) const;
//%output P
// Projects 3D points to the image plane (Pi function)
void spaceToPlane(const Eigen::Vector3d &P, Eigen::Vector2d &p) const;
//%output p
// Projects 3D points to the image plane (Pi function)
// and calculates jacobian
void spaceToPlane(
const Eigen::Vector3d &P, Eigen::Vector2d &p,
Eigen::Matrix<double, 2, 3> &J) const;
//%output p
//%output J
void undistToPlane(const Eigen::Vector2d &p_u, Eigen::Vector2d &p) const;
//%output p
template <typename T>
static void spaceToPlane(
const T *const params, const T *const q, const T *const t,
const Eigen::Matrix<T, 3, 1> &P, Eigen::Matrix<T, 2, 1> &p);
void distortion(const Eigen::Vector2d &p_u, Eigen::Vector2d &d_u) const;
void distortion(
const Eigen::Vector2d &p_u, Eigen::Vector2d &d_u,
Eigen::Matrix2d &J) const;
void initUndistortMap(
cv::Mat &map1, cv::Mat &map2, double fScale = 1.0) const;
cv::Mat initUndistortRectifyMap(
cv::Mat &map1, cv::Mat &map2, float fx = -1.0f, float fy = -1.0f,
cv::Size imageSize = cv::Size(0, 0), float cx = -1.0f, float cy = -1.0f,
cv::Mat rmat = cv::Mat::eye(3, 3, CV_32F)) const;
int parameterCount(void) const;
const Parameters &getParameters(void) const;
void setParameters(const Parameters &parameters);
void readParameters(const std::vector<double> &parameterVec);
void writeParameters(std::vector<double> &parameterVec) const;
void writeParametersToYamlFile(const std::string &filename) const;
std::string parametersToString(void) const;
private:
Parameters mParameters;
double m_inv_K11, m_inv_K13, m_inv_K22, m_inv_K23;
bool m_noDistortion;
};
typedef boost::shared_ptr<PinholeCamera> PinholeCameraPtr;
typedef boost::shared_ptr<const PinholeCamera> PinholeCameraConstPtr;
template <typename T>
void PinholeCamera::spaceToPlane(
const T *const params, const T *const q, const T *const t,
const Eigen::Matrix<T, 3, 1> &P, Eigen::Matrix<T, 2, 1> &p) {
T P_w[3];
P_w[0] = T(P(0));
P_w[1] = T(P(1));
P_w[2] = T(P(2));
// Convert quaternion from Eigen convention (x, y, z, w)
// to Ceres convention (w, x, y, z)
T q_ceres[4] = {q[3], q[0], q[1], q[2]};
T P_c[3];
ceres::QuaternionRotatePoint(q_ceres, P_w, P_c);
P_c[0] += t[0];
P_c[1] += t[1];
P_c[2] += t[2];
// project 3D object point to the image plane
T k1 = params[0];
T k2 = params[1];
T p1 = params[2];
T p2 = params[3];
T fx = params[4];
T fy = params[5];
T alpha = T(0); // cameraParams.alpha();
T cx = params[6];
T cy = params[7];
// Transform to model plane
T u = P_c[0] / P_c[2];
T v = P_c[1] / P_c[2];
T rho_sqr = u * u + v * v;
T L = T(1.0) + k1 * rho_sqr + k2 * rho_sqr * rho_sqr;
T du = T(2.0) * p1 * u * v + p2 * (rho_sqr + T(2.0) * u * u);
T dv = p1 * (rho_sqr + T(2.0) * v * v) + T(2.0) * p2 * u * v;
u = L * u + du;
v = L * v + dv;
p(0) = fx * (u + alpha * v) + cx;
p(1) = fy * v + cy;
}
}
#endif

View File

@@ -1,364 +0,0 @@
#ifndef SCARAMUZZACAMERA_H
#define SCARAMUZZACAMERA_H
#include <opencv2/core/core.hpp>
#include <string>
#include "Camera.h"
#include "ceres/rotation.h"
namespace camodocal {
#define SCARAMUZZA_POLY_SIZE 5
#define SCARAMUZZA_INV_POLY_SIZE 20
#define SCARAMUZZA_CAMERA_NUM_PARAMS \
(SCARAMUZZA_POLY_SIZE + SCARAMUZZA_INV_POLY_SIZE + 2 /*center*/ + \
3 /*affine*/)
/**
* Scaramuzza Camera (Omnidirectional)
* https://sites.google.com/site/scarabotix/ocamcalib-toolbox
*/
class OCAMCamera : public Camera {
public:
class Parameters : public Camera::Parameters {
public:
Parameters();
double &C(void) {
return m_C;
}
double &D(void) {
return m_D;
}
double &E(void) {
return m_E;
}
double &center_x(void) {
return m_center_x;
}
double &center_y(void) {
return m_center_y;
}
double &poly(int idx) {
return m_poly[idx];
}
double &inv_poly(int idx) {
return m_inv_poly[idx];
}
double C(void) const {
return m_C;
}
double D(void) const {
return m_D;
}
double E(void) const {
return m_E;
}
double center_x(void) const {
return m_center_x;
}
double center_y(void) const {
return m_center_y;
}
double poly(int idx) const {
return m_poly[idx];
}
double inv_poly(int idx) const {
return m_inv_poly[idx];
}
bool readFromYamlFile(const std::string &filename);
void writeToYamlFile(const std::string &filename) const;
Parameters &operator=(const Parameters &other);
friend std::ostream &operator<<(
std::ostream &out, const Parameters &params);
private:
double m_poly[SCARAMUZZA_POLY_SIZE];
double m_inv_poly[SCARAMUZZA_INV_POLY_SIZE];
double m_C;
double m_D;
double m_E;
double m_center_x;
double m_center_y;
};
OCAMCamera();
/**
* \brief Constructor from the projection model parameters
*/
OCAMCamera(const Parameters &params);
Camera::ModelType modelType(void) const;
const std::string &cameraName(void) const;
int imageWidth(void) const;
int imageHeight(void) const;
void estimateIntrinsics(
const cv::Size &boardSize,
const std::vector<std::vector<cv::Point3f> > &objectPoints,
const std::vector<std::vector<cv::Point2f> > &imagePoints);
// Lift points from the image plane to the sphere
void liftSphere(const Eigen::Vector2d &p, Eigen::Vector3d &P) const;
//%output P
// Lift points from the image plane to the projective space
void liftProjective(const Eigen::Vector2d &p, Eigen::Vector3d &P) const;
//%output P
// Projects 3D points to the image plane (Pi function)
void spaceToPlane(const Eigen::Vector3d &P, Eigen::Vector2d &p) const;
//%output p
// Projects 3D points to the image plane (Pi function)
// and calculates jacobian
// void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p,
// Eigen::Matrix<double,2,3>& J) const;
//%output p
//%output J
void undistToPlane(const Eigen::Vector2d &p_u, Eigen::Vector2d &p) const;
//%output p
template <typename T>
static void spaceToPlane(
const T *const params, const T *const q, const T *const t,
const Eigen::Matrix<T, 3, 1> &P, Eigen::Matrix<T, 2, 1> &p);
template <typename T>
static void spaceToSphere(
const T *const params, const T *const q, const T *const t,
const Eigen::Matrix<T, 3, 1> &P, Eigen::Matrix<T, 3, 1> &P_s);
template <typename T>
static void LiftToSphere(
const T *const params, const Eigen::Matrix<T, 2, 1> &p,
Eigen::Matrix<T, 3, 1> &P);
template <typename T>
static void SphereToPlane(
const T *const params, const Eigen::Matrix<T, 3, 1> &P,
Eigen::Matrix<T, 2, 1> &p);
void initUndistortMap(
cv::Mat &map1, cv::Mat &map2, double fScale = 1.0) const;
cv::Mat initUndistortRectifyMap(
cv::Mat &map1, cv::Mat &map2, float fx = -1.0f, float fy = -1.0f,
cv::Size imageSize = cv::Size(0, 0), float cx = -1.0f, float cy = -1.0f,
cv::Mat rmat = cv::Mat::eye(3, 3, CV_32F)) const;
int parameterCount(void) const;
const Parameters &getParameters(void) const;
void setParameters(const Parameters &parameters);
void readParameters(const std::vector<double> &parameterVec);
void writeParameters(std::vector<double> &parameterVec) const;
void writeParametersToYamlFile(const std::string &filename) const;
std::string parametersToString(void) const;
private:
Parameters mParameters;
double m_inv_scale;
};
typedef boost::shared_ptr<OCAMCamera> OCAMCameraPtr;
typedef boost::shared_ptr<const OCAMCamera> OCAMCameraConstPtr;
template <typename T>
void OCAMCamera::spaceToPlane(
const T *const params, const T *const q, const T *const t,
const Eigen::Matrix<T, 3, 1> &P, Eigen::Matrix<T, 2, 1> &p) {
T P_c[3];
{
T P_w[3];
P_w[0] = T(P(0));
P_w[1] = T(P(1));
P_w[2] = T(P(2));
// Convert quaternion from Eigen convention (x, y, z, w)
// to Ceres convention (w, x, y, z)
T q_ceres[4] = {q[3], q[0], q[1], q[2]};
ceres::QuaternionRotatePoint(q_ceres, P_w, P_c);
P_c[0] += t[0];
P_c[1] += t[1];
P_c[2] += t[2];
}
T c = params[0];
T d = params[1];
T e = params[2];
T xc[2] = {params[3], params[4]};
// T poly[SCARAMUZZA_POLY_SIZE];
// for (int i=0; i < SCARAMUZZA_POLY_SIZE; i++)
// poly[i] = params[5+i];
T inv_poly[SCARAMUZZA_INV_POLY_SIZE];
for (int i = 0; i < SCARAMUZZA_INV_POLY_SIZE; i++)
inv_poly[i] = params[5 + SCARAMUZZA_POLY_SIZE + i];
T norm_sqr = P_c[0] * P_c[0] + P_c[1] * P_c[1];
T norm = T(0.0);
if (norm_sqr > T(0.0))
norm = sqrt(norm_sqr);
T theta = atan2(-P_c[2], norm);
T rho = T(0.0);
T theta_i = T(1.0);
for (int i = 0; i < SCARAMUZZA_INV_POLY_SIZE; i++) {
rho += theta_i * inv_poly[i];
theta_i *= theta;
}
T invNorm = T(1.0) / norm;
T xn[2] = {P_c[0] * invNorm * rho, P_c[1] * invNorm * rho};
p(0) = xn[0] * c + xn[1] * d + xc[0];
p(1) = xn[0] * e + xn[1] + xc[1];
}
template <typename T>
void OCAMCamera::spaceToSphere(
const T *const params, const T *const q, const T *const t,
const Eigen::Matrix<T, 3, 1> &P, Eigen::Matrix<T, 3, 1> &P_s) {
T P_c[3];
{
T P_w[3];
P_w[0] = T(P(0));
P_w[1] = T(P(1));
P_w[2] = T(P(2));
// Convert quaternion from Eigen convention (x, y, z, w)
// to Ceres convention (w, x, y, z)
T q_ceres[4] = {q[3], q[0], q[1], q[2]};
ceres::QuaternionRotatePoint(q_ceres, P_w, P_c);
P_c[0] += t[0];
P_c[1] += t[1];
P_c[2] += t[2];
}
// T poly[SCARAMUZZA_POLY_SIZE];
// for (int i=0; i < SCARAMUZZA_POLY_SIZE; i++)
// poly[i] = params[5+i];
T norm_sqr = P_c[0] * P_c[0] + P_c[1] * P_c[1] + P_c[2] * P_c[2];
T norm = T(0.0);
if (norm_sqr > T(0.0))
norm = sqrt(norm_sqr);
P_s(0) = P_c[0] / norm;
P_s(1) = P_c[1] / norm;
P_s(2) = P_c[2] / norm;
}
template <typename T>
void OCAMCamera::LiftToSphere(
const T *const params, const Eigen::Matrix<T, 2, 1> &p,
Eigen::Matrix<T, 3, 1> &P) {
T c = params[0];
T d = params[1];
T e = params[2];
T cc[2] = {params[3], params[4]};
T poly[SCARAMUZZA_POLY_SIZE];
for (int i = 0; i < SCARAMUZZA_POLY_SIZE; i++)
poly[i] = params[5 + i];
// Relative to Center
T p_2d[2];
p_2d[0] = T(p(0));
p_2d[1] = T(p(1));
T xc[2] = {p_2d[0] - cc[0], p_2d[1] - cc[1]};
T inv_scale = T(1.0) / (c - d * e);
// Affine Transformation
T xc_a[2];
xc_a[0] = inv_scale * (xc[0] - d * xc[1]);
xc_a[1] = inv_scale * (-e * xc[0] + c * xc[1]);
T norm_sqr = xc_a[0] * xc_a[0] + xc_a[1] * xc_a[1];
T phi = sqrt(norm_sqr);
T phi_i = T(1.0);
T z = T(0.0);
for (int i = 0; i < SCARAMUZZA_POLY_SIZE; i++) {
if (i != 1) {
z += phi_i * poly[i];
}
phi_i *= phi;
}
T p_3d[3];
p_3d[0] = xc[0];
p_3d[1] = xc[1];
p_3d[2] = -z;
T p_3d_norm_sqr = p_3d[0] * p_3d[0] + p_3d[1] * p_3d[1] + p_3d[2] * p_3d[2];
T p_3d_norm = sqrt(p_3d_norm_sqr);
P << p_3d[0] / p_3d_norm, p_3d[1] / p_3d_norm, p_3d[2] / p_3d_norm;
}
template <typename T>
void OCAMCamera::SphereToPlane(
const T *const params, const Eigen::Matrix<T, 3, 1> &P,
Eigen::Matrix<T, 2, 1> &p) {
T P_c[3];
{
P_c[0] = T(P(0));
P_c[1] = T(P(1));
P_c[2] = T(P(2));
}
T c = params[0];
T d = params[1];
T e = params[2];
T xc[2] = {params[3], params[4]};
T inv_poly[SCARAMUZZA_INV_POLY_SIZE];
for (int i = 0; i < SCARAMUZZA_INV_POLY_SIZE; i++)
inv_poly[i] = params[5 + SCARAMUZZA_POLY_SIZE + i];
T norm_sqr = P_c[0] * P_c[0] + P_c[1] * P_c[1];
T norm = T(0.0);
if (norm_sqr > T(0.0))
norm = sqrt(norm_sqr);
T theta = atan2(-P_c[2], norm);
T rho = T(0.0);
T theta_i = T(1.0);
for (int i = 0; i < SCARAMUZZA_INV_POLY_SIZE; i++) {
rho += theta_i * inv_poly[i];
theta_i *= theta;
}
T invNorm = T(1.0) / norm;
T xn[2] = {P_c[0] * invNorm * rho, P_c[1] * invNorm * rho};
p(0) = xn[0] * c + xn[1] * d + xc[0];
p(1) = xn[0] * e + xn[1] + xc[1];
}
}
#endif

View File

@@ -1,85 +0,0 @@
#ifndef CHESSBOARD_H
#define CHESSBOARD_H
#include <boost/shared_ptr.hpp>
#include <opencv2/core/core.hpp>
namespace camodocal {
// forward declarations
class ChessboardCorner;
typedef boost::shared_ptr<ChessboardCorner> ChessboardCornerPtr;
class ChessboardQuad;
typedef boost::shared_ptr<ChessboardQuad> ChessboardQuadPtr;
class Chessboard {
public:
Chessboard(cv::Size boardSize, cv::Mat &image);
void findCorners(bool useOpenCV = false);
const std::vector<cv::Point2f> &getCorners(void) const;
bool cornersFound(void) const;
const cv::Mat &getImage(void) const;
const cv::Mat &getSketch(void) const;
private:
bool findChessboardCorners(
const cv::Mat &image, const cv::Size &patternSize,
std::vector<cv::Point2f> &corners, int flags, bool useOpenCV);
bool findChessboardCornersImproved(
const cv::Mat &image, const cv::Size &patternSize,
std::vector<cv::Point2f> &corners, int flags);
void cleanFoundConnectedQuads(
std::vector<ChessboardQuadPtr> &quadGroup, cv::Size patternSize);
void findConnectedQuads(
std::vector<ChessboardQuadPtr> &quads,
std::vector<ChessboardQuadPtr> &group, int group_idx, int dilation);
// int checkQuadGroup(std::vector<ChessboardQuadPtr>& quadGroup,
// std::vector<ChessboardCornerPtr>& outCorners,
// cv::Size patternSize);
void labelQuadGroup(
std::vector<ChessboardQuadPtr> &quad_group, cv::Size patternSize,
bool firstRun);
void findQuadNeighbors(std::vector<ChessboardQuadPtr> &quads, int dilation);
int augmentBestRun(
std::vector<ChessboardQuadPtr> &candidateQuads, int candidateDilation,
std::vector<ChessboardQuadPtr> &existingQuads, int existingDilation);
void generateQuads(
std::vector<ChessboardQuadPtr> &quads, cv::Mat &image, int flags,
int dilation, bool firstRun);
bool checkQuadGroup(
std::vector<ChessboardQuadPtr> &quads,
std::vector<ChessboardCornerPtr> &corners, cv::Size patternSize);
void getQuadrangleHypotheses(
const std::vector<std::vector<cv::Point> > &contours,
std::vector<std::pair<float, int> > &quads, int classId) const;
bool checkChessboard(const cv::Mat &image, cv::Size patternSize) const;
bool checkBoardMonotony(
std::vector<ChessboardCornerPtr> &corners, cv::Size patternSize);
bool matchCorners(
ChessboardQuadPtr &quad1, int corner1, ChessboardQuadPtr &quad2,
int corner2) const;
cv::Mat mImage;
cv::Mat mSketch;
std::vector<cv::Point2f> mCorners;
cv::Size mBoardSize;
bool mCornersFound;
};
}
#endif
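A short detection sketch using the class above (board size hypothetical):

void detectChessboard(cv::Mat &image) {
  camodocal::Chessboard board(cv::Size(8, 6), image);
  board.findCorners(/* useOpenCV = */ false);
  if (board.cornersFound()) {
    const std::vector<cv::Point2f> &corners = board.getCorners();
    // corners can now be passed to CameraCalibration::addChessboardData().
    (void)corners;
  }
}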

View File

@@ -1,39 +0,0 @@
#ifndef CHESSBOARDCORNER_H
#define CHESSBOARDCORNER_H
#include <boost/shared_ptr.hpp>
#include <opencv2/core/core.hpp>
namespace camodocal {
class ChessboardCorner;
typedef boost::shared_ptr<ChessboardCorner> ChessboardCornerPtr;
class ChessboardCorner {
public:
ChessboardCorner() : row(0), column(0), needsNeighbor(true), count(0) {}
float meanDist(int &n) const {
float sum = 0;
n = 0;
for (int i = 0; i < 4; ++i) {
if (neighbors[i].get()) {
float dx = neighbors[i]->pt.x - pt.x;
float dy = neighbors[i]->pt.y - pt.y;
sum += sqrt(dx * dx + dy * dy);
n++;
}
}
return sum / std::max(n, 1);
}
cv::Point2f pt; // X and y coordinates
int row; // Row and column of the corner
int column; // in the found pattern
bool needsNeighbor; // Does the corner require a neighbor?
int count; // number of corner neighbors
ChessboardCornerPtr neighbors[4]; // pointer to all corner neighbors
};
}
#endif

View File

@@ -1,27 +0,0 @@
#ifndef CHESSBOARDQUAD_H
#define CHESSBOARDQUAD_H
#include <boost/shared_ptr.hpp>
#include "camodocal/chessboard/ChessboardCorner.h"
namespace camodocal {
class ChessboardQuad;
typedef boost::shared_ptr<ChessboardQuad> ChessboardQuadPtr;
class ChessboardQuad {
public:
ChessboardQuad()
: count(0), group_idx(-1), edge_len(FLT_MAX), labeled(false) {}
int count; // Number of quad neighbors
int group_idx; // Quad group ID
float edge_len; // Smallest side length^2
ChessboardCornerPtr corners[4]; // Coordinates of quad corners
ChessboardQuadPtr neighbors[4]; // Pointers of quad neighbors
bool labeled; // Has this quad been labeled?
};
}
#endif

View File

@@ -1,336 +0,0 @@
/* dynamo:- Event driven molecular dynamics simulator
http://www.marcusbannerman.co.uk/dynamo
Copyright (C) 2011 Marcus N Campbell Bannerman <m.bannerman@gmail.com>
This program is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 3 as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <boost/numeric/ublas/lu.hpp>
#include <boost/numeric/ublas/matrix.hpp>
#include <boost/numeric/ublas/triangular.hpp>
#include <boost/numeric/ublas/vector.hpp>
#include <boost/numeric/ublas/vector_proxy.hpp>
#include <exception>
namespace ublas = boost::numeric::ublas;
class Spline : private std::vector<std::pair<double, double> > {
public:
// The boundary conditions available
enum BC_type { FIXED_1ST_DERIV_BC, FIXED_2ND_DERIV_BC, PARABOLIC_RUNOUT_BC };
enum Spline_type { LINEAR, CUBIC };
// The default constructor sets fixed (zero) second-derivative boundary
// conditions at the lower and upper end points; use setLowBC/setHighBC
// to change them
Spline()
: _valid(false),
_BCLow(FIXED_2ND_DERIV_BC),
_BCHigh(FIXED_2ND_DERIV_BC),
_BCLowVal(0),
_BCHighVal(0),
_type(CUBIC) {}
typedef std::vector<std::pair<double, double> > base;
typedef base::const_iterator const_iterator;
// Standard STL read-only container stuff
const_iterator begin() const {
return base::begin();
}
const_iterator end() const {
return base::end();
}
void clear() {
_valid = false;
base::clear();
_data.clear();
}
size_t size() const {
return base::size();
}
size_t max_size() const {
return base::max_size();
}
size_t capacity() const {
return base::capacity();
}
bool empty() const {
return base::empty();
}
// Add a point to the spline, and invalidate it so it's
// recalculated on the next access
inline void addPoint(double x, double y) {
_valid = false;
base::push_back(std::pair<double, double>(x, y));
}
// Reset the boundary conditions
inline void setLowBC(BC_type BC, double val = 0) {
_BCLow = BC;
_BCLowVal = val;
_valid = false;
}
inline void setHighBC(BC_type BC, double val = 0) {
_BCHigh = BC;
_BCHighVal = val;
_valid = false;
}
void setType(Spline_type type) {
_type = type;
_valid = false;
}
// Evaluate the spline at xval, regenerating the coefficients first if the
// point set has changed since the last calculation
double operator()(double xval) {
if (!_valid)
generate();
// Special cases when we're outside the range of the spline points
if (xval <= x(0))
return lowCalc(xval);
if (xval >= x(size() - 1))
return highCalc(xval);
// Check all intervals except the last one
for (std::vector<SplineData>::const_iterator iPtr = _data.begin();
iPtr != _data.end() - 1; ++iPtr)
if ((xval >= iPtr->x) && (xval <= (iPtr + 1)->x))
return splineCalc(iPtr, xval);
return splineCalc(_data.end() - 1, xval);
}
private:
///////PRIVATE DATA MEMBERS
struct SplineData {
double x, a, b, c, d;
};
// vector of calculated spline data
std::vector<SplineData> _data;
// Second derivative at each point
ublas::vector<double> _ddy;
// Tracks whether the spline parameters have been calculated for
// the current set of points
bool _valid;
// The boundary conditions
BC_type _BCLow, _BCHigh;
// The values of the boundary conditions
double _BCLowVal, _BCHighVal;
Spline_type _type;
///////PRIVATE FUNCTIONS
// Function to calculate the value of a given spline at a point xval
inline double splineCalc(
std::vector<SplineData>::const_iterator i, double xval) {
const double lx = xval - i->x;
return ((i->a * lx + i->b) * lx + i->c) * lx + i->d;
}
inline double lowCalc(double xval) {
const double lx = xval - x(0);
if (_type == LINEAR)
return lx * _BCHighVal + y(0);
const double firstDeriv =
(y(1) - y(0)) / h(0) - 2 * h(0) * (_data[0].b + 2 * _data[1].b) / 6;
switch (_BCLow) {
case FIXED_1ST_DERIV_BC:
return lx * _BCLowVal + y(0);
case FIXED_2ND_DERIV_BC:
return lx * lx * _BCLowVal + firstDeriv * lx + y(0);
case PARABOLIC_RUNOUT_BC:
return lx * lx * _ddy[0] + lx * firstDeriv + y(0);
}
throw std::runtime_error("Unknown BC");
}
inline double highCalc(double xval) {
const double lx = xval - x(size() - 1);
if (_type == LINEAR)
return lx * _BCHighVal + y(size() - 1);
const double firstDeriv =
2 * h(size() - 2) * (_ddy[size() - 2] + 2 * _ddy[size() - 1]) / 6 +
(y(size() - 1) - y(size() - 2)) / h(size() - 2);
switch (_BCHigh) {
case FIXED_1ST_DERIV_BC:
return lx * _BCHighVal + y(size() - 1);
case FIXED_2ND_DERIV_BC:
return lx * lx * _BCHighVal + firstDeriv * lx + y(size() - 1);
case PARABOLIC_RUNOUT_BC:
return lx * lx * _ddy[size() - 1] + lx * firstDeriv + y(size() - 1);
}
throw std::runtime_error("Unknown BC");
}
// These just provide access to the point data in a clean way
inline double x(size_t i) const {
return operator[](i).first;
}
inline double y(size_t i) const {
return operator[](i).second;
}
inline double h(size_t i) const {
return x(i + 1) - x(i);
}
// Invert an arbitrary matrix using the boost uBLAS library
template <class T>
bool InvertMatrix(ublas::matrix<T> A, ublas::matrix<T> &inverse) {
using namespace ublas;
// create a permutation matrix for the LU-factorization
permutation_matrix<std::size_t> pm(A.size1());
// perform LU-factorization
int res = lu_factorize(A, pm);
if (res != 0)
return false;
// create identity matrix of "inverse"
inverse.assign(ublas::identity_matrix<T>(A.size1()));
// backsubstitute to get the inverse
lu_substitute(A, pm, inverse);
return true;
}
// This function will recalculate the spline parameters and store
// them in _data, ready for spline interpolation
void generate() {
if (size() < 2)
throw std::runtime_error("Spline requires at least 2 points");
// If any spline points are at the same x location, we have to
// just slightly separate them
{
bool testPassed(false);
while (!testPassed) {
testPassed = true;
std::sort(base::begin(), base::end());
for (base::iterator iPtr = base::begin(); iPtr != base::end() - 1;
++iPtr)
if (iPtr->first == (iPtr + 1)->first) {
if ((iPtr + 1)->first != 0)
(iPtr + 1)->first += (iPtr + 1)->first *
std::numeric_limits<double>::epsilon() * 10;
else
(iPtr + 1)->first = std::numeric_limits<double>::epsilon() * 10;
testPassed = false;
break;
}
}
}
const size_t e = size() - 1;
switch (_type) {
case LINEAR: {
_data.resize(e);
for (size_t i(0); i < e; ++i) {
_data[i].x = x(i);
_data[i].a = 0;
_data[i].b = 0;
_data[i].c = (y(i + 1) - y(i)) / (x(i + 1) - x(i));
_data[i].d = y(i);
}
break;
}
case CUBIC: {
ublas::matrix<double> A(size(), size());
for (size_t yv(0); yv <= e; ++yv)
for (size_t xv(0); xv <= e; ++xv)
A(xv, yv) = 0;
for (size_t i(1); i < e; ++i) {
A(i - 1, i) = h(i - 1);
A(i, i) = 2 * (h(i - 1) + h(i));
A(i + 1, i) = h(i);
}
ublas::vector<double> C(size());
for (size_t xv(0); xv <= e; ++xv)
C(xv) = 0;
for (size_t i(1); i < e; ++i)
C(i) = 6 * ((y(i + 1) - y(i)) / h(i) - (y(i) - y(i - 1)) / h(i - 1));
// Boundary conditions
switch (_BCLow) {
case FIXED_1ST_DERIV_BC:
C(0) = 6 * ((y(1) - y(0)) / h(0) - _BCLowVal);
A(0, 0) = 2 * h(0);
A(1, 0) = h(0);
break;
case FIXED_2ND_DERIV_BC:
C(0) = _BCLowVal;
A(0, 0) = 1;
break;
case PARABOLIC_RUNOUT_BC:
C(0) = 0;
A(0, 0) = 1;
A(1, 0) = -1;
break;
}
switch (_BCHigh) {
case FIXED_1ST_DERIV_BC:
C(e) = 6 * (_BCHighVal - (y(e) - y(e - 1)) / h(e - 1));
A(e, e) = 2 * h(e - 1);
A(e - 1, e) = h(e - 1);
break;
case FIXED_2ND_DERIV_BC:
C(e) = _BCHighVal;
A(e, e) = 1;
break;
case PARABOLIC_RUNOUT_BC:
C(e) = 0;
A(e, e) = 1;
A(e - 1, e) = -1;
break;
}
ublas::matrix<double> AInv(size(), size());
InvertMatrix(A, AInv);
_ddy = ublas::prod(C, AInv);
_data.resize(size() - 1);
for (size_t i(0); i < e; ++i) {
_data[i].x = x(i);
_data[i].a = (_ddy(i + 1) - _ddy(i)) / (6 * h(i));
_data[i].b = _ddy(i) / 2;
_data[i].c = (y(i + 1) - y(i)) / h(i) - _ddy(i + 1) * h(i) / 6 -
_ddy(i) * h(i) / 3;
_data[i].d = y(i);
}
}
}
_valid = true;
}
};
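A minimal usage sketch of the spline above (sample values hypothetical):

void splineExample() {
  Spline s;
  s.addPoint(0.0, 0.0);
  s.addPoint(1.0, 1.0);
  s.addPoint(2.0, 0.0);
  s.setLowBC(Spline::PARABOLIC_RUNOUT_BC);
  s.setHighBC(Spline::PARABOLIC_RUNOUT_BC);
  double y = s(1.5);  // first call generates the coefficients, then interpolates
  (void)y;
}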

View File

@@ -1,36 +0,0 @@
#ifndef EIGENQUATERNIONPARAMETERIZATION_H
#define EIGENQUATERNIONPARAMETERIZATION_H
#include "ceres/local_parameterization.h"
namespace camodocal {
class EigenQuaternionParameterization : public ceres::LocalParameterization {
public:
virtual ~EigenQuaternionParameterization() {}
virtual bool Plus(
const double *x, const double *delta, double *x_plus_delta) const;
virtual bool ComputeJacobian(const double *x, double *jacobian) const;
virtual int GlobalSize() const {
return 4;
}
virtual int LocalSize() const {
return 3;
}
private:
template <typename T>
void EigenQuaternionProduct(const T z[4], const T w[4], T zw[4]) const;
};
template <typename T>
void EigenQuaternionParameterization::EigenQuaternionProduct(
const T z[4], const T w[4], T zw[4]) const {
zw[0] = z[3] * w[0] + z[0] * w[3] + z[1] * w[2] - z[2] * w[1];
zw[1] = z[3] * w[1] - z[0] * w[2] + z[1] * w[3] + z[2] * w[0];
zw[2] = z[3] * w[2] + z[0] * w[1] - z[1] * w[0] + z[2] * w[3];
zw[3] = z[3] * w[3] - z[0] * w[0] - z[1] * w[1] - z[2] * w[2];
}
}
#endif
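Typical registration with a Ceres problem (sketch only). The point of this parameterization is to keep the quaternion block in Eigen's (x, y, z, w) storage order, which is why the product above reads w from index 3:

#include "ceres/ceres.h"

void addQuaternionBlock(ceres::Problem &problem, double *q_xyzw) {
  // Ceres takes ownership of the parameterization object.
  problem.AddParameterBlock(
      q_xyzw, 4, new camodocal::EigenQuaternionParameterization);
}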

View File

@@ -1,394 +0,0 @@
#ifndef EIGENUTILS_H
#define EIGENUTILS_H
#include "eigen3/Eigen/Dense"
#include "camodocal/gpl/gpl.h"
#include "ceres/rotation.h"
namespace camodocal {
// Returns the 3D cross product skew symmetric matrix of a given 3D vector
template <typename T>
Eigen::Matrix<T, 3, 3> skew(const Eigen::Matrix<T, 3, 1> &vec) {
return (Eigen::Matrix<T, 3, 3>() << T(0), -vec(2), vec(1), vec(2), T(0),
-vec(0), -vec(1), vec(0), T(0))
.finished();
}
template <typename Derived>
typename Eigen::MatrixBase<Derived>::PlainObject sqrtm(
const Eigen::MatrixBase<Derived> &A) {
Eigen::SelfAdjointEigenSolver<typename Derived::PlainObject> es(A);
return es.operatorSqrt();
}
template <typename T>
Eigen::Matrix<T, 3, 3> AngleAxisToRotationMatrix(
const Eigen::Matrix<T, 3, 1> &rvec) {
T angle = rvec.norm();
if (angle == T(0)) {
return Eigen::Matrix<T, 3, 3>::Identity();
}
Eigen::Matrix<T, 3, 1> axis;
axis = rvec.normalized();
Eigen::Matrix<T, 3, 3> rmat;
rmat = Eigen::AngleAxis<T>(angle, axis);
return rmat;
}
template <typename T>
Eigen::Quaternion<T> AngleAxisToQuaternion(const Eigen::Matrix<T, 3, 1> &rvec) {
Eigen::Matrix<T, 3, 3> rmat = AngleAxisToRotationMatrix<T>(rvec);
return Eigen::Quaternion<T>(rmat);
}
template <typename T>
void AngleAxisToQuaternion(const Eigen::Matrix<T, 3, 1> &rvec, T *q) {
Eigen::Quaternion<T> quat = AngleAxisToQuaternion<T>(rvec);
q[0] = quat.x();
q[1] = quat.y();
q[2] = quat.z();
q[3] = quat.w();
}
template <typename T>
Eigen::Matrix<T, 3, 1> RotationToAngleAxis(const Eigen::Matrix<T, 3, 3> &rmat) {
Eigen::AngleAxis<T> angleaxis;
angleaxis.fromRotationMatrix(rmat);
return angleaxis.angle() * angleaxis.axis();
}
template <typename T>
void QuaternionToAngleAxis(const T *const q, Eigen::Matrix<T, 3, 1> &rvec) {
Eigen::Quaternion<T> quat(q[3], q[0], q[1], q[2]);
Eigen::Matrix<T, 3, 3> rmat = quat.toRotationMatrix();
Eigen::AngleAxis<T> angleaxis;
angleaxis.fromRotationMatrix(rmat);
rvec = angleaxis.angle() * angleaxis.axis();
}
template <typename T>
Eigen::Matrix<T, 3, 3> QuaternionToRotation(const T *const q) {
T R[9];
ceres::QuaternionToRotation(q, R);
Eigen::Matrix<T, 3, 3> rmat;
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
rmat(i, j) = R[i * 3 + j];
}
}
return rmat;
}
template <typename T>
void QuaternionToRotation(const T *const q, T *rot) {
ceres::QuaternionToRotation(q, rot);
}
template <typename T>
Eigen::Matrix<T, 4, 4> QuaternionMultMatLeft(const Eigen::Quaternion<T> &q) {
return (Eigen::Matrix<T, 4, 4>() << q.w(), -q.z(), q.y(), q.x(), q.z(), q.w(),
-q.x(), q.y(), -q.y(), q.x(), q.w(), q.z(), -q.x(), -q.y(), -q.z(),
q.w())
.finished();
}
template <typename T>
Eigen::Matrix<T, 4, 4> QuaternionMultMatRight(const Eigen::Quaternion<T> &q) {
return (Eigen::Matrix<T, 4, 4>() << q.w(), q.z(), -q.y(), q.x(), -q.z(),
q.w(), q.x(), q.y(), q.y(), -q.x(), q.w(), q.z(), -q.x(), -q.y(),
-q.z(), q.w())
.finished();
}
/// @param theta - rotation about screw axis
/// @param d - projection of tvec on the rotation axis
/// @param l - screw axis direction
/// @param m - screw axis moment
template <typename T>
void AngleAxisAndTranslationToScrew(
const Eigen::Matrix<T, 3, 1> &rvec, const Eigen::Matrix<T, 3, 1> &tvec,
T &theta, T &d, Eigen::Matrix<T, 3, 1> &l, Eigen::Matrix<T, 3, 1> &m) {
theta = rvec.norm();
if (theta == 0) {
l.setZero();
m.setZero();
std::cout << "Warning: Undefined screw! Returned 0. " << std::endl;
return;  // screw is undefined; avoid dividing by tan(0) below
}
l = rvec.normalized();
Eigen::Matrix<T, 3, 1> t = tvec;
d = t.transpose() * l;
// point on screw axis - projection of origin on screw axis
Eigen::Matrix<T, 3, 1> c;
c = 0.5 * (t - d * l + (1.0 / tan(theta / 2.0) * l).cross(t));
// c and hence the screw axis is not defined if theta is either 0 or M_PI
m = c.cross(l);
}
template <typename T>
Eigen::Matrix<T, 3, 3> RPY2mat(T roll, T pitch, T yaw) {
Eigen::Matrix<T, 3, 3> m;
T cr = cos(roll);
T sr = sin(roll);
T cp = cos(pitch);
T sp = sin(pitch);
T cy = cos(yaw);
T sy = sin(yaw);
m(0, 0) = cy * cp;
m(0, 1) = cy * sp * sr - sy * cr;
m(0, 2) = cy * sp * cr + sy * sr;
m(1, 0) = sy * cp;
m(1, 1) = sy * sp * sr + cy * cr;
m(1, 2) = sy * sp * cr - cy * sr;
m(2, 0) = -sp;
m(2, 1) = cp * sr;
m(2, 2) = cp * cr;
return m;
}
template <typename T>
void mat2RPY(const Eigen::Matrix<T, 3, 3> &m, T &roll, T &pitch, T &yaw) {
roll = atan2(m(2, 1), m(2, 2));
pitch = atan2(-m(2, 0), sqrt(m(2, 1) * m(2, 1) + m(2, 2) * m(2, 2)));
yaw = atan2(m(1, 0), m(0, 0));
}
template <typename T>
Eigen::Matrix<T, 4, 4> homogeneousTransform(
const Eigen::Matrix<T, 3, 3> &R, const Eigen::Matrix<T, 3, 1> &t) {
Eigen::Matrix<T, 4, 4> H;
H.setIdentity();
H.block(0, 0, 3, 3) = R;
H.block(0, 3, 3, 1) = t;
return H;
}
template <typename T>
Eigen::Matrix<T, 4, 4> poseWithCartesianTranslation(
const T *const q, const T *const p) {
Eigen::Matrix<T, 4, 4> pose = Eigen::Matrix<T, 4, 4>::Identity();
T rotation[9];
ceres::QuaternionToRotation(q, rotation);
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
pose(i, j) = rotation[i * 3 + j];
}
}
pose(0, 3) = p[0];
pose(1, 3) = p[1];
pose(2, 3) = p[2];
return pose;
}
template <typename T>
Eigen::Matrix<T, 4, 4> poseWithSphericalTranslation(
const T *const q, const T *const p, const T scale = T(1.0)) {
Eigen::Matrix<T, 4, 4> pose = Eigen::Matrix<T, 4, 4>::Identity();
T rotation[9];
ceres::QuaternionToRotation(q, rotation);
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
pose(i, j) = rotation[i * 3 + j];
}
}
T theta = p[0];
T phi = p[1];
pose(0, 3) = sin(theta) * cos(phi) * scale;
pose(1, 3) = sin(theta) * sin(phi) * scale;
pose(2, 3) = cos(theta) * scale;
return pose;
}
// Returns the Sampson error of a given essential matrix and 2 image points
template <typename T>
T sampsonError(
const Eigen::Matrix<T, 3, 3> &E, const Eigen::Matrix<T, 3, 1> &p1,
const Eigen::Matrix<T, 3, 1> &p2) {
Eigen::Matrix<T, 3, 1> Ex1 = E * p1;
Eigen::Matrix<T, 3, 1> Etx2 = E.transpose() * p2;
T x2tEx1 = p2.dot(Ex1);
// compute Sampson error
T err = square(x2tEx1) / (square(Ex1(0, 0)) + square(Ex1(1, 0)) +
square(Etx2(0, 0)) + square(Etx2(1, 0)));
return err;
}
// Returns the Sampson error of a given rotation/translation and 2 image points
template <typename T>
T sampsonError(
const Eigen::Matrix<T, 3, 3> &R, const Eigen::Matrix<T, 3, 1> &t,
const Eigen::Matrix<T, 3, 1> &p1, const Eigen::Matrix<T, 3, 1> &p2) {
// construct essential matrix
Eigen::Matrix<T, 3, 3> E = skew(t) * R;
Eigen::Matrix<T, 3, 1> Ex1 = E * p1;
Eigen::Matrix<T, 3, 1> Etx2 = E.transpose() * p2;
T x2tEx1 = p2.dot(Ex1);
// compute Sampson error
T err = square(x2tEx1) / (square(Ex1(0, 0)) + square(Ex1(1, 0)) +
square(Etx2(0, 0)) + square(Etx2(1, 0)));
return err;
}
// Returns the Sampson error of a given rotation/translation and 2 image points
template <typename T>
T sampsonError(
const Eigen::Matrix<T, 4, 4> &H, const Eigen::Matrix<T, 3, 1> &p1,
const Eigen::Matrix<T, 3, 1> &p2) {
Eigen::Matrix<T, 3, 3> R = H.block(0, 0, 3, 3);
Eigen::Matrix<T, 3, 1> t = H.block(0, 3, 3, 1);
return sampsonError(R, t, p1, p2);
}
template <typename T>
Eigen::Matrix<T, 3, 1> transformPoint(
const Eigen::Matrix<T, 4, 4> &H, const Eigen::Matrix<T, 3, 1> &P) {
Eigen::Matrix<T, 3, 1> P_trans =
H.block(0, 0, 3, 3) * P + H.block(0, 3, 3, 1);
return P_trans;
}
template <typename T>
Eigen::Matrix<T, 4, 4> estimate3DRigidTransform(
const std::vector<Eigen::Matrix<T, 3, 1>,
Eigen::aligned_allocator<Eigen::Matrix<T, 3, 1> > >
&points1,
const std::vector<Eigen::Matrix<T, 3, 1>,
Eigen::aligned_allocator<Eigen::Matrix<T, 3, 1> > >
&points2) {
// compute centroids
Eigen::Matrix<T, 3, 1> c1, c2;
c1.setZero();
c2.setZero();
for (size_t i = 0; i < points1.size(); ++i) {
c1 += points1.at(i);
c2 += points2.at(i);
}
c1 /= points1.size();
c2 /= points1.size();
Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> X(3, points1.size());
Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> Y(3, points1.size());
for (size_t i = 0; i < points1.size(); ++i) {
X.col(i) = points1.at(i) - c1;
Y.col(i) = points2.at(i) - c2;
}
Eigen::Matrix<T, 3, 3> H = X * Y.transpose();
Eigen::JacobiSVD<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> > svd(
H, Eigen::ComputeFullU | Eigen::ComputeFullV);
Eigen::Matrix<T, 3, 3> U = svd.matrixU();
Eigen::Matrix<T, 3, 3> V = svd.matrixV();
if (U.determinant() * V.determinant() < 0.0) {
V.col(2) *= -1.0;
}
Eigen::Matrix<T, 3, 3> R = V * U.transpose();
Eigen::Matrix<T, 3, 1> t = c2 - R * c1;
return homogeneousTransform(R, t);
}
template <typename T>
Eigen::Matrix<T, 4, 4> estimate3DRigidSimilarityTransform(
const std::vector<Eigen::Matrix<T, 3, 1>,
Eigen::aligned_allocator<Eigen::Matrix<T, 3, 1> > >
&points1,
const std::vector<Eigen::Matrix<T, 3, 1>,
Eigen::aligned_allocator<Eigen::Matrix<T, 3, 1> > >
&points2) {
// compute centroids
Eigen::Matrix<T, 3, 1> c1, c2;
c1.setZero();
c2.setZero();
for (size_t i = 0; i < points1.size(); ++i) {
c1 += points1.at(i);
c2 += points2.at(i);
}
c1 /= points1.size();
c2 /= points1.size();
Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> X(3, points1.size());
Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> Y(3, points1.size());
for (size_t i = 0; i < points1.size(); ++i) {
X.col(i) = points1.at(i) - c1;
Y.col(i) = points2.at(i) - c2;
}
Eigen::Matrix<T, 3, 3> H = X * Y.transpose();
Eigen::JacobiSVD<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> > svd(
H, Eigen::ComputeFullU | Eigen::ComputeFullV);
Eigen::Matrix<T, 3, 3> U = svd.matrixU();
Eigen::Matrix<T, 3, 3> V = svd.matrixV();
if (U.determinant() * V.determinant() < 0.0) {
V.col(2) *= -1.0;
}
Eigen::Matrix<T, 3, 3> R = V * U.transpose();
std::vector<Eigen::Matrix<T, 3, 1>,
Eigen::aligned_allocator<Eigen::Matrix<T, 3, 1> > >
rotatedPoints1(points1.size());
for (size_t i = 0; i < points1.size(); ++i) {
rotatedPoints1.at(i) = R * (points1.at(i) - c1);
}
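// Similarity extension (Umeyama/Horn): with the rotation fixed as above, the
// least-squares scale is the ratio of the projection of the centred target
// points onto the rotated source points to the source variance.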
T sum_ss = T(0), sum_tt = T(0);
for (size_t i = 0; i < points1.size(); ++i) {
sum_ss += (points1.at(i) - c1).squaredNorm();
sum_tt += (points2.at(i) - c2).dot(rotatedPoints1.at(i));
}
T scale = sum_tt / sum_ss;
Eigen::Matrix<T, 3, 3> sR = scale * R;
Eigen::Matrix<T, 3, 1> t = c2 - sR * c1;
return homogeneousTransform(sR, t);
}
}
#endif

View File

@@ -1,35 +0,0 @@
#ifndef TRANSFORM_H
#define TRANSFORM_H
#include <boost/shared_ptr.hpp>
#include "eigen3/Eigen/Dense"
#include <stdint.h>
namespace camodocal {
class Transform {
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
Transform();
Transform(const Eigen::Matrix4d &H);
Eigen::Quaterniond &rotation(void);
const Eigen::Quaterniond &rotation(void) const;
double *rotationData(void);
const double *const rotationData(void) const;
Eigen::Vector3d &translation(void);
const Eigen::Vector3d &translation(void) const;
double *translationData(void);
const double *const translationData(void) const;
Eigen::Matrix4d toMatrix(void) const;
private:
Eigen::Quaterniond m_q;
Eigen::Vector3d m_t;
};
}
#endif

View File

@@ -1,493 +0,0 @@
#include "camodocal/calib/CameraCalibration.h"
#include <algorithm>
#include <cstdio>
#include "eigen3/Eigen/Dense"
#include <fstream>
#include <iomanip>
#include <iostream>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/core/eigen.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "camodocal/camera_models/CameraFactory.h"
#include "camodocal/camera_models/CostFunctionFactory.h"
#include "camodocal/gpl/EigenQuaternionParameterization.h"
#include "camodocal/gpl/EigenUtils.h"
#include "camodocal/sparse_graph/Transform.h"
#include "ceres/ceres.h"
namespace camodocal {
CameraCalibration::CameraCalibration()
: m_boardSize(cv::Size(0, 0)), m_squareSize(0.0f), m_verbose(false) {}
CameraCalibration::CameraCalibration(
const Camera::ModelType modelType, const std::string &cameraName,
const cv::Size &imageSize, const cv::Size &boardSize, float squareSize)
: m_boardSize(boardSize), m_squareSize(squareSize), m_verbose(false) {
m_camera = CameraFactory::instance()->generateCamera(
modelType, cameraName, imageSize);
}
void CameraCalibration::clear(void) {
m_imagePoints.clear();
m_scenePoints.clear();
}
void CameraCalibration::addChessboardData(
const std::vector<cv::Point2f> &corners) {
m_imagePoints.push_back(corners);
std::vector<cv::Point3f> scenePointsInView;
for (int i = 0; i < m_boardSize.height; ++i) {
for (int j = 0; j < m_boardSize.width; ++j) {
scenePointsInView.push_back(
cv::Point3f(i * m_squareSize, j * m_squareSize, 0.0));
}
}
m_scenePoints.push_back(scenePointsInView);
}
bool CameraCalibration::calibrate(void) {
int imageCount = m_imagePoints.size();
// compute intrinsic camera parameters and extrinsic parameters for each of
// the views
std::vector<cv::Mat> rvecs;
std::vector<cv::Mat> tvecs;
bool ret = calibrateHelper(m_camera, rvecs, tvecs);
m_cameraPoses = cv::Mat(imageCount, 6, CV_64F);
for (int i = 0; i < imageCount; ++i) {
m_cameraPoses.at<double>(i, 0) = rvecs.at(i).at<double>(0);
m_cameraPoses.at<double>(i, 1) = rvecs.at(i).at<double>(1);
m_cameraPoses.at<double>(i, 2) = rvecs.at(i).at<double>(2);
m_cameraPoses.at<double>(i, 3) = tvecs.at(i).at<double>(0);
m_cameraPoses.at<double>(i, 4) = tvecs.at(i).at<double>(1);
m_cameraPoses.at<double>(i, 5) = tvecs.at(i).at<double>(2);
}
// Compute measurement covariance.
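// (2x2 empirical covariance of the per-corner reprojection residuals,
// pooled over all views)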
std::vector<std::vector<cv::Point2f> > errVec(m_imagePoints.size());
Eigen::Vector2d errSum = Eigen::Vector2d::Zero();
size_t errCount = 0;
for (size_t i = 0; i < m_imagePoints.size(); ++i) {
std::vector<cv::Point2f> estImagePoints;
m_camera->projectPoints(
m_scenePoints.at(i), rvecs.at(i), tvecs.at(i), estImagePoints);
for (size_t j = 0; j < m_imagePoints.at(i).size(); ++j) {
cv::Point2f pObs = m_imagePoints.at(i).at(j);
cv::Point2f pEst = estImagePoints.at(j);
cv::Point2f err = pObs - pEst;
errVec.at(i).push_back(err);
errSum += Eigen::Vector2d(err.x, err.y);
}
errCount += m_imagePoints.at(i).size();
}
Eigen::Vector2d errMean = errSum / static_cast<double>(errCount);
Eigen::Matrix2d measurementCovariance = Eigen::Matrix2d::Zero();
for (size_t i = 0; i < errVec.size(); ++i) {
for (size_t j = 0; j < errVec.at(i).size(); ++j) {
cv::Point2f err = errVec.at(i).at(j);
double d0 = err.x - errMean(0);
double d1 = err.y - errMean(1);
measurementCovariance(0, 0) += d0 * d0;
measurementCovariance(0, 1) += d0 * d1;
measurementCovariance(1, 1) += d1 * d1;
}
}
measurementCovariance /= static_cast<double>(errCount);
measurementCovariance(1, 0) = measurementCovariance(0, 1);
m_measurementCovariance = measurementCovariance;
return ret;
}
int CameraCalibration::sampleCount(void) const {
return m_imagePoints.size();
}
std::vector<std::vector<cv::Point2f> > &CameraCalibration::imagePoints(void) {
return m_imagePoints;
}
const std::vector<std::vector<cv::Point2f> > &CameraCalibration::imagePoints(
void) const {
return m_imagePoints;
}
std::vector<std::vector<cv::Point3f> > &CameraCalibration::scenePoints(void) {
return m_scenePoints;
}
const std::vector<std::vector<cv::Point3f> > &CameraCalibration::scenePoints(
void) const {
return m_scenePoints;
}
CameraPtr &CameraCalibration::camera(void) {
return m_camera;
}
const CameraConstPtr CameraCalibration::camera(void) const {
return m_camera;
}
Eigen::Matrix2d &CameraCalibration::measurementCovariance(void) {
return m_measurementCovariance;
}
const Eigen::Matrix2d &CameraCalibration::measurementCovariance(void) const {
return m_measurementCovariance;
}
cv::Mat &CameraCalibration::cameraPoses(void) {
return m_cameraPoses;
}
const cv::Mat &CameraCalibration::cameraPoses(void) const {
return m_cameraPoses;
}
void CameraCalibration::drawResults(std::vector<cv::Mat> &images) const {
std::vector<cv::Mat> rvecs, tvecs;
for (size_t i = 0; i < images.size(); ++i) {
cv::Mat rvec(3, 1, CV_64F);
rvec.at<double>(0) = m_cameraPoses.at<double>(i, 0);
rvec.at<double>(1) = m_cameraPoses.at<double>(i, 1);
rvec.at<double>(2) = m_cameraPoses.at<double>(i, 2);
cv::Mat tvec(3, 1, CV_64F);
tvec.at<double>(0) = m_cameraPoses.at<double>(i, 3);
tvec.at<double>(1) = m_cameraPoses.at<double>(i, 4);
tvec.at<double>(2) = m_cameraPoses.at<double>(i, 5);
rvecs.push_back(rvec);
tvecs.push_back(tvec);
}
int drawShiftBits = 4;
int drawMultiplier = 1 << drawShiftBits;
cv::Scalar green(0, 255, 0);
cv::Scalar red(0, 0, 255);
for (size_t i = 0; i < images.size(); ++i) {
cv::Mat &image = images.at(i);
if (image.channels() == 1) {
cv::cvtColor(image, image, CV_GRAY2RGB);
}
std::vector<cv::Point2f> estImagePoints;
m_camera->projectPoints(
m_scenePoints.at(i), rvecs.at(i), tvecs.at(i), estImagePoints);
float errorSum = 0.0f;
float errorMax = std::numeric_limits<float>::lowest();
for (size_t j = 0; j < m_imagePoints.at(i).size(); ++j) {
cv::Point2f pObs = m_imagePoints.at(i).at(j);
cv::Point2f pEst = estImagePoints.at(j);
cv::circle(
image, cv::Point(
cvRound(pObs.x * drawMultiplier),
cvRound(pObs.y * drawMultiplier)),
5, green, 2, CV_AA, drawShiftBits);
cv::circle(
image, cv::Point(
cvRound(pEst.x * drawMultiplier),
cvRound(pEst.y * drawMultiplier)),
5, red, 2, CV_AA, drawShiftBits);
float error = cv::norm(pObs - pEst);
errorSum += error;
if (error > errorMax) {
errorMax = error;
}
}
std::ostringstream oss;
oss << "Reprojection error: avg = " << errorSum / m_imagePoints.at(i).size()
<< " max = " << errorMax;
cv::putText(
image, oss.str(), cv::Point(10, image.rows - 10),
cv::FONT_HERSHEY_COMPLEX, 0.5, cv::Scalar(255, 255, 255), 1, CV_AA);
}
}
void CameraCalibration::writeParams(const std::string &filename) const {
m_camera->writeParametersToYamlFile(filename);
}
bool CameraCalibration::writeChessboardData(const std::string &filename) const {
std::ofstream ofs(filename.c_str(), std::ios::out | std::ios::binary);
if (!ofs.is_open()) {
return false;
}
writeData(ofs, m_boardSize.width);
writeData(ofs, m_boardSize.height);
writeData(ofs, m_squareSize);
writeData(ofs, m_measurementCovariance(0, 0));
writeData(ofs, m_measurementCovariance(0, 1));
writeData(ofs, m_measurementCovariance(1, 0));
writeData(ofs, m_measurementCovariance(1, 1));
writeData(ofs, m_cameraPoses.rows);
writeData(ofs, m_cameraPoses.cols);
writeData(ofs, m_cameraPoses.type());
for (int i = 0; i < m_cameraPoses.rows; ++i) {
for (int j = 0; j < m_cameraPoses.cols; ++j) {
writeData(ofs, m_cameraPoses.at<double>(i, j));
}
}
writeData(ofs, m_imagePoints.size());
for (size_t i = 0; i < m_imagePoints.size(); ++i) {
writeData(ofs, m_imagePoints.at(i).size());
for (size_t j = 0; j < m_imagePoints.at(i).size(); ++j) {
const cv::Point2f &ipt = m_imagePoints.at(i).at(j);
writeData(ofs, ipt.x);
writeData(ofs, ipt.y);
}
}
writeData(ofs, m_scenePoints.size());
for (size_t i = 0; i < m_scenePoints.size(); ++i) {
writeData(ofs, m_scenePoints.at(i).size());
for (size_t j = 0; j < m_scenePoints.at(i).size(); ++j) {
const cv::Point3f &spt = m_scenePoints.at(i).at(j);
writeData(ofs, spt.x);
writeData(ofs, spt.y);
writeData(ofs, spt.z);
}
}
return true;
}
bool CameraCalibration::readChessboardData(const std::string &filename) {
std::ifstream ifs(filename.c_str(), std::ios::in | std::ios::binary);
if (!ifs.is_open()) {
return false;
}
readData(ifs, m_boardSize.width);
readData(ifs, m_boardSize.height);
readData(ifs, m_squareSize);
readData(ifs, m_measurementCovariance(0, 0));
readData(ifs, m_measurementCovariance(0, 1));
readData(ifs, m_measurementCovariance(1, 0));
readData(ifs, m_measurementCovariance(1, 1));
int rows, cols, type;
readData(ifs, rows);
readData(ifs, cols);
readData(ifs, type);
m_cameraPoses = cv::Mat(rows, cols, type);
for (int i = 0; i < m_cameraPoses.rows; ++i) {
for (int j = 0; j < m_cameraPoses.cols; ++j) {
readData(ifs, m_cameraPoses.at<double>(i, j));
}
}
size_t nImagePointSets;
readData(ifs, nImagePointSets);
m_imagePoints.clear();
m_imagePoints.resize(nImagePointSets);
for (size_t i = 0; i < m_imagePoints.size(); ++i) {
size_t nImagePoints;
readData(ifs, nImagePoints);
m_imagePoints.at(i).resize(nImagePoints);
for (size_t j = 0; j < m_imagePoints.at(i).size(); ++j) {
cv::Point2f &ipt = m_imagePoints.at(i).at(j);
readData(ifs, ipt.x);
readData(ifs, ipt.y);
}
}
size_t nScenePointSets;
readData(ifs, nScenePointSets);
m_scenePoints.clear();
m_scenePoints.resize(nScenePointSets);
for (size_t i = 0; i < m_scenePoints.size(); ++i) {
size_t nScenePoints;
readData(ifs, nScenePoints);
m_scenePoints.at(i).resize(nScenePoints);
for (size_t j = 0; j < m_scenePoints.at(i).size(); ++j) {
cv::Point3f &spt = m_scenePoints.at(i).at(j);
readData(ifs, spt.x);
readData(ifs, spt.y);
readData(ifs, spt.z);
}
}
return true;
}
void CameraCalibration::setVerbose(bool verbose) {
m_verbose = verbose;
}
bool CameraCalibration::calibrateHelper(
CameraPtr &camera, std::vector<cv::Mat> &rvecs,
std::vector<cv::Mat> &tvecs) const {
rvecs.assign(m_scenePoints.size(), cv::Mat());
tvecs.assign(m_scenePoints.size(), cv::Mat());
// STEP 1: Estimate intrinsics
camera->estimateIntrinsics(m_boardSize, m_scenePoints, m_imagePoints);
// STEP 2: Estimate extrinsics
for (size_t i = 0; i < m_scenePoints.size(); ++i) {
camera->estimateExtrinsics(
m_scenePoints.at(i), m_imagePoints.at(i), rvecs.at(i), tvecs.at(i));
}
if (m_verbose) {
std::cout << "[" << camera->cameraName() << "] "
<< "# INFO: "
<< "Initial reprojection error: " << std::fixed
<< std::setprecision(3)
<< camera->reprojectionError(
m_scenePoints, m_imagePoints, rvecs, tvecs)
<< " pixels" << std::endl;
}
// STEP 3: optimization using ceres
optimize(camera, rvecs, tvecs);
if (m_verbose) {
double err =
camera->reprojectionError(m_scenePoints, m_imagePoints, rvecs, tvecs);
std::cout << "[" << camera->cameraName() << "] "
<< "# INFO: Final reprojection error: " << err << " pixels"
<< std::endl;
std::cout << "[" << camera->cameraName() << "] "
<< "# INFO: " << camera->parametersToString() << std::endl;
}
return true;
}
void CameraCalibration::optimize(
CameraPtr &camera, std::vector<cv::Mat> &rvecs,
std::vector<cv::Mat> &tvecs) const {
// Use ceres to do optimization
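// Jointly refine the shared intrinsics and the per-view poses: one
// reprojection residual per observed corner, robustified with a Cauchy loss,
// with each pose quaternion kept on the unit sphere via a local
// parameterization.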
ceres::Problem problem;
std::vector<Transform, Eigen::aligned_allocator<Transform> > transformVec(
rvecs.size());
for (size_t i = 0; i < rvecs.size(); ++i) {
Eigen::Vector3d rvec;
cv::cv2eigen(rvecs.at(i), rvec);
transformVec.at(i).rotation() =
Eigen::AngleAxisd(rvec.norm(), rvec.normalized());
transformVec.at(i).translation() << tvecs[i].at<double>(0),
tvecs[i].at<double>(1), tvecs[i].at<double>(2);
}
std::vector<double> intrinsicCameraParams;
m_camera->writeParameters(intrinsicCameraParams);
// create residuals for each observation
for (size_t i = 0; i < m_imagePoints.size(); ++i) {
for (size_t j = 0; j < m_imagePoints.at(i).size(); ++j) {
const cv::Point3f &spt = m_scenePoints.at(i).at(j);
const cv::Point2f &ipt = m_imagePoints.at(i).at(j);
ceres::CostFunction *costFunction =
CostFunctionFactory::instance()->generateCostFunction(
camera, Eigen::Vector3d(spt.x, spt.y, spt.z),
Eigen::Vector2d(ipt.x, ipt.y), CAMERA_INTRINSICS | CAMERA_POSE);
ceres::LossFunction *lossFunction = new ceres::CauchyLoss(1.0);
problem.AddResidualBlock(
costFunction, lossFunction, intrinsicCameraParams.data(),
transformVec.at(i).rotationData(),
transformVec.at(i).translationData());
}
ceres::LocalParameterization *quaternionParameterization =
new EigenQuaternionParameterization;
problem.SetParameterization(
transformVec.at(i).rotationData(), quaternionParameterization);
}
std::cout << "begin ceres" << std::endl;
ceres::Solver::Options options;
options.max_num_iterations = 1000;
options.num_threads = 1;
if (m_verbose) {
options.minimizer_progress_to_stdout = true;
}
ceres::Solver::Summary summary;
ceres::Solve(options, &problem, &summary);
std::cout << "end ceres" << std::endl;
if (m_verbose) {
std::cout << summary.FullReport() << std::endl;
}
camera->readParameters(intrinsicCameraParams);
for (size_t i = 0; i < rvecs.size(); ++i) {
Eigen::AngleAxisd aa(transformVec.at(i).rotation());
Eigen::Vector3d rvec = aa.angle() * aa.axis();
cv::eigen2cv(rvec, rvecs.at(i));
cv::Mat &tvec = tvecs.at(i);
tvec.at<double>(0) = transformVec.at(i).translation()(0);
tvec.at<double>(1) = transformVec.at(i).translation()(1);
tvec.at<double>(2) = transformVec.at(i).translation()(2);
}
}
template <typename T>
void CameraCalibration::readData(std::ifstream &ifs, T &data) const {
ifs.read(reinterpret_cast<char *>(&data), sizeof(T));
}
template <typename T>
void CameraCalibration::writeData(std::ofstream &ofs, T data) const {
char *pData = reinterpret_cast<char *>(&data);
ofs.write(pData, sizeof(T));
}
}

View File

@@ -1,370 +0,0 @@
#include "camodocal/calib/StereoCameraCalibration.h"
#include <boost/filesystem.hpp>
#include <opencv2/core/eigen.hpp>
#include "camodocal/EigenUtils.h"
#include "camodocal/camera_models/CameraFactory.h"
#include "camodocal/camera_models/CostFunctionFactory.h"
#include "camodocal/gpl/EigenQuaternionParameterization.h"
#include "ceres/ceres.h"
namespace camodocal {
StereoCameraCalibration::StereoCameraCalibration(
Camera::ModelType modelType, const std::string &cameraLeftName,
const std::string &cameraRightName, const cv::Size &imageSize,
const cv::Size &boardSize, float squareSize)
: m_calibLeft(modelType, cameraLeftName, imageSize, boardSize, squareSize),
m_calibRight(
modelType, cameraRightName, imageSize, boardSize, squareSize),
m_verbose(false) {
stereo_error.resize(2, 0.0);
}
void StereoCameraCalibration::clear(void) {
m_calibLeft.clear();
m_calibRight.clear();
}
void StereoCameraCalibration::addChessboardData(
const std::vector<cv::Point2f> &cornersLeft,
const std::vector<cv::Point2f> &cornersRight) {
m_calibLeft.addChessboardData(cornersLeft);
m_calibRight.addChessboardData(cornersRight);
}
bool StereoCameraCalibration::calibrate(void) {
// calibrate cameras individually
if (!m_calibLeft.calibrate()) {
return false;
}
std::cout << "left_calibrate complete." << std::endl;
if (!m_calibRight.calibrate()) {
return false;
}
std::cout << "right_calibrate complete." << std::endl;
// perform stereo calibration
int imageCount = imagePointsLeft().size();
// find best estimate for initial transform from left camera frame to right
// camera frame
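// For each view, form the left-to-right transform implied by that view's pose
// pair, apply it to every left pose, and keep the candidate with the smallest
// right-camera reprojection error.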
double minReprojErr = std::numeric_limits<double>::max();
for (int i = 0; i < imageCount; ++i) {
Eigen::Vector3d rvec;
rvec << m_calibLeft.cameraPoses().at<double>(i, 0),
m_calibLeft.cameraPoses().at<double>(i, 1),
m_calibLeft.cameraPoses().at<double>(i, 2);
Eigen::Quaterniond q_l = AngleAxisToQuaternion(rvec);
Eigen::Vector3d t_l;
t_l << m_calibLeft.cameraPoses().at<double>(i, 3),
m_calibLeft.cameraPoses().at<double>(i, 4),
m_calibLeft.cameraPoses().at<double>(i, 5);
rvec << m_calibRight.cameraPoses().at<double>(i, 0),
m_calibRight.cameraPoses().at<double>(i, 1),
m_calibRight.cameraPoses().at<double>(i, 2);
Eigen::Quaterniond q_r = AngleAxisToQuaternion(rvec);
Eigen::Vector3d t_r;
t_r << m_calibRight.cameraPoses().at<double>(i, 3),
m_calibRight.cameraPoses().at<double>(i, 4),
m_calibRight.cameraPoses().at<double>(i, 5);
Eigen::Quaterniond q_l_r = q_r * q_l.conjugate();
Eigen::Vector3d t_l_r = -q_l_r.toRotationMatrix() * t_l + t_r;
std::vector<cv::Mat> rvecs(imageCount);
std::vector<cv::Mat> tvecs(imageCount);
for (int j = 0; j < imageCount; ++j) {
rvec << m_calibLeft.cameraPoses().at<double>(j, 0),
m_calibLeft.cameraPoses().at<double>(j, 1),
m_calibLeft.cameraPoses().at<double>(j, 2);
q_l = AngleAxisToQuaternion(rvec);
t_l << m_calibLeft.cameraPoses().at<double>(j, 3),
m_calibLeft.cameraPoses().at<double>(j, 4),
m_calibLeft.cameraPoses().at<double>(j, 5);
Eigen::Quaterniond q_r = q_l_r * q_l;
Eigen::Vector3d t_r = q_l_r.toRotationMatrix() * t_l + t_l_r;
QuaternionToAngleAxis(q_r.coeffs().data(), rvec);
cv::eigen2cv(rvec, rvecs.at(j));
cv::eigen2cv(t_r, tvecs.at(j));
}
double reprojErr = cameraRight()->reprojectionError(
scenePoints(), imagePointsRight(), rvecs, tvecs);
if (reprojErr < minReprojErr) {
minReprojErr = reprojErr;
m_q = q_l_r;
m_t = t_l_r;
}
}
std::vector<cv::Mat> rvecsL(imageCount);
std::vector<cv::Mat> tvecsL(imageCount);
std::vector<cv::Mat> rvecsR(imageCount);
std::vector<cv::Mat> tvecsR(imageCount);
std::vector<double *> extrinsicCameraLParams(scenePoints().size());
for (int i = 0; i < imageCount; ++i) {
extrinsicCameraLParams[i] = new double[7];
Eigen::Vector3d rvecL(
m_calibLeft.cameraPoses().at<double>(i, 0),
m_calibLeft.cameraPoses().at<double>(i, 1),
m_calibLeft.cameraPoses().at<double>(i, 2));
AngleAxisToQuaternion(rvecL, extrinsicCameraLParams[i]);
extrinsicCameraLParams[i][4] = m_calibLeft.cameraPoses().at<double>(i, 3);
extrinsicCameraLParams[i][5] = m_calibLeft.cameraPoses().at<double>(i, 4);
extrinsicCameraLParams[i][6] = m_calibLeft.cameraPoses().at<double>(i, 5);
cv::eigen2cv(rvecL, rvecsL.at(i));
Eigen::Vector3d tvecL;
tvecL << m_calibLeft.cameraPoses().at<double>(i, 3),
m_calibLeft.cameraPoses().at<double>(i, 4),
m_calibLeft.cameraPoses().at<double>(i, 5);
cv::eigen2cv(tvecL, tvecsL.at(i));
Eigen::Quaterniond q_r = m_q * AngleAxisToQuaternion(rvecL);
Eigen::Vector3d t_r = m_q.toRotationMatrix() * tvecL + m_t;
Eigen::Vector3d rvecR;
QuaternionToAngleAxis(q_r.coeffs().data(), rvecR);
cv::eigen2cv(rvecR, rvecsR.at(i));
cv::eigen2cv(t_r, tvecsR.at(i));
}
if (m_verbose) {
double roll, pitch, yaw;
mat2RPY(m_q.toRotationMatrix(), roll, pitch, yaw);
std::cout << "[stereo]"
<< "# INFO: Initial extrinsics: " << std::endl
<< "r: " << roll << " p: " << pitch << " yaw: " << yaw
<< std::endl
<< "x: " << m_t(0) << " y: " << m_t(1) << " z: " << m_t(2)
<< std::endl;
double error = cameraLeft()->reprojectionError(
scenePoints(), imagePointsLeft(), rvecsL, tvecsL);
std::cout << "[" << cameraLeft()->cameraName() << "] "
<< "# INFO: Initial reprojection error: " << error << " pixels"
<< std::endl;
error = cameraRight()->reprojectionError(
scenePoints(), imagePointsRight(), rvecsR, tvecsR);
std::cout << "[" << cameraRight()->cameraName() << "] "
<< "# INFO: Initial reprojection error: " << error << " pixels"
<< std::endl;
}
std::vector<double> intrinsicCameraLParams;
cameraLeft()->writeParameters(intrinsicCameraLParams);
std::vector<double> intrinsicCameraRParams;
cameraRight()->writeParameters(intrinsicCameraRParams);
ceres::Problem problem;
for (int i = 0; i < imageCount; ++i) {
for (size_t j = 0; j < scenePoints().at(i).size(); ++j) {
const cv::Point3f &spt = scenePoints().at(i).at(j);
const cv::Point2f &iptL = imagePointsLeft().at(i).at(j);
const cv::Point2f &iptR = imagePointsRight().at(i).at(j);
ceres::CostFunction *costFunction =
CostFunctionFactory::instance()->generateCostFunction(
cameraLeft(), cameraRight(), Eigen::Vector3d(spt.x, spt.y, spt.z),
Eigen::Vector2d(iptL.x, iptL.y), Eigen::Vector2d(iptR.x, iptR.y));
ceres::LossFunction *lossFunction = new ceres::CauchyLoss(1.0);
problem.AddResidualBlock(
costFunction, lossFunction, intrinsicCameraLParams.data(),
intrinsicCameraRParams.data(), extrinsicCameraLParams[i],
extrinsicCameraLParams[i] + 4, m_q.coeffs().data(), m_t.data());
}
}
for (int i = 0; i < imageCount; ++i) {
ceres::LocalParameterization *quaternionParameterization =
new EigenQuaternionParameterization;
problem.SetParameterization(
extrinsicCameraLParams[i], quaternionParameterization);
}
ceres::LocalParameterization *quaternionParameterization =
new EigenQuaternionParameterization;
problem.SetParameterization(m_q.coeffs().data(), quaternionParameterization);
ceres::Solver::Options options;
options.max_num_iterations = 1000;
options.num_threads = 8;
if (m_verbose) {
options.minimizer_progress_to_stdout = true;
}
ceres::Solver::Summary summary;
ceres::Solve(options, &problem, &summary);
if (m_verbose) {
std::cout << summary.FullReport() << "\n";
}
cameraLeft()->readParameters(intrinsicCameraLParams);
cameraRight()->readParameters(intrinsicCameraRParams);
for (int i = 0; i < imageCount; ++i) {
Eigen::Vector3d rvecL;
QuaternionToAngleAxis(extrinsicCameraLParams[i], rvecL);
m_calibLeft.cameraPoses().at<double>(i, 0) = rvecL(0);
m_calibLeft.cameraPoses().at<double>(i, 1) = rvecL(1);
m_calibLeft.cameraPoses().at<double>(i, 2) = rvecL(2);
m_calibLeft.cameraPoses().at<double>(i, 3) = extrinsicCameraLParams[i][4];
m_calibLeft.cameraPoses().at<double>(i, 4) = extrinsicCameraLParams[i][5];
m_calibLeft.cameraPoses().at<double>(i, 5) = extrinsicCameraLParams[i][6];
cv::eigen2cv(rvecL, rvecsL.at(i));
Eigen::Vector3d tvecL;
tvecL << extrinsicCameraLParams[i][4], extrinsicCameraLParams[i][5],
extrinsicCameraLParams[i][6];
cv::eigen2cv(tvecL, tvecsL.at(i));
Eigen::Quaterniond q_r = m_q * AngleAxisToQuaternion(rvecL);
Eigen::Vector3d t_r = m_q.toRotationMatrix() * tvecL + m_t;
Eigen::Vector3d rvecR;
QuaternionToAngleAxis(q_r.coeffs().data(), rvecR);
m_calibRight.cameraPoses().at<double>(i, 0) = rvecR(0);
m_calibRight.cameraPoses().at<double>(i, 1) = rvecR(1);
m_calibRight.cameraPoses().at<double>(i, 2) = rvecR(2);
m_calibRight.cameraPoses().at<double>(i, 3) = t_r(0);
m_calibRight.cameraPoses().at<double>(i, 4) = t_r(1);
m_calibRight.cameraPoses().at<double>(i, 5) = t_r(2);
cv::eigen2cv(rvecR, rvecsR.at(i));
cv::eigen2cv(t_r, tvecsR.at(i));
}
if (m_verbose) {
double roll, pitch, yaw;
mat2RPY(m_q.toRotationMatrix(), roll, pitch, yaw);
std::cout << "[stereo]"
<< "# INFO: Final extrinsics: " << std::endl
<< "r: " << roll << " p: " << pitch << " yaw: " << yaw
<< std::endl
<< "x: " << m_t(0) << " y: " << m_t(1) << " z: " << m_t(2)
<< std::endl;
stereo_error[0] = cameraLeft()->reprojectionError(
scenePoints(), imagePointsLeft(), rvecsL, tvecsL);
std::cout << "[" << cameraLeft()->cameraName() << "] "
<< "# INFO: Final reprojection error: " << stereo_error[0] << " pixels"
<< std::endl;
std::cout << "[" << cameraLeft()->cameraName() << "] "
<< "# INFO: " << cameraLeft()->parametersToString() << std::endl;
stereo_error[1] = cameraRight()->reprojectionError(
scenePoints(), imagePointsRight(), rvecsR, tvecsR);
std::cout << "[" << cameraRight()->cameraName() << "] "
<< "# INFO: Final reprojection error: " << stereo_error[1] << " pixels"
<< std::endl;
std::cout << "[" << cameraRight()->cameraName() << "] "
<< "# INFO: " << cameraRight()->parametersToString() << std::endl;
}
return true;
}
int StereoCameraCalibration::sampleCount(void) const {
return m_calibLeft.sampleCount();
}
const std::vector<std::vector<cv::Point2f> >
&StereoCameraCalibration::imagePointsLeft(void) const {
return m_calibLeft.imagePoints();
}
const std::vector<std::vector<cv::Point2f> >
&StereoCameraCalibration::imagePointsRight(void) const {
return m_calibRight.imagePoints();
}
const std::vector<std::vector<cv::Point3f> >
&StereoCameraCalibration::scenePoints(void) const {
return m_calibLeft.scenePoints();
}
CameraPtr &StereoCameraCalibration::cameraLeft(void) {
return m_calibLeft.camera();
}
const CameraConstPtr StereoCameraCalibration::cameraLeft(void) const {
return m_calibLeft.camera();
}
CameraPtr &StereoCameraCalibration::cameraRight(void) {
return m_calibRight.camera();
}
const CameraConstPtr StereoCameraCalibration::cameraRight(void) const {
return m_calibRight.camera();
}
void StereoCameraCalibration::drawResults(
std::vector<cv::Mat> &imagesLeft, std::vector<cv::Mat> &imagesRight) const {
m_calibLeft.drawResults(imagesLeft);
m_calibRight.drawResults(imagesRight);
}
void StereoCameraCalibration::writeParams(const std::string &directory) const {
if (!boost::filesystem::exists(directory)) {
boost::filesystem::create_directory(directory);
}
cameraLeft()->writeParametersToYamlFile(directory + "/camera_left.yaml");
cameraRight()->writeParametersToYamlFile(directory + "/camera_right.yaml");
cv::FileStorage fs(directory + "/extrinsics.yaml", cv::FileStorage::WRITE);
fs << "transform";
fs << "{"
<< "q_x" << m_q.x() << "q_y" << m_q.y() << "q_z" << m_q.z() << "q_w"
<< m_q.w() << "t_x" << m_t(0) << "t_y" << m_t(1) << "t_z" << m_t(2) << "}";
fs.release();
cv::FileStorage error_file(directory + "/stereo_reprojection_error.yaml", cv::FileStorage::WRITE);
error_file << "left_reprojection_error" << stereo_error[0];
error_file << "right_reprojection_error" << stereo_error[1];
error_file.release();
}
void StereoCameraCalibration::setVerbose(bool verbose) {
m_verbose = verbose;
m_calibLeft.setVerbose(verbose);
m_calibRight.setVerbose(verbose);
}
}

View File

@@ -1,5 +1,4 @@
#include "camodocal/camera_models/Camera.h"
#include "camodocal/camera_models/ScaramuzzaCamera.h"
#include <opencv2/calib3d/calib3d.hpp>
@@ -14,9 +13,6 @@ Camera::Parameters::Parameters(ModelType modelType)
case PINHOLE:
m_nIntrinsics = 8;
break;
case SCARAMUZZA:
m_nIntrinsics = SCARAMUZZA_CAMERA_NUM_PARAMS;
break;
case MEI:
default:
m_nIntrinsics = 9;
@@ -36,9 +32,6 @@ Camera::Parameters::Parameters(
case PINHOLE:
m_nIntrinsics = 8;
break;
case SCARAMUZZA:
m_nIntrinsics = SCARAMUZZA_CAMERA_NUM_PARAMS;
break;
case MEI:
default:
m_nIntrinsics = 9;

View File

@@ -1,140 +0,0 @@
#include "camodocal/camera_models/CameraFactory.h"
#include <boost/algorithm/string.hpp>
#include "camodocal/camera_models/CataCamera.h"
#include "camodocal/camera_models/EquidistantCamera.h"
#include "camodocal/camera_models/PinholeCamera.h"
#include "camodocal/camera_models/ScaramuzzaCamera.h"
#include "ceres/ceres.h"
namespace camodocal {
boost::shared_ptr<CameraFactory> CameraFactory::m_instance;
CameraFactory::CameraFactory() {}
boost::shared_ptr<CameraFactory> CameraFactory::instance(void) {
if (m_instance.get() == 0) {
m_instance.reset(new CameraFactory);
}
return m_instance;
}
CameraPtr CameraFactory::generateCamera(
Camera::ModelType modelType, const std::string &cameraName,
cv::Size imageSize) const {
switch (modelType) {
case Camera::KANNALA_BRANDT: {
EquidistantCameraPtr camera(new EquidistantCamera);
EquidistantCamera::Parameters params = camera->getParameters();
params.cameraName() = cameraName;
params.imageWidth() = imageSize.width;
params.imageHeight() = imageSize.height;
camera->setParameters(params);
return camera;
}
case Camera::PINHOLE: {
PinholeCameraPtr camera(new PinholeCamera);
PinholeCamera::Parameters params = camera->getParameters();
params.cameraName() = cameraName;
params.imageWidth() = imageSize.width;
params.imageHeight() = imageSize.height;
camera->setParameters(params);
return camera;
}
case Camera::SCARAMUZZA: {
OCAMCameraPtr camera(new OCAMCamera);
OCAMCamera::Parameters params = camera->getParameters();
params.cameraName() = cameraName;
params.imageWidth() = imageSize.width;
params.imageHeight() = imageSize.height;
camera->setParameters(params);
return camera;
}
case Camera::MEI:
default: {
CataCameraPtr camera(new CataCamera);
CataCamera::Parameters params = camera->getParameters();
params.cameraName() = cameraName;
params.imageWidth() = imageSize.width;
params.imageHeight() = imageSize.height;
camera->setParameters(params);
return camera;
}
}
}
CameraPtr CameraFactory::generateCameraFromYamlFile(
const std::string &filename) {
cv::FileStorage fs(filename, cv::FileStorage::READ);
if (!fs.isOpened()) {
std::cout << "# ERROR: can not open " << filename << std::endl;
return CameraPtr();
}
Camera::ModelType modelType = Camera::MEI;
if (!fs["model_type"].isNone()) {
std::string sModelType;
fs["model_type"] >> sModelType;
if (boost::iequals(sModelType, "kannala_brandt")) {
modelType = Camera::KANNALA_BRANDT;
} else if (boost::iequals(sModelType, "mei")) {
modelType = Camera::MEI;
} else if (boost::iequals(sModelType, "scaramuzza")) {
modelType = Camera::SCARAMUZZA;
} else if (boost::iequals(sModelType, "pinhole")) {
modelType = Camera::PINHOLE;
} else {
std::cerr << "# ERROR: Unknown camera model: " << sModelType << std::endl;
return CameraPtr();
}
}
switch (modelType) {
case Camera::KANNALA_BRANDT: {
EquidistantCameraPtr camera(new EquidistantCamera);
EquidistantCamera::Parameters params = camera->getParameters();
params.readFromYamlFile(filename);
camera->setParameters(params);
return camera;
}
case Camera::PINHOLE: {
PinholeCameraPtr camera(new PinholeCamera);
PinholeCamera::Parameters params = camera->getParameters();
params.readFromYamlFile(filename);
camera->setParameters(params);
return camera;
}
case Camera::SCARAMUZZA: {
OCAMCameraPtr camera(new OCAMCamera);
OCAMCamera::Parameters params = camera->getParameters();
params.readFromYamlFile(filename);
camera->setParameters(params);
return camera;
}
case Camera::MEI:
default: {
CataCameraPtr camera(new CataCamera);
CataCamera::Parameters params = camera->getParameters();
params.readFromYamlFile(filename);
camera->setParameters(params);
return camera;
}
}
return CameraPtr();
}
}

View File

@@ -1,863 +0,0 @@
#include "camodocal/camera_models/CataCamera.h"
#include <cmath>
#include <cstdio>
#include "eigen3/Eigen/Dense"
#include <iomanip>
#include <iostream>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/core/eigen.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "camodocal/gpl/gpl.h"
namespace camodocal {
CataCamera::Parameters::Parameters()
: Camera::Parameters(MEI),
m_xi(0.0),
m_k1(0.0),
m_k2(0.0),
m_p1(0.0),
m_p2(0.0),
m_gamma1(0.0),
m_gamma2(0.0),
m_u0(0.0),
m_v0(0.0) {}
CataCamera::Parameters::Parameters(
const std::string &cameraName, int w, int h, double xi, double k1,
double k2, double p1, double p2, double gamma1, double gamma2, double u0,
double v0)
: Camera::Parameters(MEI, cameraName, w, h),
m_xi(xi),
m_k1(k1),
m_k2(k2),
m_p1(p1),
m_p2(p2),
m_gamma1(gamma1),
m_gamma2(gamma2),
m_u0(u0),
m_v0(v0) {}
double &CataCamera::Parameters::xi(void) {
return m_xi;
}
double &CataCamera::Parameters::k1(void) {
return m_k1;
}
double &CataCamera::Parameters::k2(void) {
return m_k2;
}
double &CataCamera::Parameters::p1(void) {
return m_p1;
}
double &CataCamera::Parameters::p2(void) {
return m_p2;
}
double &CataCamera::Parameters::gamma1(void) {
return m_gamma1;
}
double &CataCamera::Parameters::gamma2(void) {
return m_gamma2;
}
double &CataCamera::Parameters::u0(void) {
return m_u0;
}
double &CataCamera::Parameters::v0(void) {
return m_v0;
}
double CataCamera::Parameters::xi(void) const {
return m_xi;
}
double CataCamera::Parameters::k1(void) const {
return m_k1;
}
double CataCamera::Parameters::k2(void) const {
return m_k2;
}
double CataCamera::Parameters::p1(void) const {
return m_p1;
}
double CataCamera::Parameters::p2(void) const {
return m_p2;
}
double CataCamera::Parameters::gamma1(void) const {
return m_gamma1;
}
double CataCamera::Parameters::gamma2(void) const {
return m_gamma2;
}
double CataCamera::Parameters::u0(void) const {
return m_u0;
}
double CataCamera::Parameters::v0(void) const {
return m_v0;
}
bool CataCamera::Parameters::readFromYamlFile(const std::string &filename) {
cv::FileStorage fs(filename, cv::FileStorage::READ);
if (!fs.isOpened()) {
return false;
}
if (!fs["model_type"].isNone()) {
std::string sModelType;
fs["model_type"] >> sModelType;
if (sModelType.compare("MEI") != 0) {
return false;
}
}
m_modelType = MEI;
fs["camera_name"] >> m_cameraName;
m_imageWidth = static_cast<int>(fs["image_width"]);
m_imageHeight = static_cast<int>(fs["image_height"]);
cv::FileNode n = fs["mirror_parameters"];
m_xi = static_cast<double>(n["xi"]);
n = fs["distortion_parameters"];
m_k1 = static_cast<double>(n["k1"]);
m_k2 = static_cast<double>(n["k2"]);
m_p1 = static_cast<double>(n["p1"]);
m_p2 = static_cast<double>(n["p2"]);
n = fs["projection_parameters"];
m_gamma1 = static_cast<double>(n["gamma1"]);
m_gamma2 = static_cast<double>(n["gamma2"]);
m_u0 = static_cast<double>(n["u0"]);
m_v0 = static_cast<double>(n["v0"]);
return true;
}
void CataCamera::Parameters::writeToYamlFile(
const std::string &filename) const {
cv::FileStorage fs(filename, cv::FileStorage::WRITE);
fs << "model_type"
<< "MEI";
fs << "camera_name" << m_cameraName;
fs << "image_width" << m_imageWidth;
fs << "image_height" << m_imageHeight;
// mirror: xi
fs << "mirror_parameters";
fs << "{"
<< "xi" << m_xi << "}";
// radial distortion: k1, k2
// tangential distortion: p1, p2
fs << "distortion_parameters";
fs << "{"
<< "k1" << m_k1 << "k2" << m_k2 << "p1" << m_p1 << "p2" << m_p2 << "}";
// projection: gamma1, gamma2, u0, v0
fs << "projection_parameters";
fs << "{"
<< "gamma1" << m_gamma1 << "gamma2" << m_gamma2 << "u0" << m_u0 << "v0"
<< m_v0 << "}";
fs.release();
}
CataCamera::Parameters &CataCamera::Parameters::operator=(
const CataCamera::Parameters &other) {
if (this != &other) {
m_modelType = other.m_modelType;
m_cameraName = other.m_cameraName;
m_imageWidth = other.m_imageWidth;
m_imageHeight = other.m_imageHeight;
m_xi = other.m_xi;
m_k1 = other.m_k1;
m_k2 = other.m_k2;
m_p1 = other.m_p1;
m_p2 = other.m_p2;
m_gamma1 = other.m_gamma1;
m_gamma2 = other.m_gamma2;
m_u0 = other.m_u0;
m_v0 = other.m_v0;
}
return *this;
}
std::ostream &operator<<(
std::ostream &out, const CataCamera::Parameters &params) {
out << "Camera Parameters:" << std::endl;
out << " model_type "
<< "MEI" << std::endl;
out << " camera_name " << params.m_cameraName << std::endl;
out << " image_width " << params.m_imageWidth << std::endl;
out << " image_height " << params.m_imageHeight << std::endl;
out << "Mirror Parameters" << std::endl;
out << std::fixed << std::setprecision(10);
out << " xi " << params.m_xi << std::endl;
// radial distortion: k1, k2
// tangential distortion: p1, p2
out << "Distortion Parameters" << std::endl;
out << " k1 " << params.m_k1 << std::endl
<< " k2 " << params.m_k2 << std::endl
<< " p1 " << params.m_p1 << std::endl
<< " p2 " << params.m_p2 << std::endl;
// projection: gamma1, gamma2, u0, v0
out << "Projection Parameters" << std::endl;
out << " gamma1 " << params.m_gamma1 << std::endl
<< " gamma2 " << params.m_gamma2 << std::endl
<< " u0 " << params.m_u0 << std::endl
<< " v0 " << params.m_v0 << std::endl;
return out;
}
CataCamera::CataCamera()
: m_inv_K11(1.0),
m_inv_K13(0.0),
m_inv_K22(1.0),
m_inv_K23(0.0),
m_noDistortion(true) {}
CataCamera::CataCamera(
const std::string &cameraName, int imageWidth, int imageHeight, double xi,
double k1, double k2, double p1, double p2, double gamma1, double gamma2,
double u0, double v0)
: mParameters(
cameraName, imageWidth, imageHeight, xi, k1, k2, p1, p2, gamma1,
gamma2, u0, v0) {
if ((mParameters.k1() == 0.0) && (mParameters.k2() == 0.0) &&
(mParameters.p1() == 0.0) && (mParameters.p2() == 0.0)) {
m_noDistortion = true;
} else {
m_noDistortion = false;
}
// Inverse camera projection matrix parameters
m_inv_K11 = 1.0 / mParameters.gamma1();
m_inv_K13 = -mParameters.u0() / mParameters.gamma1();
m_inv_K22 = 1.0 / mParameters.gamma2();
m_inv_K23 = -mParameters.v0() / mParameters.gamma2();
}
CataCamera::CataCamera(const CataCamera::Parameters &params)
: mParameters(params) {
if ((mParameters.k1() == 0.0) && (mParameters.k2() == 0.0) &&
(mParameters.p1() == 0.0) && (mParameters.p2() == 0.0)) {
m_noDistortion = true;
} else {
m_noDistortion = false;
}
// Inverse camera projection matrix parameters
m_inv_K11 = 1.0 / mParameters.gamma1();
m_inv_K13 = -mParameters.u0() / mParameters.gamma1();
m_inv_K22 = 1.0 / mParameters.gamma2();
m_inv_K23 = -mParameters.v0() / mParameters.gamma2();
}
Camera::ModelType CataCamera::modelType(void) const {
return mParameters.modelType();
}
const std::string &CataCamera::cameraName(void) const {
return mParameters.cameraName();
}
int CataCamera::imageWidth(void) const {
return mParameters.imageWidth();
}
int CataCamera::imageHeight(void) const {
return mParameters.imageHeight();
}
void CataCamera::estimateIntrinsics(
const cv::Size &boardSize,
const std::vector<std::vector<cv::Point3f> > &objectPoints,
const std::vector<std::vector<cv::Point2f> > &imagePoints) {
Parameters params = getParameters();
double u0 = params.imageWidth() / 2.0;
double v0 = params.imageHeight() / 2.0;
double gamma0 = 0.0;
double minReprojErr = std::numeric_limits<double>::max();
std::vector<cv::Mat> rvecs, tvecs;
rvecs.assign(objectPoints.size(), cv::Mat());
tvecs.assign(objectPoints.size(), cv::Mat());
params.xi() = 1.0;
params.k1() = 0.0;
params.k2() = 0.0;
params.p1() = 0.0;
params.p2() = 0.0;
params.u0() = u0;
params.v0() = v0;
// Initialize gamma (focal length)
// Use non-radial line image and xi = 1
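// With xi = 1 each (non-radial) chessboard row projects to a circle; fitting
// that circle via the null space of P yields a candidate gamma, and the
// candidate with the lowest reprojection error is kept.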
for (size_t i = 0; i < imagePoints.size(); ++i) {
for (int r = 0; r < boardSize.height; ++r) {
cv::Mat P(boardSize.width, 4, CV_64F);
for (int c = 0; c < boardSize.width; ++c) {
const cv::Point2f &imagePoint =
imagePoints.at(i).at(r * boardSize.width + c);
double u = imagePoint.x - u0;
double v = imagePoint.y - v0;
P.at<double>(c, 0) = u;
P.at<double>(c, 1) = v;
P.at<double>(c, 2) = 0.5;
P.at<double>(c, 3) = -0.5 * (square(u) + square(v));
}
cv::Mat C;
cv::SVD::solveZ(P, C);
double t = square(C.at<double>(0)) + square(C.at<double>(1)) +
C.at<double>(2) * C.at<double>(3);
if (t < 0.0) {
continue;
}
// check that line image is not radial
double d = sqrt(1.0 / t);
double nx = C.at<double>(0) * d;
double ny = C.at<double>(1) * d;
if (hypot(nx, ny) > 0.95) {
continue;
}
double gamma = sqrt(C.at<double>(2) / C.at<double>(3));
params.gamma1() = gamma;
params.gamma2() = gamma;
setParameters(params);
for (size_t j = 0; j < objectPoints.size(); ++j) {
estimateExtrinsics(
objectPoints.at(j), imagePoints.at(j), rvecs.at(j), tvecs.at(j));
}
double reprojErr = reprojectionError(
objectPoints, imagePoints, rvecs, tvecs, cv::noArray());
if (reprojErr < minReprojErr) {
minReprojErr = reprojErr;
gamma0 = gamma;
}
}
}
if (gamma0 <= 0.0 && minReprojErr >= std::numeric_limits<double>::max()) {
std::cout << "[" << params.cameraName() << "] "
<< "# INFO: CataCamera model fails with given data. "
<< std::endl;
return;
}
params.gamma1() = gamma0;
params.gamma2() = gamma0;
setParameters(params);
}
/**
* \brief Lifts a point from the image plane to the unit sphere
*
* \param p image coordinates
* \param P coordinates of the point on the sphere
*/
void CataCamera::liftSphere(
const Eigen::Vector2d &p, Eigen::Vector3d &P) const {
double mx_d, my_d, mx2_d, mxy_d, my2_d, mx_u, my_u;
double rho2_d, rho4_d, radDist_d, Dx_d, Dy_d, inv_denom_d;
double lambda;
// Lift points to normalised plane
mx_d = m_inv_K11 * p(0) + m_inv_K13;
my_d = m_inv_K22 * p(1) + m_inv_K23;
if (m_noDistortion) {
mx_u = mx_d;
my_u = my_d;
} else {
// Apply inverse distortion model
if (0) {
double k1 = mParameters.k1();
double k2 = mParameters.k2();
double p1 = mParameters.p1();
double p2 = mParameters.p2();
// Inverse distortion model
// proposed by Heikkila
mx2_d = mx_d * mx_d;
my2_d = my_d * my_d;
mxy_d = mx_d * my_d;
rho2_d = mx2_d + my2_d;
rho4_d = rho2_d * rho2_d;
radDist_d = k1 * rho2_d + k2 * rho4_d;
Dx_d = mx_d * radDist_d + p2 * (rho2_d + 2 * mx2_d) + 2 * p1 * mxy_d;
Dy_d = my_d * radDist_d + p1 * (rho2_d + 2 * my2_d) + 2 * p2 * mxy_d;
inv_denom_d = 1 / (1 + 4 * k1 * rho2_d + 6 * k2 * rho4_d + 8 * p1 * my_d +
8 * p2 * mx_d);
mx_u = mx_d - inv_denom_d * Dx_d;
my_u = my_d - inv_denom_d * Dy_d;
} else {
// Recursive distortion model
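// (fixed-point iteration: start from p_u ~= p_d - d(p_d) and repeatedly
// re-evaluate the distortion at the current undistorted estimate)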
int n = 6;
Eigen::Vector2d d_u;
distortion(Eigen::Vector2d(mx_d, my_d), d_u);
// Approximate value
mx_u = mx_d - d_u(0);
my_u = my_d - d_u(1);
for (int i = 1; i < n; ++i) {
distortion(Eigen::Vector2d(mx_u, my_u), d_u);
mx_u = mx_d - d_u(0);
my_u = my_d - d_u(1);
}
}
}
// Lift normalised points to the sphere (inv_hslash)
double xi = mParameters.xi();
if (xi == 1.0) {
lambda = 2.0 / (mx_u * mx_u + my_u * my_u + 1.0);
P << lambda * mx_u, lambda * my_u, lambda - 1.0;
} else {
lambda = (xi + sqrt(1.0 + (1.0 - xi * xi) * (mx_u * mx_u + my_u * my_u))) /
(1.0 + mx_u * mx_u + my_u * my_u);
P << lambda * mx_u, lambda * my_u, lambda - xi;
}
}
/**
* \brief Lifts a point from the image plane to its projective ray
*
* \param p image coordinates
* \param P coordinates of the projective ray
*/
void CataCamera::liftProjective(
const Eigen::Vector2d &p, Eigen::Vector3d &P) const {
double mx_d, my_d, mx2_d, mxy_d, my2_d, mx_u, my_u;
double rho2_d, rho4_d, radDist_d, Dx_d, Dy_d, inv_denom_d;
// double lambda;
// Lift points to normalised plane
mx_d = m_inv_K11 * p(0) + m_inv_K13;
my_d = m_inv_K22 * p(1) + m_inv_K23;
if (m_noDistortion) {
mx_u = mx_d;
my_u = my_d;
} else {
if (0) {
double k1 = mParameters.k1();
double k2 = mParameters.k2();
double p1 = mParameters.p1();
double p2 = mParameters.p2();
// Apply inverse distortion model
// proposed by Heikkila
mx2_d = mx_d * mx_d;
my2_d = my_d * my_d;
mxy_d = mx_d * my_d;
rho2_d = mx2_d + my2_d;
rho4_d = rho2_d * rho2_d;
radDist_d = k1 * rho2_d + k2 * rho4_d;
Dx_d = mx_d * radDist_d + p2 * (rho2_d + 2 * mx2_d) + 2 * p1 * mxy_d;
Dy_d = my_d * radDist_d + p1 * (rho2_d + 2 * my2_d) + 2 * p2 * mxy_d;
inv_denom_d = 1 / (1 + 4 * k1 * rho2_d + 6 * k2 * rho4_d + 8 * p1 * my_d +
8 * p2 * mx_d);
mx_u = mx_d - inv_denom_d * Dx_d;
my_u = my_d - inv_denom_d * Dy_d;
} else {
// Recursive distortion model
int n = 8;
Eigen::Vector2d d_u;
distortion(Eigen::Vector2d(mx_d, my_d), d_u);
// Approximate value
mx_u = mx_d - d_u(0);
my_u = my_d - d_u(1);
for (int i = 1; i < n; ++i) {
distortion(Eigen::Vector2d(mx_u, my_u), d_u);
mx_u = mx_d - d_u(0);
my_u = my_d - d_u(1);
}
}
}
// Obtain a projective ray
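// Closed-form back-projection of the unified (Mei) model: x and y stay at the
// undistorted normalised coordinates; only the ray's z-component depends on
// the mirror parameter xi.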
double xi = mParameters.xi();
if (xi == 1.0) {
P << mx_u, my_u, (1.0 - mx_u * mx_u - my_u * my_u) / 2.0;
} else {
// Reuse variable
rho2_d = mx_u * mx_u + my_u * my_u;
P << mx_u, my_u,
1.0 - xi * (rho2_d + 1.0) / (xi + sqrt(1.0 + (1.0 - xi * xi) * rho2_d));
}
}
/**
* \brief Project a 3D point (\a x,\a y,\a z) to the image plane in (\a u,\a v)
*
* \param P 3D point coordinates
* \param p return value, contains the image point coordinates
*/
void CataCamera::spaceToPlane(
const Eigen::Vector3d &P, Eigen::Vector2d &p) const {
Eigen::Vector2d p_u, p_d;
// Project points to the normalised plane
double z = P(2) + mParameters.xi() * P.norm();
p_u << P(0) / z, P(1) / z;
if (m_noDistortion) {
p_d = p_u;
} else {
// Apply distortion
Eigen::Vector2d d_u;
distortion(p_u, d_u);
p_d = p_u + d_u;
}
// Apply generalised projection matrix
p << mParameters.gamma1() * p_d(0) + mParameters.u0(),
mParameters.gamma2() * p_d(1) + mParameters.v0();
}
#if 0
/**
* \brief Project a 3D point to the image plane and calculate Jacobian
*
* \param P 3D point coordinates
* \param p return value, contains the image point coordinates
*/
void
CataCamera::spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p,
Eigen::Matrix<double,2,3>& J) const
{
double xi = mParameters.xi();
Eigen::Vector2d p_u, p_d;
double norm, inv_denom;
double dxdmx, dydmx, dxdmy, dydmy;
norm = P.norm();
// Project points to the normalised plane
inv_denom = 1.0 / (P(2) + xi * norm);
p_u << inv_denom * P(0), inv_denom * P(1);
// Calculate jacobian
inv_denom = inv_denom * inv_denom / norm;
double dudx = inv_denom * (norm * P(2) + xi * (P(1) * P(1) + P(2) * P(2)));
double dvdx = -inv_denom * xi * P(0) * P(1);
double dudy = dvdx;
double dvdy = inv_denom * (norm * P(2) + xi * (P(0) * P(0) + P(2) * P(2)));
inv_denom = inv_denom * (-xi * P(2) - norm); // reuse variable
double dudz = P(0) * inv_denom;
double dvdz = P(1) * inv_denom;
if (m_noDistortion)
{
p_d = p_u;
}
else
{
// Apply distortion
Eigen::Vector2d d_u;
distortion(p_u, d_u);
p_d = p_u + d_u;
}
double gamma1 = mParameters.gamma1();
double gamma2 = mParameters.gamma2();
// Make the product of the jacobians
// and add projection matrix jacobian
inv_denom = gamma1 * (dudx * dxdmx + dvdx * dxdmy); // reuse
dvdx = gamma2 * (dudx * dydmx + dvdx * dydmy);
dudx = inv_denom;
inv_denom = gamma1 * (dudy * dxdmx + dvdy * dxdmy); // reuse
dvdy = gamma2 * (dudy * dydmx + dvdy * dydmy);
dudy = inv_denom;
inv_denom = gamma1 * (dudz * dxdmx + dvdz * dxdmy); // reuse
dvdz = gamma2 * (dudz * dydmx + dvdz * dydmy);
dudz = inv_denom;
// Apply generalised projection matrix
p << gamma1 * p_d(0) + mParameters.u0(),
gamma2 * p_d(1) + mParameters.v0();
J << dudx, dudy, dudz,
dvdx, dvdy, dvdz;
}
#endif
/**
* \brief Projects an undistorted 2D point p_u to the image plane
*
* \param p_u 2D point coordinates
* \param p return value, contains the image point coordinates
*/
void CataCamera::undistToPlane(
const Eigen::Vector2d &p_u, Eigen::Vector2d &p) const {
Eigen::Vector2d p_d;
if (m_noDistortion) {
p_d = p_u;
} else {
// Apply distortion
Eigen::Vector2d d_u;
distortion(p_u, d_u);
p_d = p_u + d_u;
}
// Apply generalised projection matrix
p << mParameters.gamma1() * p_d(0) + mParameters.u0(),
mParameters.gamma2() * p_d(1) + mParameters.v0();
}
/**
* \brief Apply distortion to input point (from the normalised plane)
*
* \param p_u undistorted coordinates of point on the normalised plane
* \param d_u return value, distortion offset such that the distorted point is p_d = p_u + d_u
*/
void CataCamera::distortion(
const Eigen::Vector2d &p_u, Eigen::Vector2d &d_u) const {
double k1 = mParameters.k1();
double k2 = mParameters.k2();
double p1 = mParameters.p1();
double p2 = mParameters.p2();
double mx2_u, my2_u, mxy_u, rho2_u, rad_dist_u;
mx2_u = p_u(0) * p_u(0);
my2_u = p_u(1) * p_u(1);
mxy_u = p_u(0) * p_u(1);
rho2_u = mx2_u + my2_u;
rad_dist_u = k1 * rho2_u + k2 * rho2_u * rho2_u;
d_u << p_u(0) * rad_dist_u + 2.0 * p1 * mxy_u + p2 * (rho2_u + 2.0 * mx2_u),
p_u(1) * rad_dist_u + 2.0 * p2 * mxy_u + p1 * (rho2_u + 2.0 * my2_u);
}
/**
* \brief Apply distortion to input point (from the normalised plane)
* and calculate Jacobian
*
* \param p_u undistorted coordinates of point on the normalised plane
* \param d_u return value, distortion offset such that the distorted point is p_d = p_u + d_u
*/
void CataCamera::distortion(
const Eigen::Vector2d &p_u, Eigen::Vector2d &d_u,
Eigen::Matrix2d &J) const {
double k1 = mParameters.k1();
double k2 = mParameters.k2();
double p1 = mParameters.p1();
double p2 = mParameters.p2();
double mx2_u, my2_u, mxy_u, rho2_u, rad_dist_u;
mx2_u = p_u(0) * p_u(0);
my2_u = p_u(1) * p_u(1);
mxy_u = p_u(0) * p_u(1);
rho2_u = mx2_u + my2_u;
rad_dist_u = k1 * rho2_u + k2 * rho2_u * rho2_u;
d_u << p_u(0) * rad_dist_u + 2.0 * p1 * mxy_u + p2 * (rho2_u + 2.0 * mx2_u),
p_u(1) * rad_dist_u + 2.0 * p2 * mxy_u + p1 * (rho2_u + 2.0 * my2_u);
double dxdmx = 1.0 + rad_dist_u + k1 * 2.0 * mx2_u +
k2 * rho2_u * 4.0 * mx2_u + 2.0 * p1 * p_u(1) +
6.0 * p2 * p_u(0);
double dydmx = k1 * 2.0 * p_u(0) * p_u(1) +
k2 * 4.0 * rho2_u * p_u(0) * p_u(1) + p1 * 2.0 * p_u(0) +
2.0 * p2 * p_u(1);
double dxdmy = dydmx;
double dydmy = 1.0 + rad_dist_u + k1 * 2.0 * my2_u +
k2 * rho2_u * 4.0 * my2_u + 6.0 * p1 * p_u(1) +
2.0 * p2 * p_u(0);
J << dxdmx, dxdmy, dydmx, dydmy;
}
void CataCamera::initUndistortMap(
cv::Mat &map1, cv::Mat &map2, double fScale) const {
cv::Size imageSize(mParameters.imageWidth(), mParameters.imageHeight());
cv::Mat mapX = cv::Mat::zeros(imageSize, CV_32F);
cv::Mat mapY = cv::Mat::zeros(imageSize, CV_32F);
for (int v = 0; v < imageSize.height; ++v) {
for (int u = 0; u < imageSize.width; ++u) {
double mx_u = m_inv_K11 / fScale * u + m_inv_K13 / fScale;
double my_u = m_inv_K22 / fScale * v + m_inv_K23 / fScale;
double xi = mParameters.xi();
double d2 = mx_u * mx_u + my_u * my_u;
Eigen::Vector3d P;
P << mx_u, my_u,
1.0 - xi * (d2 + 1.0) / (xi + sqrt(1.0 + (1.0 - xi * xi) * d2));
Eigen::Vector2d p;
spaceToPlane(P, p);
mapX.at<float>(v, u) = p(0);
mapY.at<float>(v, u) = p(1);
}
}
cv::convertMaps(mapX, mapY, map1, map2, CV_32FC1, false);
}
cv::Mat CataCamera::initUndistortRectifyMap(
cv::Mat &map1, cv::Mat &map2, float fx, float fy, cv::Size imageSize,
float cx, float cy, cv::Mat rmat) const {
if (imageSize == cv::Size(0, 0)) {
imageSize = cv::Size(mParameters.imageWidth(), mParameters.imageHeight());
}
cv::Mat mapX = cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F);
cv::Mat mapY = cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F);
Eigen::Matrix3f K_rect;
if (cx == -1.0f && cy == -1.0f) {
K_rect << fx, 0, imageSize.width / 2, 0, fy, imageSize.height / 2, 0, 0, 1;
} else {
K_rect << fx, 0, cx, 0, fy, cy, 0, 0, 1;
}
if (fx == -1.0f || fy == -1.0f) {
K_rect(0, 0) = mParameters.gamma1();
K_rect(1, 1) = mParameters.gamma2();
}
Eigen::Matrix3f K_rect_inv = K_rect.inverse();
Eigen::Matrix3f R, R_inv;
cv::cv2eigen(rmat, R);
R_inv = R.inverse();
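// For every rectified pixel, back-project through the ideal pinhole K_rect and
// the rectifying rotation, then re-project with the full catadioptric model to
// find where to sample the source image.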
for (int v = 0; v < imageSize.height; ++v) {
for (int u = 0; u < imageSize.width; ++u) {
Eigen::Vector3f xo;
xo << u, v, 1;
Eigen::Vector3f uo = R_inv * K_rect_inv * xo;
Eigen::Vector2d p;
spaceToPlane(uo.cast<double>(), p);
mapX.at<float>(v, u) = p(0);
mapY.at<float>(v, u) = p(1);
}
}
cv::convertMaps(mapX, mapY, map1, map2, CV_32FC1, false);
cv::Mat K_rect_cv;
cv::eigen2cv(K_rect, K_rect_cv);
return K_rect_cv;
}
int CataCamera::parameterCount(void) const {
return 9;
}
const CataCamera::Parameters &CataCamera::getParameters(void) const {
return mParameters;
}
void CataCamera::setParameters(const CataCamera::Parameters &parameters) {
mParameters = parameters;
if ((mParameters.k1() == 0.0) && (mParameters.k2() == 0.0) &&
(mParameters.p1() == 0.0) && (mParameters.p2() == 0.0)) {
m_noDistortion = true;
} else {
m_noDistortion = false;
}
m_inv_K11 = 1.0 / mParameters.gamma1();
m_inv_K13 = -mParameters.u0() / mParameters.gamma1();
m_inv_K22 = 1.0 / mParameters.gamma2();
m_inv_K23 = -mParameters.v0() / mParameters.gamma2();
}
void CataCamera::readParameters(const std::vector<double> &parameterVec) {
if ((int)parameterVec.size() != parameterCount()) {
return;
}
Parameters params = getParameters();
params.xi() = parameterVec.at(0);
params.k1() = parameterVec.at(1);
params.k2() = parameterVec.at(2);
params.p1() = parameterVec.at(3);
params.p2() = parameterVec.at(4);
params.gamma1() = parameterVec.at(5);
params.gamma2() = parameterVec.at(6);
params.u0() = parameterVec.at(7);
params.v0() = parameterVec.at(8);
setParameters(params);
}
void CataCamera::writeParameters(std::vector<double> &parameterVec) const {
parameterVec.resize(parameterCount());
parameterVec.at(0) = mParameters.xi();
parameterVec.at(1) = mParameters.k1();
parameterVec.at(2) = mParameters.k2();
parameterVec.at(3) = mParameters.p1();
parameterVec.at(4) = mParameters.p2();
parameterVec.at(5) = mParameters.gamma1();
parameterVec.at(6) = mParameters.gamma2();
parameterVec.at(7) = mParameters.u0();
parameterVec.at(8) = mParameters.v0();
}
void CataCamera::writeParametersToYamlFile(const std::string &filename) const {
mParameters.writeToYamlFile(filename);
}
std::string CataCamera::parametersToString(void) const {
std::ostringstream oss;
oss << mParameters;
return oss.str();
}
}

View File

@@ -1,752 +0,0 @@
#include "camodocal/camera_models/PinholeCamera.h"
#include <cmath>
#include <cstdio>
#include "eigen3/Eigen/Dense"
#include <iomanip>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/core/eigen.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "camodocal/gpl/gpl.h"
namespace camodocal {
PinholeCamera::Parameters::Parameters()
: Camera::Parameters(PINHOLE),
m_k1(0.0),
m_k2(0.0),
m_p1(0.0),
m_p2(0.0),
m_fx(0.0),
m_fy(0.0),
m_cx(0.0),
m_cy(0.0) {}
PinholeCamera::Parameters::Parameters(
const std::string &cameraName, int w, int h, double k1, double k2,
double p1, double p2, double fx, double fy, double cx, double cy)
: Camera::Parameters(PINHOLE, cameraName, w, h),
m_k1(k1),
m_k2(k2),
m_p1(p1),
m_p2(p2),
m_fx(fx),
m_fy(fy),
m_cx(cx),
m_cy(cy) {}
double &PinholeCamera::Parameters::k1(void) {
return m_k1;
}
double &PinholeCamera::Parameters::k2(void) {
return m_k2;
}
double &PinholeCamera::Parameters::p1(void) {
return m_p1;
}
double &PinholeCamera::Parameters::p2(void) {
return m_p2;
}
double &PinholeCamera::Parameters::fx(void) {
return m_fx;
}
double &PinholeCamera::Parameters::fy(void) {
return m_fy;
}
double &PinholeCamera::Parameters::cx(void) {
return m_cx;
}
double &PinholeCamera::Parameters::cy(void) {
return m_cy;
}
double PinholeCamera::Parameters::k1(void) const {
return m_k1;
}
double PinholeCamera::Parameters::k2(void) const {
return m_k2;
}
double PinholeCamera::Parameters::p1(void) const {
return m_p1;
}
double PinholeCamera::Parameters::p2(void) const {
return m_p2;
}
double PinholeCamera::Parameters::fx(void) const {
return m_fx;
}
double PinholeCamera::Parameters::fy(void) const {
return m_fy;
}
double PinholeCamera::Parameters::cx(void) const {
return m_cx;
}
double PinholeCamera::Parameters::cy(void) const {
return m_cy;
}
bool PinholeCamera::Parameters::readFromYamlFile(const std::string &filename) {
cv::FileStorage fs(filename, cv::FileStorage::READ);
if (!fs.isOpened()) {
return false;
}
if (!fs["model_type"].isNone()) {
std::string sModelType;
fs["model_type"] >> sModelType;
if (sModelType.compare("PINHOLE") != 0) {
return false;
}
}
m_modelType = PINHOLE;
fs["camera_name"] >> m_cameraName;
m_imageWidth = static_cast<int>(fs["image_width"]);
m_imageHeight = static_cast<int>(fs["image_height"]);
cv::FileNode n = fs["distortion_parameters"];
m_k1 = static_cast<double>(n["k1"]);
m_k2 = static_cast<double>(n["k2"]);
m_p1 = static_cast<double>(n["p1"]);
m_p2 = static_cast<double>(n["p2"]);
n = fs["projection_parameters"];
m_fx = static_cast<double>(n["fx"]);
m_fy = static_cast<double>(n["fy"]);
m_cx = static_cast<double>(n["cx"]);
m_cy = static_cast<double>(n["cy"]);
return true;
}
void PinholeCamera::Parameters::writeToYamlFile(
const std::string &filename) const {
cv::FileStorage fs(filename, cv::FileStorage::WRITE);
fs << "model_type"
<< "PINHOLE";
fs << "camera_name" << m_cameraName;
fs << "image_width" << m_imageWidth;
fs << "image_height" << m_imageHeight;
// radial distortion: k1, k2
// tangential distortion: p1, p2
fs << "distortion_parameters";
fs << "{"
<< "k1" << m_k1 << "k2" << m_k2 << "p1" << m_p1 << "p2" << m_p2 << "}";
// projection: fx, fy, cx, cy
fs << "projection_parameters";
fs << "{"
<< "fx" << m_fx << "fy" << m_fy << "cx" << m_cx << "cy" << m_cy << "}";
fs.release();
}
PinholeCamera::Parameters &PinholeCamera::Parameters::operator=(
const PinholeCamera::Parameters &other) {
if (this != &other) {
m_modelType = other.m_modelType;
m_cameraName = other.m_cameraName;
m_imageWidth = other.m_imageWidth;
m_imageHeight = other.m_imageHeight;
m_k1 = other.m_k1;
m_k2 = other.m_k2;
m_p1 = other.m_p1;
m_p2 = other.m_p2;
m_fx = other.m_fx;
m_fy = other.m_fy;
m_cx = other.m_cx;
m_cy = other.m_cy;
}
return *this;
}
std::ostream &operator<<(
std::ostream &out, const PinholeCamera::Parameters &params) {
out << "Camera Parameters:" << std::endl;
out << " model_type "
<< "PINHOLE" << std::endl;
out << " camera_name " << params.m_cameraName << std::endl;
out << " image_width " << params.m_imageWidth << std::endl;
out << " image_height " << params.m_imageHeight << std::endl;
// radial distortion: k1, k2
// tangential distortion: p1, p2
out << "Distortion Parameters" << std::endl;
out << " k1 " << params.m_k1 << std::endl
<< " k2 " << params.m_k2 << std::endl
<< " p1 " << params.m_p1 << std::endl
<< " p2 " << params.m_p2 << std::endl;
// projection: fx, fy, cx, cy
out << "Projection Parameters" << std::endl;
out << " fx " << params.m_fx << std::endl
<< " fy " << params.m_fy << std::endl
<< " cx " << params.m_cx << std::endl
<< " cy " << params.m_cy << std::endl;
return out;
}
PinholeCamera::PinholeCamera()
: m_inv_K11(1.0),
m_inv_K13(0.0),
m_inv_K22(1.0),
m_inv_K23(0.0),
m_noDistortion(true) {}
PinholeCamera::PinholeCamera(
const std::string &cameraName, int imageWidth, int imageHeight, double k1,
double k2, double p1, double p2, double fx, double fy, double cx, double cy)
: mParameters(
cameraName, imageWidth, imageHeight, k1, k2, p1, p2, fx, fy, cx, cy) {
if ((mParameters.k1() == 0.0) && (mParameters.k2() == 0.0) &&
(mParameters.p1() == 0.0) && (mParameters.p2() == 0.0)) {
m_noDistortion = true;
} else {
m_noDistortion = false;
}
// Inverse camera projection matrix parameters
m_inv_K11 = 1.0 / mParameters.fx();
m_inv_K13 = -mParameters.cx() / mParameters.fx();
m_inv_K22 = 1.0 / mParameters.fy();
m_inv_K23 = -mParameters.cy() / mParameters.fy();
}
PinholeCamera::PinholeCamera(const PinholeCamera::Parameters &params)
: mParameters(params) {
if ((mParameters.k1() == 0.0) && (mParameters.k2() == 0.0) &&
(mParameters.p1() == 0.0) && (mParameters.p2() == 0.0)) {
m_noDistortion = true;
} else {
m_noDistortion = false;
}
// Inverse camera projection matrix parameters
m_inv_K11 = 1.0 / mParameters.fx();
m_inv_K13 = -mParameters.cx() / mParameters.fx();
m_inv_K22 = 1.0 / mParameters.fy();
m_inv_K23 = -mParameters.cy() / mParameters.fy();
}
Camera::ModelType PinholeCamera::modelType(void) const {
return mParameters.modelType();
}
const std::string &PinholeCamera::cameraName(void) const {
return mParameters.cameraName();
}
int PinholeCamera::imageWidth(void) const {
return mParameters.imageWidth();
}
int PinholeCamera::imageHeight(void) const {
return mParameters.imageHeight();
}
void PinholeCamera::estimateIntrinsics(
const cv::Size &boardSize,
const std::vector<std::vector<cv::Point3f> > &objectPoints,
const std::vector<std::vector<cv::Point2f> > &imagePoints) {
// Z. Zhang, A Flexible New Technique for Camera Calibration, PAMI 2000
Parameters params = getParameters();
params.k1() = 0.0;
params.k2() = 0.0;
params.p1() = 0.0;
params.p2() = 0.0;
double cx = params.imageWidth() / 2.0;
double cy = params.imageHeight() / 2.0;
params.cx() = cx;
params.cy() = cy;
size_t nImages = imagePoints.size();
cv::Mat A(nImages * 2, 2, CV_64F);
cv::Mat b(nImages * 2, 1, CV_64F);
for (size_t i = 0; i < nImages; ++i) {
const std::vector<cv::Point3f> &oPoints = objectPoints.at(i);
std::vector<cv::Point2f> M(oPoints.size());
for (size_t j = 0; j < M.size(); ++j) {
M.at(j) = cv::Point2f(oPoints.at(j).x, oPoints.at(j).y);
}
cv::Mat H = cv::findHomography(M, imagePoints.at(i));
H.at<double>(0, 0) -= H.at<double>(2, 0) * cx;
H.at<double>(0, 1) -= H.at<double>(2, 1) * cx;
H.at<double>(0, 2) -= H.at<double>(2, 2) * cx;
H.at<double>(1, 0) -= H.at<double>(2, 0) * cy;
H.at<double>(1, 1) -= H.at<double>(2, 1) * cy;
H.at<double>(1, 2) -= H.at<double>(2, 2) * cy;
double h[3], v[3], d1[3], d2[3];
double n[4] = {0, 0, 0, 0};
for (int j = 0; j < 3; ++j) {
double t0 = H.at<double>(j, 0);
double t1 = H.at<double>(j, 1);
h[j] = t0;
v[j] = t1;
d1[j] = (t0 + t1) * 0.5;
d2[j] = (t0 - t1) * 0.5;
n[0] += t0 * t0;
n[1] += t1 * t1;
n[2] += d1[j] * d1[j];
n[3] += d2[j] * d2[j];
}
for (int j = 0; j < 4; ++j) {
n[j] = 1.0 / sqrt(n[j]);
}
for (int j = 0; j < 3; ++j) {
h[j] *= n[0];
v[j] *= n[1];
d1[j] *= n[2];
d2[j] *= n[3];
}
A.at<double>(i * 2, 0) = h[0] * v[0];
A.at<double>(i * 2, 1) = h[1] * v[1];
A.at<double>(i * 2 + 1, 0) = d1[0] * d2[0];
A.at<double>(i * 2 + 1, 1) = d1[1] * d2[1];
b.at<double>(i * 2, 0) = -h[2] * v[2];
b.at<double>(i * 2 + 1, 0) = -d1[2] * d2[2];
}
cv::Mat f(2, 1, CV_64F);
cv::solve(A, b, f, cv::DECOMP_NORMAL | cv::DECOMP_LU);
params.fx() = sqrt(fabs(1.0 / f.at<double>(0)));
params.fy() = sqrt(fabs(1.0 / f.at<double>(1)));
setParameters(params);
}
/**
* \brief Lifts a point from the image plane to the unit sphere
*
* \param p image coordinates
* \param P coordinates of the point on the sphere
*/
void PinholeCamera::liftSphere(
const Eigen::Vector2d &p, Eigen::Vector3d &P) const {
liftProjective(p, P);
P.normalize();
}
/**
* \brief Lifts a point from the image plane to its projective ray
*
* \param p image coordinates
* \param P coordinates of the projective ray
*/
void PinholeCamera::liftProjective(
const Eigen::Vector2d &p, Eigen::Vector3d &P) const {
double mx_d, my_d, mx2_d, mxy_d, my2_d, mx_u, my_u;
double rho2_d, rho4_d, radDist_d, Dx_d, Dy_d, inv_denom_d;
// double lambda;
// Lift points to normalised plane
mx_d = m_inv_K11 * p(0) + m_inv_K13;
my_d = m_inv_K22 * p(1) + m_inv_K23;
if (m_noDistortion) {
mx_u = mx_d;
my_u = my_d;
} else {
if (0) {
double k1 = mParameters.k1();
double k2 = mParameters.k2();
double p1 = mParameters.p1();
double p2 = mParameters.p2();
// Apply inverse distortion model
// proposed by Heikkila
mx2_d = mx_d * mx_d;
my2_d = my_d * my_d;
mxy_d = mx_d * my_d;
rho2_d = mx2_d + my2_d;
rho4_d = rho2_d * rho2_d;
radDist_d = k1 * rho2_d + k2 * rho4_d;
Dx_d = mx_d * radDist_d + p2 * (rho2_d + 2 * mx2_d) + 2 * p1 * mxy_d;
Dy_d = my_d * radDist_d + p1 * (rho2_d + 2 * my2_d) + 2 * p2 * mxy_d;
inv_denom_d = 1 / (1 + 4 * k1 * rho2_d + 6 * k2 * rho4_d + 8 * p1 * my_d +
8 * p2 * mx_d);
mx_u = mx_d - inv_denom_d * Dx_d;
my_u = my_d - inv_denom_d * Dy_d;
} else {
// Recursive distortion model
int n = 8;
Eigen::Vector2d d_u;
distortion(Eigen::Vector2d(mx_d, my_d), d_u);
// Approximate value
mx_u = mx_d - d_u(0);
my_u = my_d - d_u(1);
for (int i = 1; i < n; ++i) {
distortion(Eigen::Vector2d(mx_u, my_u), d_u);
mx_u = mx_d - d_u(0);
my_u = my_d - d_u(1);
}
}
}
// Obtain a projective ray
P << mx_u, my_u, 1.0;
}
/**
* \brief Project a 3D point (\a x,\a y,\a z) to the image plane in (\a u,\a v)
*
* \param P 3D point coordinates
* \param p return value, contains the image point coordinates
*/
void PinholeCamera::spaceToPlane(
const Eigen::Vector3d &P, Eigen::Vector2d &p) const {
Eigen::Vector2d p_u, p_d;
// Project points to the normalised plane
p_u << P(0) / P(2), P(1) / P(2);
if (m_noDistortion) {
p_d = p_u;
} else {
// Apply distortion
Eigen::Vector2d d_u;
distortion(p_u, d_u);
p_d = p_u + d_u;
}
// Apply generalised projection matrix
p << mParameters.fx() * p_d(0) + mParameters.cx(),
mParameters.fy() * p_d(1) + mParameters.cy();
}
#if 0
/**
* \brief Project a 3D point to the image plane and calculate Jacobian
*
* \param P 3D point coordinates
* \param p return value, contains the image point coordinates
*/
void
PinholeCamera::spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p,
Eigen::Matrix<double,2,3>& J) const
{
Eigen::Vector2d p_u, p_d;
double norm, inv_denom;
double dxdmx, dydmx, dxdmy, dydmy;
norm = P.norm();
// Project points to the normalised plane
inv_denom = 1.0 / P(2);
p_u << inv_denom * P(0), inv_denom * P(1);
// Calculate jacobian
double dudx = inv_denom;
double dvdx = 0.0;
double dudy = 0.0;
double dvdy = inv_denom;
inv_denom = - inv_denom * inv_denom;
double dudz = P(0) * inv_denom;
double dvdz = P(1) * inv_denom;
if (m_noDistortion)
{
p_d = p_u;
}
else
{
// Apply distortion
Eigen::Vector2d d_u;
distortion(p_u, d_u);
p_d = p_u + d_u;
}
double fx = mParameters.fx();
double fy = mParameters.fy();
// Make the product of the jacobians
// and add projection matrix jacobian
inv_denom = fx * (dudx * dxdmx + dvdx * dxdmy); // reuse
dvdx = fy * (dudx * dydmx + dvdx * dydmy);
dudx = inv_denom;
inv_denom = fx * (dudy * dxdmx + dvdy * dxdmy); // reuse
dvdy = fy * (dudy * dydmx + dvdy * dydmy);
dudy = inv_denom;
inv_denom = fx * (dudz * dxdmx + dvdz * dxdmy); // reuse
dvdz = fy * (dudz * dydmx + dvdz * dydmy);
dudz = inv_denom;
// Apply generalised projection matrix
p << fx * p_d(0) + mParameters.cx(),
fy * p_d(1) + mParameters.cy();
J << dudx, dudy, dudz,
dvdx, dvdy, dvdz;
}
#endif
/**
* \brief Projects an undistorted 2D point p_u to the image plane
*
* \param p_u 2D point coordinates
 * \param p return value, contains the image point coordinates
*/
void PinholeCamera::undistToPlane(
const Eigen::Vector2d &p_u, Eigen::Vector2d &p) const {
Eigen::Vector2d p_d;
if (m_noDistortion) {
p_d = p_u;
} else {
// Apply distortion
Eigen::Vector2d d_u;
distortion(p_u, d_u);
p_d = p_u + d_u;
}
// Apply generalised projection matrix
p << mParameters.fx() * p_d(0) + mParameters.cx(),
mParameters.fy() * p_d(1) + mParameters.cy();
}
/**
* \brief Apply distortion to input point (from the normalised plane)
*
* \param p_u undistorted coordinates of point on the normalised plane
 * \param d_u distortion offset; the distorted point is obtained as p_d = p_u + d_u
*/
void PinholeCamera::distortion(
const Eigen::Vector2d &p_u, Eigen::Vector2d &d_u) const {
double k1 = mParameters.k1();
double k2 = mParameters.k2();
double p1 = mParameters.p1();
double p2 = mParameters.p2();
double mx2_u, my2_u, mxy_u, rho2_u, rad_dist_u;
mx2_u = p_u(0) * p_u(0);
my2_u = p_u(1) * p_u(1);
mxy_u = p_u(0) * p_u(1);
rho2_u = mx2_u + my2_u;
rad_dist_u = k1 * rho2_u + k2 * rho2_u * rho2_u;
d_u << p_u(0) * rad_dist_u + 2.0 * p1 * mxy_u + p2 * (rho2_u + 2.0 * mx2_u),
p_u(1) * rad_dist_u + 2.0 * p2 * mxy_u + p1 * (rho2_u + 2.0 * my2_u);
}
/**
* \brief Apply distortion to input point (from the normalised plane)
* and calculate Jacobian
*
* \param p_u undistorted coordinates of point on the normalised plane
 * \param d_u distortion offset; the distorted point is obtained as p_d = p_u + d_u
*/
void PinholeCamera::distortion(
const Eigen::Vector2d &p_u, Eigen::Vector2d &d_u,
Eigen::Matrix2d &J) const {
double k1 = mParameters.k1();
double k2 = mParameters.k2();
double p1 = mParameters.p1();
double p2 = mParameters.p2();
double mx2_u, my2_u, mxy_u, rho2_u, rad_dist_u;
mx2_u = p_u(0) * p_u(0);
my2_u = p_u(1) * p_u(1);
mxy_u = p_u(0) * p_u(1);
rho2_u = mx2_u + my2_u;
rad_dist_u = k1 * rho2_u + k2 * rho2_u * rho2_u;
d_u << p_u(0) * rad_dist_u + 2.0 * p1 * mxy_u + p2 * (rho2_u + 2.0 * mx2_u),
p_u(1) * rad_dist_u + 2.0 * p2 * mxy_u + p1 * (rho2_u + 2.0 * my2_u);
double dxdmx = 1.0 + rad_dist_u + k1 * 2.0 * mx2_u +
k2 * rho2_u * 4.0 * mx2_u + 2.0 * p1 * p_u(1) +
6.0 * p2 * p_u(0);
double dydmx = k1 * 2.0 * p_u(0) * p_u(1) +
k2 * 4.0 * rho2_u * p_u(0) * p_u(1) + p1 * 2.0 * p_u(0) +
2.0 * p2 * p_u(1);
double dxdmy = dydmx;
double dydmy = 1.0 + rad_dist_u + k1 * 2.0 * my2_u +
k2 * rho2_u * 4.0 * my2_u + 6.0 * p1 * p_u(1) +
2.0 * p2 * p_u(0);
J << dxdmx, dxdmy, dydmx, dydmy;
}
void PinholeCamera::initUndistortMap(
cv::Mat &map1, cv::Mat &map2, double fScale) const {
cv::Size imageSize(mParameters.imageWidth(), mParameters.imageHeight());
cv::Mat mapX = cv::Mat::zeros(imageSize, CV_32F);
cv::Mat mapY = cv::Mat::zeros(imageSize, CV_32F);
for (int v = 0; v < imageSize.height; ++v) {
for (int u = 0; u < imageSize.width; ++u) {
double mx_u = m_inv_K11 / fScale * u + m_inv_K13 / fScale;
double my_u = m_inv_K22 / fScale * v + m_inv_K23 / fScale;
Eigen::Vector3d P;
P << mx_u, my_u, 1.0;
Eigen::Vector2d p;
spaceToPlane(P, p);
mapX.at<float>(v, u) = p(0);
mapY.at<float>(v, u) = p(1);
}
}
cv::convertMaps(mapX, mapY, map1, map2, CV_32FC1, false);
}
cv::Mat PinholeCamera::initUndistortRectifyMap(
cv::Mat &map1, cv::Mat &map2, float fx, float fy, cv::Size imageSize,
float cx, float cy, cv::Mat rmat) const {
if (imageSize == cv::Size(0, 0)) {
imageSize = cv::Size(mParameters.imageWidth(), mParameters.imageHeight());
}
cv::Mat mapX = cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F);
cv::Mat mapY = cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F);
Eigen::Matrix3f R, R_inv;
cv::cv2eigen(rmat, R);
R_inv = R.inverse();
// assume no skew
Eigen::Matrix3f K_rect;
if (cx == -1.0f || cy == -1.0f) {
K_rect << fx, 0, imageSize.width / 2, 0, fy, imageSize.height / 2, 0, 0, 1;
} else {
K_rect << fx, 0, cx, 0, fy, cy, 0, 0, 1;
}
if (fx == -1.0f || fy == -1.0f) {
K_rect(0, 0) = mParameters.fx();
K_rect(1, 1) = mParameters.fy();
}
Eigen::Matrix3f K_rect_inv = K_rect.inverse();
for (int v = 0; v < imageSize.height; ++v) {
for (int u = 0; u < imageSize.width; ++u) {
Eigen::Vector3f xo;
xo << u, v, 1;
Eigen::Vector3f uo = R_inv * K_rect_inv * xo;
Eigen::Vector2d p;
spaceToPlane(uo.cast<double>(), p);
mapX.at<float>(v, u) = p(0);
mapY.at<float>(v, u) = p(1);
}
}
cv::convertMaps(mapX, mapY, map1, map2, CV_32FC1, false);
cv::Mat K_rect_cv;
cv::eigen2cv(K_rect, K_rect_cv);
return K_rect_cv;
}
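// Illustrative helper, not from the original camodocal sources: how the maps
// produced above are typically consumed. The focal lengths and the identity
// rotation passed here are example arguments; cv::remap applies the
// rectification to an input image.
static cv::Mat rectifyImageSketch(
    const PinholeCamera &cam, const cv::Mat &distorted, float fx, float fy) {
  cv::Mat map1, map2;
  cam.initUndistortRectifyMap(
      map1, map2, fx, fy, cv::Size(0, 0), -1.0f, -1.0f,
      cv::Mat::eye(3, 3, CV_32F));
  cv::Mat rectified;
  cv::remap(distorted, rectified, map1, map2, cv::INTER_LINEAR);
  return rectified;
}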
int PinholeCamera::parameterCount(void) const {
return 8;
}
const PinholeCamera::Parameters &PinholeCamera::getParameters(void) const {
return mParameters;
}
void PinholeCamera::setParameters(const PinholeCamera::Parameters &parameters) {
mParameters = parameters;
if ((mParameters.k1() == 0.0) && (mParameters.k2() == 0.0) &&
(mParameters.p1() == 0.0) && (mParameters.p2() == 0.0)) {
m_noDistortion = true;
} else {
m_noDistortion = false;
}
m_inv_K11 = 1.0 / mParameters.fx();
m_inv_K13 = -mParameters.cx() / mParameters.fx();
m_inv_K22 = 1.0 / mParameters.fy();
m_inv_K23 = -mParameters.cy() / mParameters.fy();
}
void PinholeCamera::readParameters(const std::vector<double> &parameterVec) {
if ((int)parameterVec.size() != parameterCount()) {
return;
}
Parameters params = getParameters();
params.k1() = parameterVec.at(0);
params.k2() = parameterVec.at(1);
params.p1() = parameterVec.at(2);
params.p2() = parameterVec.at(3);
params.fx() = parameterVec.at(4);
params.fy() = parameterVec.at(5);
params.cx() = parameterVec.at(6);
params.cy() = parameterVec.at(7);
setParameters(params);
}
void PinholeCamera::writeParameters(std::vector<double> &parameterVec) const {
parameterVec.resize(parameterCount());
parameterVec.at(0) = mParameters.k1();
parameterVec.at(1) = mParameters.k2();
parameterVec.at(2) = mParameters.p1();
parameterVec.at(3) = mParameters.p2();
parameterVec.at(4) = mParameters.fx();
parameterVec.at(5) = mParameters.fy();
parameterVec.at(6) = mParameters.cx();
parameterVec.at(7) = mParameters.cy();
}
void PinholeCamera::writeParametersToYamlFile(
const std::string &filename) const {
mParameters.writeToYamlFile(filename);
}
std::string PinholeCamera::parametersToString(void) const {
std::ostringstream oss;
oss << mParameters;
return oss.str();
}
}
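For reference, a minimal round-trip usage sketch of the removed PinholeCamera class; the intrinsics below are illustrative values, not a real calibration:

#include "camodocal/camera_models/PinholeCamera.h"
#include <iostream>

int main() {
  camodocal::PinholeCamera cam(
      "example_pinhole", 640, 480,
      -0.28, 0.07, 0.0002, -0.0001,  // k1, k2, p1, p2 (illustrative)
      460.0, 460.0, 320.0, 240.0);   // fx, fy, cx, cy (illustrative)
  Eigen::Vector3d P(0.1, -0.2, 1.5);  // point in the camera frame
  Eigen::Vector2d p;
  cam.spaceToPlane(P, p);             // project to pixel coordinates
  Eigen::Vector3d ray;
  cam.liftProjective(p, ray);         // lift back to a projective ray
  std::cout << "pixel: " << p.transpose()
            << "  ray: " << ray.transpose() << std::endl;
  return 0;
}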

View File

@@ -1,802 +0,0 @@
#include "camodocal/camera_models/ScaramuzzaCamera.h"
#include <boost/algorithm/string.hpp>
#include <boost/lexical_cast.hpp>
#include <cmath>
#include <cstdio>
#include "eigen3/Eigen/Dense"
#include "eigen3/Eigen/SVD"
#include <iomanip>
#include <iostream>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/core/eigen.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "camodocal/gpl/gpl.h"
Eigen::VectorXd polyfit(
Eigen::VectorXd &xVec, Eigen::VectorXd &yVec, int poly_order) {
assert(poly_order > 0);
assert(xVec.size() > poly_order);
assert(xVec.size() == yVec.size());
Eigen::MatrixXd A(xVec.size(), poly_order + 1);
Eigen::VectorXd B(xVec.size());
for (int i = 0; i < xVec.size(); ++i) {
const double x = xVec(i);
const double y = yVec(i);
double x_pow_k = 1.0;
for (int k = 0; k <= poly_order; ++k) {
A(i, k) = x_pow_k;
x_pow_k *= x;
}
B(i) = y;
}
Eigen::JacobiSVD<Eigen::MatrixXd> svd(
A, Eigen::ComputeThinU | Eigen::ComputeThinV);
Eigen::VectorXd x = svd.solve(B);
return x;
}
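// Illustrative usage, not from the original camodocal sources: fitting
// y = 1 + 2x + 3x^2 from exact samples; polyfit returns the coefficients in
// ascending powers of x, so the result should be approximately (1, 2, 3).
static Eigen::VectorXd polyfitUsageSketch() {
  Eigen::VectorXd xs(5), ys(5);
  for (int i = 0; i < 5; ++i) {
    const double x = static_cast<double>(i);
    xs(i) = x;
    ys(i) = 1.0 + 2.0 * x + 3.0 * x * x;
  }
  return polyfit(xs, ys, 2);
}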
namespace camodocal {
OCAMCamera::Parameters::Parameters()
: Camera::Parameters(SCARAMUZZA),
m_C(0.0),
m_D(0.0),
m_E(0.0),
m_center_x(0.0),
m_center_y(0.0) {
memset(m_poly, 0, sizeof(double) * SCARAMUZZA_POLY_SIZE);
memset(m_inv_poly, 0, sizeof(double) * SCARAMUZZA_INV_POLY_SIZE);
}
bool OCAMCamera::Parameters::readFromYamlFile(const std::string &filename) {
cv::FileStorage fs(filename, cv::FileStorage::READ);
if (!fs.isOpened()) {
return false;
}
if (!fs["model_type"].isNone()) {
std::string sModelType;
fs["model_type"] >> sModelType;
if (!boost::iequals(sModelType, "scaramuzza")) {
return false;
}
}
m_modelType = SCARAMUZZA;
fs["camera_name"] >> m_cameraName;
m_imageWidth = static_cast<int>(fs["image_width"]);
m_imageHeight = static_cast<int>(fs["image_height"]);
cv::FileNode n = fs["poly_parameters"];
for (int i = 0; i < SCARAMUZZA_POLY_SIZE; i++)
m_poly[i] = static_cast<double>(
n[std::string("p") + boost::lexical_cast<std::string>(i)]);
n = fs["inv_poly_parameters"];
for (int i = 0; i < SCARAMUZZA_INV_POLY_SIZE; i++)
m_inv_poly[i] = static_cast<double>(
n[std::string("p") + boost::lexical_cast<std::string>(i)]);
n = fs["affine_parameters"];
m_C = static_cast<double>(n["ac"]);
m_D = static_cast<double>(n["ad"]);
m_E = static_cast<double>(n["ae"]);
m_center_x = static_cast<double>(n["cx"]);
m_center_y = static_cast<double>(n["cy"]);
return true;
}
void OCAMCamera::Parameters::writeToYamlFile(
const std::string &filename) const {
cv::FileStorage fs(filename, cv::FileStorage::WRITE);
fs << "model_type"
<< "scaramuzza";
fs << "camera_name" << m_cameraName;
fs << "image_width" << m_imageWidth;
fs << "image_height" << m_imageHeight;
fs << "poly_parameters";
fs << "{";
for (int i = 0; i < SCARAMUZZA_POLY_SIZE; i++)
fs << std::string("p") + boost::lexical_cast<std::string>(i) << m_poly[i];
fs << "}";
fs << "inv_poly_parameters";
fs << "{";
for (int i = 0; i < SCARAMUZZA_INV_POLY_SIZE; i++)
fs << std::string("p") + boost::lexical_cast<std::string>(i)
<< m_inv_poly[i];
fs << "}";
fs << "affine_parameters";
fs << "{"
<< "ac" << m_C << "ad" << m_D << "ae" << m_E << "cx" << m_center_x << "cy"
<< m_center_y << "}";
fs.release();
}
OCAMCamera::Parameters &OCAMCamera::Parameters::operator=(
const OCAMCamera::Parameters &other) {
if (this != &other) {
m_modelType = other.m_modelType;
m_cameraName = other.m_cameraName;
m_imageWidth = other.m_imageWidth;
m_imageHeight = other.m_imageHeight;
m_C = other.m_C;
m_D = other.m_D;
m_E = other.m_E;
m_center_x = other.m_center_x;
m_center_y = other.m_center_y;
memcpy(m_poly, other.m_poly, sizeof(double) * SCARAMUZZA_POLY_SIZE);
memcpy(
m_inv_poly, other.m_inv_poly,
sizeof(double) * SCARAMUZZA_INV_POLY_SIZE);
}
return *this;
}
std::ostream &operator<<(
std::ostream &out, const OCAMCamera::Parameters &params) {
out << "Camera Parameters:" << std::endl;
out << " model_type "
<< "scaramuzza" << std::endl;
out << " camera_name " << params.m_cameraName << std::endl;
out << " image_width " << params.m_imageWidth << std::endl;
out << " image_height " << params.m_imageHeight << std::endl;
out << std::fixed << std::setprecision(10);
out << "Poly Parameters" << std::endl;
for (int i = 0; i < SCARAMUZZA_POLY_SIZE; i++)
out << std::string("p") + boost::lexical_cast<std::string>(i) << ": "
<< params.m_poly[i] << std::endl;
out << "Inverse Poly Parameters" << std::endl;
for (int i = 0; i < SCARAMUZZA_INV_POLY_SIZE; i++)
out << std::string("p") + boost::lexical_cast<std::string>(i) << ": "
<< params.m_inv_poly[i] << std::endl;
out << "Affine Parameters" << std::endl;
out << " ac " << params.m_C << std::endl
<< " ad " << params.m_D << std::endl
<< " ae " << params.m_E << std::endl;
out << " cx " << params.m_center_x << std::endl
<< " cy " << params.m_center_y << std::endl;
return out;
}
OCAMCamera::OCAMCamera() : m_inv_scale(0.0) {}
OCAMCamera::OCAMCamera(const OCAMCamera::Parameters &params)
: mParameters(params) {
m_inv_scale = 1.0 / (params.C() - params.D() * params.E());
}
Camera::ModelType OCAMCamera::modelType(void) const {
return mParameters.modelType();
}
const std::string &OCAMCamera::cameraName(void) const {
return mParameters.cameraName();
}
int OCAMCamera::imageWidth(void) const {
return mParameters.imageWidth();
}
int OCAMCamera::imageHeight(void) const {
return mParameters.imageHeight();
}
void OCAMCamera::estimateIntrinsics(
const cv::Size &boardSize,
const std::vector<std::vector<cv::Point3f> > &objectPoints,
const std::vector<std::vector<cv::Point2f> > &imagePoints) {
// std::cout << "OCAMCamera::estimateIntrinsics - NOT IMPLEMENTED" <<
// std::endl;
// throw std::string("OCAMCamera::estimateIntrinsics - NOT IMPLEMENTED");
// Reference: Page 30 of
// " Scaramuzza, D. Omnidirectional Vision: from Calibration to Robot Motion
// Estimation, ETH Zurich. Thesis no. 17635."
// http://e-collection.library.ethz.ch/eserv/eth:30301/eth-30301-02.pdf
// Matlab code: calibrate.m
// First, estimate every image's extrinsics parameters
std::vector<Eigen::Matrix3d> RList;
std::vector<Eigen::Vector3d> TList;
RList.reserve(imagePoints.size());
TList.reserve(imagePoints.size());
// i-th image
for (size_t image_index = 0; image_index < imagePoints.size();
++image_index) {
const std::vector<cv::Point3f> &objPts = objectPoints.at(image_index);
const std::vector<cv::Point2f> &imgPts = imagePoints.at(image_index);
assert(objPts.size() == imgPts.size());
assert(
objPts.size() ==
static_cast<unsigned int>(boardSize.width * boardSize.height));
Eigen::MatrixXd M(objPts.size(), 6);
for (size_t corner_index = 0; corner_index < objPts.size();
++corner_index) {
double X = objPts.at(corner_index).x;
double Y = objPts.at(corner_index).y;
assert(objPts.at(corner_index).z == 0.0);
double u = imgPts.at(corner_index).x;
double v = imgPts.at(corner_index).y;
M(corner_index, 0) = -v * X;
M(corner_index, 1) = -v * Y;
M(corner_index, 2) = u * X;
M(corner_index, 3) = u * Y;
M(corner_index, 4) = -v;
M(corner_index, 5) = u;
}
Eigen::JacobiSVD<Eigen::MatrixXd> svd(
M, Eigen::ComputeFullU | Eigen::ComputeFullV);
assert(svd.matrixV().cols() == 6);
Eigen::VectorXd h = -svd.matrixV().col(5);
// scaled version of R and T
const double sr11 = h(0);
const double sr12 = h(1);
const double sr21 = h(2);
const double sr22 = h(3);
const double st1 = h(4);
const double st2 = h(5);
const double AA = square(sr11 * sr12 + sr21 * sr22);
const double BB = square(sr11) + square(sr21);
const double CC = square(sr12) + square(sr22);
const double sr32_squared_1 =
(-(CC - BB) + sqrt(square(CC - BB) + 4.0 * AA)) / 2.0;
const double sr32_squared_2 =
(-(CC - BB) - sqrt(square(CC - BB) + 4.0 * AA)) / 2.0;
// printf("rst = %.12f\n", sr32_squared_1*sr32_squared_1 +
// (CC-BB)*sr32_squared_1 - AA);
std::vector<double> sr32_squared_values;
if (sr32_squared_1 > 0)
sr32_squared_values.push_back(sr32_squared_1);
if (sr32_squared_2 > 0)
sr32_squared_values.push_back(sr32_squared_2);
assert(!sr32_squared_values.empty());
std::vector<double> sr32_values;
std::vector<double> sr31_values;
for (auto sr32_squared : sr32_squared_values) {
for (int sign = -1; sign <= 1; sign += 2) {
const double sr32 = static_cast<double>(sign) * std::sqrt(sr32_squared);
sr32_values.push_back(sr32);
if (sr32_squared == 0.0) {
// sr31 can be calculated through norm equality,
          // but it has positive and negative possibilities
// positive one
sr31_values.push_back(std::sqrt(CC - BB));
// negative one
sr32_values.push_back(sr32);
sr31_values.push_back(-std::sqrt(CC - BB));
break; // skip the same situation
} else {
          // sr31 can be calculated through dot product == 0
sr31_values.push_back(-(sr11 * sr12 + sr21 * sr22) / sr32);
}
}
}
// std::cout << "h= " << std::setprecision(12) << h.transpose() <<
// std::endl;
// std::cout << "length: " << sr32_values.size() << " & " <<
// sr31_values.size() << std::endl;
assert(!sr31_values.empty());
assert(sr31_values.size() == sr32_values.size());
std::vector<Eigen::Matrix3d> H_values;
for (size_t i = 0; i < sr31_values.size(); ++i) {
const double sr31 = sr31_values.at(i);
const double sr32 = sr32_values.at(i);
const double lambda = 1.0 / sqrt(sr11 * sr11 + sr21 * sr21 + sr31 * sr31);
Eigen::Matrix3d H;
H.setZero();
H(0, 0) = sr11;
H(0, 1) = sr12;
H(0, 2) = st1;
H(1, 0) = sr21;
H(1, 1) = sr22;
H(1, 2) = st2;
H(2, 0) = sr31;
H(2, 1) = sr32;
H(2, 2) = 0;
H_values.push_back(lambda * H);
H_values.push_back(-lambda * H);
}
for (auto &H : H_values) {
// std::cout << "H=\n" << H << std::endl;
Eigen::Matrix3d R;
R.col(0) = H.col(0);
R.col(1) = H.col(1);
R.col(2) = H.col(0).cross(H.col(1));
// std::cout << "R33 = " << R(2,2) << std::endl;
}
std::vector<Eigen::Matrix3d> H_candidates;
for (auto &H : H_values) {
Eigen::MatrixXd A_mat(2 * imagePoints.at(image_index).size(), 4);
Eigen::VectorXd B_vec(2 * imagePoints.at(image_index).size());
A_mat.setZero();
B_vec.setZero();
size_t line_index = 0;
// iterate images
const double &r11 = H(0, 0);
const double &r12 = H(0, 1);
// const double& r13 = H(0,2);
const double &r21 = H(1, 0);
const double &r22 = H(1, 1);
// const double& r23 = H(1,2);
const double &r31 = H(2, 0);
const double &r32 = H(2, 1);
// const double& r33 = H(2,2);
const double &t1 = H(0);
const double &t2 = H(1);
// iterate chessboard corners in the image
for (size_t j = 0; j < imagePoints.at(image_index).size(); ++j) {
assert(line_index == 2 * j);
const double &X = objectPoints.at(image_index).at(j).x;
const double &Y = objectPoints.at(image_index).at(j).y;
const double &u = imagePoints.at(image_index).at(j).x;
const double &v = imagePoints.at(image_index).at(j).y;
double A = r21 * X + r22 * Y + t2;
double B = v * (r31 * X + r32 * Y);
double C = r11 * X + r12 * Y + t1;
double D = u * (r31 * X + r32 * Y);
double rou = std::sqrt(u * u + v * v);
A_mat(line_index + 0, 0) = A;
A_mat(line_index + 1, 0) = C;
A_mat(line_index + 0, 1) = A * rou;
A_mat(line_index + 1, 1) = C * rou;
A_mat(line_index + 0, 2) = A * rou * rou;
A_mat(line_index + 1, 2) = C * rou * rou;
A_mat(line_index + 0, 3) = -v;
A_mat(line_index + 1, 3) = -u;
B_vec(line_index + 0) = B;
B_vec(line_index + 1) = D;
line_index += 2;
}
assert(line_index == static_cast<unsigned int>(A_mat.rows()));
// pseudo-inverse for polynomial parameters and all t3s
{
Eigen::JacobiSVD<Eigen::MatrixXd> svd(
A_mat, Eigen::ComputeThinU | Eigen::ComputeThinV);
Eigen::VectorXd x = svd.solve(B_vec);
// std::cout << "x(poly and t3) = " << x << std::endl;
if (x(2) > 0 && x(3) > 0) {
H_candidates.push_back(H);
}
}
}
// printf("H_candidates.size()=%zu\n", H_candidates.size());
assert(H_candidates.size() == 1);
Eigen::Matrix3d &H = H_candidates.front();
Eigen::Matrix3d R;
R.col(0) = H.col(0);
R.col(1) = H.col(1);
R.col(2) = H.col(0).cross(H.col(1));
Eigen::Vector3d T = H.col(2);
RList.push_back(R);
TList.push_back(T);
// std::cout << "#" << image_index << " frame" << " R =" << R << " \nT = "
// << T.transpose() << std::endl;
}
// Second, estimate camera intrinsic parameters and all t3
Eigen::MatrixXd A_mat(
2 * imagePoints.size() * imagePoints.at(0).size(),
SCARAMUZZA_POLY_SIZE - 1 + imagePoints.size());
Eigen::VectorXd B_vec(2 * imagePoints.size() * imagePoints.at(0).size());
A_mat.setZero();
B_vec.setZero();
size_t line_index = 0;
// iterate images
for (size_t i = 0; i < imagePoints.size(); ++i) {
const double &r11 = RList.at(i)(0, 0);
const double &r12 = RList.at(i)(0, 1);
// const double& r13 = RList.at(i)(0,2);
const double &r21 = RList.at(i)(1, 0);
const double &r22 = RList.at(i)(1, 1);
// const double& r23 = RList.at(i)(1,2);
const double &r31 = RList.at(i)(2, 0);
const double &r32 = RList.at(i)(2, 1);
// const double& r33 = RList.at(i)(2,2);
const double &t1 = TList.at(i)(0);
const double &t2 = TList.at(i)(1);
// iterate chessboard corners in the image
for (size_t j = 0; j < imagePoints.at(i).size(); ++j) {
assert(line_index == 2 * (i * imagePoints.at(0).size() + j));
const double &X = objectPoints.at(i).at(j).x;
const double &Y = objectPoints.at(i).at(j).y;
const double &u = imagePoints.at(i).at(j).x;
const double &v = imagePoints.at(i).at(j).y;
double A = r21 * X + r22 * Y + t2;
double B = v * (r31 * X + r32 * Y);
double C = r11 * X + r12 * Y + t1;
double D = u * (r31 * X + r32 * Y);
double rou = std::sqrt(u * u + v * v);
for (int k = 1; k <= SCARAMUZZA_POLY_SIZE - 1; ++k) {
double pow_rou = 0.0;
if (k == 1) {
pow_rou = 1.0;
} else {
pow_rou = std::pow(rou, k);
}
A_mat(line_index + 0, k - 1) = A * pow_rou;
A_mat(line_index + 1, k - 1) = C * pow_rou;
}
A_mat(line_index + 0, SCARAMUZZA_POLY_SIZE - 1 + i) = -v;
A_mat(line_index + 1, SCARAMUZZA_POLY_SIZE - 1 + i) = -u;
B_vec(line_index + 0) = B;
B_vec(line_index + 1) = D;
line_index += 2;
}
}
assert(line_index == static_cast<unsigned int>(A_mat.rows()));
Eigen::Matrix<double, SCARAMUZZA_POLY_SIZE, 1> poly_coeff;
// pseudo-inverse for polynomial parameters and all t3s
{
Eigen::JacobiSVD<Eigen::MatrixXd> svd(
A_mat, Eigen::ComputeThinU | Eigen::ComputeThinV);
Eigen::VectorXd x = svd.solve(B_vec);
poly_coeff[0] = x(0);
poly_coeff[1] = 0.0;
for (int i = 1; i < poly_coeff.size() - 1; ++i) {
poly_coeff[i + 1] = x(i);
}
assert(
x.size() ==
static_cast<unsigned int>(SCARAMUZZA_POLY_SIZE - 1 + TList.size()));
}
Parameters params = getParameters();
// Affine matrix A is constructed as [C D; E 1]
params.C() = 1.0;
params.D() = 0.0;
params.E() = 0.0;
params.center_x() = params.imageWidth() / 2.0;
params.center_y() = params.imageHeight() / 2.0;
for (size_t i = 0; i < SCARAMUZZA_POLY_SIZE; ++i) {
params.poly(i) = poly_coeff[i];
}
// params.poly(0) = -216.9657476318;
// params.poly(1) = 0.0;
// params.poly(2) = 0.0017866911;
// params.poly(3) = -0.0000019866;
// params.poly(4) = 0.0000000077;
// inv_poly
{
std::vector<double> rou_vec;
std::vector<double> z_vec;
for (double rou = 0.0;
rou <= (params.imageWidth() + params.imageHeight()) / 2; rou += 0.1) {
double rou_pow_k = 1.0;
double z = 0.0;
for (int k = 0; k < SCARAMUZZA_POLY_SIZE; k++) {
z += rou_pow_k * params.poly(k);
rou_pow_k *= rou;
}
rou_vec.push_back(rou);
z_vec.push_back(z);
}
assert(rou_vec.size() == z_vec.size());
Eigen::VectorXd xVec(rou_vec.size());
Eigen::VectorXd yVec(rou_vec.size());
for (size_t i = 0; i < rou_vec.size(); ++i) {
xVec(i) = std::atan2(-z_vec.at(i), rou_vec.at(i));
yVec(i) = rou_vec.at(i);
}
    // use lower order poly to eliminate over-fitting caused by noisy/inaccurate
// data
const int poly_fit_order = 4;
Eigen::VectorXd inv_poly_coeff = polyfit(xVec, yVec, poly_fit_order);
for (int i = 0; i <= poly_fit_order; ++i) {
params.inv_poly(i) = inv_poly_coeff(i);
}
}
setParameters(params);
std::cout << "initial params:\n" << params << std::endl;
}
/**
* \brief Lifts a point from the image plane to the unit sphere
*
* \param p image coordinates
* \param P coordinates of the point on the sphere
*/
void OCAMCamera::liftSphere(
const Eigen::Vector2d &p, Eigen::Vector3d &P) const {
liftProjective(p, P);
P.normalize();
}
/**
* \brief Lifts a point from the image plane to its projective ray
*
* \param p image coordinates
* \param P coordinates of the projective ray
*/
void OCAMCamera::liftProjective(
const Eigen::Vector2d &p, Eigen::Vector3d &P) const {
// Relative to Center
Eigen::Vector2d xc(
p[0] - mParameters.center_x(), p[1] - mParameters.center_y());
// Affine Transformation
// xc_a = inv(A) * xc;
Eigen::Vector2d xc_a(
m_inv_scale * (xc[0] - mParameters.D() * xc[1]),
m_inv_scale * (-mParameters.E() * xc[0] + mParameters.C() * xc[1]));
double phi = std::sqrt(xc_a[0] * xc_a[0] + xc_a[1] * xc_a[1]);
double phi_i = 1.0;
double z = 0.0;
for (int i = 0; i < SCARAMUZZA_POLY_SIZE; i++) {
z += phi_i * mParameters.poly(i);
phi_i *= phi;
}
P << xc[0], xc[1], -z;
}
/**
* \brief Project a 3D point (\a x,\a y,\a z) to the image plane in (\a u,\a v)
*
* \param P 3D point coordinates
* \param p return value, contains the image point coordinates
*/
void OCAMCamera::spaceToPlane(
const Eigen::Vector3d &P, Eigen::Vector2d &p) const {
double norm = std::sqrt(P[0] * P[0] + P[1] * P[1]);
double theta = std::atan2(-P[2], norm);
double rho = 0.0;
double theta_i = 1.0;
for (int i = 0; i < SCARAMUZZA_INV_POLY_SIZE; i++) {
rho += theta_i * mParameters.inv_poly(i);
theta_i *= theta;
}
double invNorm = 1.0 / norm;
Eigen::Vector2d xn(P[0] * invNorm * rho, P[1] * invNorm * rho);
p << xn[0] * mParameters.C() + xn[1] * mParameters.D() +
mParameters.center_x(),
xn[0] * mParameters.E() + xn[1] + mParameters.center_y();
}
/**
* \brief Projects an undistorted 2D point p_u to the image plane
*
* \param p_u 2D point coordinates
 * \param p return value, contains the image point coordinates
*/
void OCAMCamera::undistToPlane(
const Eigen::Vector2d &p_u, Eigen::Vector2d &p) const {
Eigen::Vector3d P(p_u[0], p_u[1], 1.0);
spaceToPlane(P, p);
}
#if 0
void
OCAMCamera::initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale) const
{
cv::Size imageSize(mParameters.imageWidth(), mParameters.imageHeight());
cv::Mat mapX = cv::Mat::zeros(imageSize, CV_32F);
cv::Mat mapY = cv::Mat::zeros(imageSize, CV_32F);
for (int v = 0; v < imageSize.height; ++v)
{
for (int u = 0; u < imageSize.width; ++u)
{
double mx_u = m_inv_K11 / fScale * u + m_inv_K13 / fScale;
double my_u = m_inv_K22 / fScale * v + m_inv_K23 / fScale;
double xi = mParameters.xi();
double d2 = mx_u * mx_u + my_u * my_u;
Eigen::Vector3d P;
P << mx_u, my_u, 1.0 - xi * (d2 + 1.0) / (xi + sqrt(1.0 + (1.0 - xi * xi) * d2));
Eigen::Vector2d p;
spaceToPlane(P, p);
mapX.at<float>(v,u) = p(0);
mapY.at<float>(v,u) = p(1);
}
}
cv::convertMaps(mapX, mapY, map1, map2, CV_32FC1, false);
}
#endif
cv::Mat OCAMCamera::initUndistortRectifyMap(
cv::Mat &map1, cv::Mat &map2, float fx, float fy, cv::Size imageSize,
float cx, float cy, cv::Mat rmat) const {
if (imageSize == cv::Size(0, 0)) {
imageSize = cv::Size(mParameters.imageWidth(), mParameters.imageHeight());
}
cv::Mat mapX = cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F);
cv::Mat mapY = cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F);
Eigen::Matrix3f K_rect;
K_rect << fx, 0, cx < 0 ? imageSize.width / 2 : cx, 0, fy,
cy < 0 ? imageSize.height / 2 : cy, 0, 0, 1;
if (fx < 0 || fy < 0) {
throw std::string(
std::string(__FUNCTION__) + ": Focal length must be specified");
}
Eigen::Matrix3f K_rect_inv = K_rect.inverse();
Eigen::Matrix3f R, R_inv;
cv::cv2eigen(rmat, R);
R_inv = R.inverse();
for (int v = 0; v < imageSize.height; ++v) {
for (int u = 0; u < imageSize.width; ++u) {
Eigen::Vector3f xo;
xo << u, v, 1;
Eigen::Vector3f uo = R_inv * K_rect_inv * xo;
Eigen::Vector2d p;
spaceToPlane(uo.cast<double>(), p);
mapX.at<float>(v, u) = p(0);
mapY.at<float>(v, u) = p(1);
}
}
cv::convertMaps(mapX, mapY, map1, map2, CV_32FC1, false);
cv::Mat K_rect_cv;
cv::eigen2cv(K_rect, K_rect_cv);
return K_rect_cv;
}
int OCAMCamera::parameterCount(void) const {
return SCARAMUZZA_CAMERA_NUM_PARAMS;
}
const OCAMCamera::Parameters &OCAMCamera::getParameters(void) const {
return mParameters;
}
void OCAMCamera::setParameters(const OCAMCamera::Parameters &parameters) {
mParameters = parameters;
m_inv_scale = 1.0 / (parameters.C() - parameters.D() * parameters.E());
}
void OCAMCamera::readParameters(const std::vector<double> &parameterVec) {
if ((int)parameterVec.size() != parameterCount()) {
return;
}
Parameters params = getParameters();
params.C() = parameterVec.at(0);
params.D() = parameterVec.at(1);
params.E() = parameterVec.at(2);
params.center_x() = parameterVec.at(3);
params.center_y() = parameterVec.at(4);
for (int i = 0; i < SCARAMUZZA_POLY_SIZE; i++)
params.poly(i) = parameterVec.at(5 + i);
for (int i = 0; i < SCARAMUZZA_INV_POLY_SIZE; i++)
params.inv_poly(i) = parameterVec.at(5 + SCARAMUZZA_POLY_SIZE + i);
setParameters(params);
}
void OCAMCamera::writeParameters(std::vector<double> &parameterVec) const {
parameterVec.resize(parameterCount());
parameterVec.at(0) = mParameters.C();
parameterVec.at(1) = mParameters.D();
parameterVec.at(2) = mParameters.E();
parameterVec.at(3) = mParameters.center_x();
parameterVec.at(4) = mParameters.center_y();
for (int i = 0; i < SCARAMUZZA_POLY_SIZE; i++)
parameterVec.at(5 + i) = mParameters.poly(i);
for (int i = 0; i < SCARAMUZZA_INV_POLY_SIZE; i++)
parameterVec.at(5 + SCARAMUZZA_POLY_SIZE + i) = mParameters.inv_poly(i);
}
void OCAMCamera::writeParametersToYamlFile(const std::string &filename) const {
mParameters.writeToYamlFile(filename);
}
std::string OCAMCamera::parametersToString(void) const {
std::ostringstream oss;
oss << mParameters;
return oss.str();
}
}
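For reference, a minimal usage sketch of the removed OCAM (Scaramuzza) model; the affine, centre and polynomial values below are placeholders, not a real calibration:

#include "camodocal/camera_models/ScaramuzzaCamera.h"
#include <iostream>

int main() {
  camodocal::OCAMCamera::Parameters params;
  params.C() = 1.0;            // affine matrix [C D; E 1]
  params.D() = 0.0;
  params.E() = 0.0;
  params.center_x() = 320.0;   // principal point (placeholder)
  params.center_y() = 240.0;
  params.poly(0) = -200.0;     // remaining poly/inv_poly terms stay zero
  params.inv_poly(0) = 200.0;
  camodocal::OCAMCamera cam(params);
  Eigen::Vector3d P(0.1, -0.2, 1.5);
  Eigen::Vector2d p;
  cam.spaceToPlane(P, p);      // camera-frame point -> pixel
  Eigen::Vector3d ray;
  cam.liftProjective(p, ray);  // pixel -> ray (up to scale)
  std::cout << "pixel: " << p.transpose()
            << "  ray: " << ray.transpose() << std::endl;
  return 0;
}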

File diff suppressed because it is too large

View File

@@ -1,43 +0,0 @@
#include "camodocal/gpl/EigenQuaternionParameterization.h"
#include <cmath>
namespace camodocal {
bool EigenQuaternionParameterization::Plus(
const double *x, const double *delta, double *x_plus_delta) const {
const double norm_delta =
sqrt(delta[0] * delta[0] + delta[1] * delta[1] + delta[2] * delta[2]);
if (norm_delta > 0.0) {
const double sin_delta_by_delta = (sin(norm_delta) / norm_delta);
double q_delta[4];
q_delta[0] = sin_delta_by_delta * delta[0];
q_delta[1] = sin_delta_by_delta * delta[1];
q_delta[2] = sin_delta_by_delta * delta[2];
q_delta[3] = cos(norm_delta);
EigenQuaternionProduct(q_delta, x, x_plus_delta);
} else {
for (int i = 0; i < 4; ++i) {
x_plus_delta[i] = x[i];
}
}
return true;
}
bool EigenQuaternionParameterization::ComputeJacobian(
const double *x, double *jacobian) const {
jacobian[0] = x[3];
jacobian[1] = x[2];
jacobian[2] = -x[1]; // NOLINT
jacobian[3] = -x[2];
jacobian[4] = x[3];
jacobian[5] = x[0]; // NOLINT
jacobian[6] = x[1];
jacobian[7] = -x[0];
jacobian[8] = x[3]; // NOLINT
jacobian[9] = -x[0];
jacobian[10] = -x[1];
jacobian[11] = -x[2]; // NOLINT
return true;
}
}
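For reference, a minimal usage sketch of the removed parameterization; this assumes the class can be instantiated directly now that the ceres base class is gone, and relies on quaternions being stored in Eigen coefficient order (x, y, z, w):

#include "camodocal/gpl/EigenQuaternionParameterization.h"
#include <cstdio>

int main() {
  camodocal::EigenQuaternionParameterization parameterization;
  double q[4] = {0.0, 0.0, 0.0, 1.0};   // identity quaternion (x, y, z, w)
  double delta[3] = {0.05, 0.0, 0.0};   // small rotation about the x axis
  double q_plus_delta[4];
  parameterization.Plus(q, delta, q_plus_delta);
  std::printf("q + delta = (%f, %f, %f, %f)\n", q_plus_delta[0],
              q_plus_delta[1], q_plus_delta[2], q_plus_delta[3]);
  return 0;
}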

View File

@@ -1,55 +0,0 @@
#include <camodocal/sparse_graph/Transform.h>
namespace camodocal {
Transform::Transform() {
m_q.setIdentity();
m_t.setZero();
}
Transform::Transform(const Eigen::Matrix4d &H) {
m_q = Eigen::Quaterniond(H.block<3, 3>(0, 0));
m_t = H.block<3, 1>(0, 3);
}
Eigen::Quaterniond &Transform::rotation(void) {
return m_q;
}
const Eigen::Quaterniond &Transform::rotation(void) const {
return m_q;
}
double *Transform::rotationData(void) {
return m_q.coeffs().data();
}
const double *const Transform::rotationData(void) const {
return m_q.coeffs().data();
}
Eigen::Vector3d &Transform::translation(void) {
return m_t;
}
const Eigen::Vector3d &Transform::translation(void) const {
return m_t;
}
double *Transform::translationData(void) {
return m_t.data();
}
const double *const Transform::translationData(void) const {
return m_t.data();
}
Eigen::Matrix4d Transform::toMatrix(void) const {
Eigen::Matrix4d H;
H.setIdentity();
H.block<3, 3>(0, 0) = m_q.toRotationMatrix();
H.block<3, 1>(0, 3) = m_t;
return H;
}
}
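For reference, a minimal usage sketch of the removed Transform helper; the rotation angle and translation below are arbitrary example values:

#include <camodocal/sparse_graph/Transform.h>
#include <iostream>

int main() {
  Eigen::Matrix4d H = Eigen::Matrix4d::Identity();
  H.block<3, 1>(0, 3) = Eigen::Vector3d(0.1, 0.0, -0.2);   // example translation
  camodocal::Transform T(H);                 // 4x4 homogeneous -> (q, t)
  T.rotation() = Eigen::Quaterniond(
      Eigen::AngleAxisd(0.5, Eigen::Vector3d::UnitZ()));   // 0.5 rad about z
  std::cout << T.toMatrix() << std::endl;    // back to a 4x4 matrix
  return 0;
}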

View File

@@ -348,7 +348,7 @@ api::StreamData Synthetic::GetStreamData(const Stream &stream) {
int num = 0;
for (auto it : streams) {
if (it.stream == stream) {
if (num == 0) {
if (num == 1) {
return {output->first_data,
output->first,
nullptr,
@@ -510,7 +510,7 @@ void Synthetic::InitProcessors() {
depth_processor = std::make_shared<DepthProcessorOCV>(DEPTH_PROC_PERIOD);
}
auto root_processor =
std::make_shared<RootProcessor>(RECTIFY_PROC_PERIOD);
std::make_shared<RootProcessor>(ROOT_PROC_PERIOD);
root_processor->AddChild(rectify_processor);
rectify_processor->addTargetStreams(