style(camodocal): camodocal style

TinyOh 2019-01-25 11:55:41 +08:00
parent 54eae3e2d0
commit 7dabf5cc9d
6 changed files with 199 additions and 239 deletions

View File

@@ -1,10 +1,23 @@
#ifndef CAMERA_H
#define CAMERA_H
// Copyright 2018 Slightech Co., Ltd. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef SRC_MYNTEYE_API_CAMODOCAL_INCLUDE_CAMODOCAL_CAMERA_MODELS_CAMERA_H_
#define SRC_MYNTEYE_API_CAMODOCAL_INCLUDE_CAMODOCAL_CAMERA_MODELS_CAMERA_H_
#include <vector>
#include <boost/shared_ptr.hpp>
#include "eigen3/Eigen/Dense"
#include <opencv2/core/core.hpp>
#include <vector>
namespace camodocal {
@@ -16,7 +29,7 @@ class Camera {
class Parameters {
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
Parameters(ModelType modelType);
explicit Parameters(ModelType modelType);
Parameters(
ModelType modelType, const std::string &cameraName, int w, int h);
@@ -33,9 +46,6 @@ class Camera {
int nIntrinsics(void) const;
virtual bool readFromYamlFile(const std::string &filename) = 0;
virtual void writeToYamlFile(const std::string &filename) const = 0;
protected:
ModelType m_modelType;
int m_nIntrinsics;
@@ -58,50 +68,39 @@ class Camera {
const std::vector<std::vector<cv::Point2f> > &imagePoints) = 0;
virtual void estimateExtrinsics(
const std::vector<cv::Point3f> &objectPoints,
const std::vector<cv::Point2f> &imagePoints, cv::Mat &rvec,
cv::Mat &tvec) const;
// Lift points from the image plane to the sphere
virtual void liftSphere(
const Eigen::Vector2d &p, Eigen::Vector3d &P) const = 0;
//%output P
const std::vector<cv::Point2f> &imagePoints,
cv::Mat &rvec, cv::Mat &tvec) const; // NOLINT
// Lift points from the image plane to the projective space
virtual void liftProjective(
const Eigen::Vector2d &p, Eigen::Vector3d &P) const = 0;
//%output P
const Eigen::Vector2d &p, Eigen::Vector3d &P) const = 0; // NOLINT
// %output P
// Projects 3D points to the image plane (Pi function)
virtual void spaceToPlane(
const Eigen::Vector3d &P, Eigen::Vector2d &p) const = 0;
//%output p
const Eigen::Vector3d &P, Eigen::Vector2d &p) const = 0; // NOLINT
// %output p
// Projects 3D points to the image plane (Pi function)
// and calculates jacobian
// virtual void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p,
// Eigen::Matrix<double,2,3>& J) const = 0;
//%output p
//%output J
virtual void undistToPlane(
const Eigen::Vector2d &p_u, Eigen::Vector2d &p) const = 0;
//%output p
// %output p
// %output J
// virtual void initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale =
// 1.0) const = 0;
virtual cv::Mat initUndistortRectifyMap(
cv::Mat &map1, cv::Mat &map2, float fx = -1.0f, float fy = -1.0f,
cv::Mat &map1, cv::Mat &map2, float fx = -1.0f, float fy = -1.0f, // NOLINT
cv::Size imageSize = cv::Size(0, 0), float cx = -1.0f, float cy = -1.0f,
cv::Mat rmat = cv::Mat::eye(3, 3, CV_32F)) const = 0;
virtual int parameterCount(void) const = 0;
virtual void readParameters(const std::vector<double> &parameters) = 0;
virtual void writeParameters(std::vector<double> &parameters) const = 0;
virtual void writeParameters(std::vector<double> &parameters) const = 0; // NOLINT
virtual void writeParametersToYamlFile(const std::string &filename) const = 0;
virtual std::string parametersToString(void) const = 0;
virtual std::string parametersToString(void) const = 0; // NOLINT
/**
* \brief Calculates the reprojection distance between points
@@ -125,7 +124,7 @@ class Camera {
void projectPoints(
const std::vector<cv::Point3f> &objectPoints, const cv::Mat &rvec,
const cv::Mat &tvec, std::vector<cv::Point2f> &imagePoints) const;
const cv::Mat &tvec, std::vector<cv::Point2f> &imagePoints) const; // NOLINT
protected:
cv::Mat m_mask;
@@ -135,4 +134,5 @@ typedef boost::shared_ptr<Camera> CameraPtr;
typedef boost::shared_ptr<const Camera> CameraConstPtr;
}
#endif
#endif // SRC_MYNTEYE_API_CAMODOCAL_INCLUDE_CAMODOCAL_CAMERA_MODELS_CAMERA_H_
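Camera.h above declares the abstract interface every model implements: liftProjective() takes an image point to a ray in projective space, spaceToPlane() projects a 3D point back to the image plane. A minimal stand-alone sketch of those two operations for a plain pinhole model follows; it does not derive from camodocal::Camera (that would require the full interface), and fx/fy/cx/cy are illustrative members, not part of the header shown.

// Sketch only: a hypothetical pinhole analogue of liftProjective()/spaceToPlane().
#include "eigen3/Eigen/Dense"

class PinholeSketch {
 public:
  PinholeSketch(double fx, double fy, double cx, double cy)
      : fx_(fx), fy_(fy), cx_(cx), cy_(cy) {}

  // Lift an image point to a ray in projective space (cf. Camera::liftProjective).
  void liftProjective(const Eigen::Vector2d &p, Eigen::Vector3d &P) const {
    P << (p(0) - cx_) / fx_, (p(1) - cy_) / fy_, 1.0;
  }

  // Project a 3D point onto the image plane (cf. Camera::spaceToPlane).
  void spaceToPlane(const Eigen::Vector3d &P, Eigen::Vector2d &p) const {
    p << fx_ * P(0) / P(2) + cx_, fy_ * P(1) / P(2) + cy_;
  }

 private:
  double fx_, fy_, cx_, cy_;  // assumed intrinsics, for illustration only
};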

View File

@@ -1,10 +1,22 @@
#ifndef EQUIDISTANTCAMERA_H
#define EQUIDISTANTCAMERA_H
// Copyright 2018 Slightech Co., Ltd. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef SRC_MYNTEYE_API_CAMODOCAL_INCLUDE_CAMODOCAL_CAMERA_MODELS_EQUIDISTANTCAMERA_H_
#define SRC_MYNTEYE_API_CAMODOCAL_INCLUDE_CAMODOCAL_CAMERA_MODELS_EQUIDISTANTCAMERA_H_
#include <opencv2/core/core.hpp>
#include <string>
#include "Camera.h"
#include <opencv2/core/core.hpp>
namespace camodocal {
@@ -59,9 +71,6 @@ class EquidistantCamera : public Camera {
double u0(void) const;
double v0(void) const;
bool readFromYamlFile(const std::string &filename);
void writeToYamlFile(const std::string &filename) const;
Parameters &operator=(const Parameters &other);
friend std::ostream &operator<<(
std::ostream &out, const Parameters &params);
@@ -91,7 +100,7 @@ class EquidistantCamera : public Camera {
/**
* \brief Constructor from the projection model parameters
*/
EquidistantCamera(const Parameters &params);
explicit EquidistantCamera(const Parameters &params);
Camera::ModelType modelType(void) const;
const std::string &cameraName(void) const;
@@ -103,38 +112,31 @@ class EquidistantCamera : public Camera {
const std::vector<std::vector<cv::Point3f> > &objectPoints,
const std::vector<std::vector<cv::Point2f> > &imagePoints);
// Lift points from the image plane to the sphere
virtual void liftSphere(const Eigen::Vector2d &p, Eigen::Vector3d &P) const;
//%output P
// Lift points from the image plane to the projective space
void liftProjective(const Eigen::Vector2d &p, Eigen::Vector3d &P) const;
//%output P
void liftProjective(const Eigen::Vector2d &p, Eigen::Vector3d &P) const; // NOLINT
// %output P
// Projects 3D points to the image plane (Pi function)
void spaceToPlane(const Eigen::Vector3d &P, Eigen::Vector2d &p) const;
//%output p
void spaceToPlane(const Eigen::Vector3d &P, Eigen::Vector2d &p) const; // NOLINT
// %output p
// Projects 3D points to the image plane (Pi function)
// and calculates jacobian
void spaceToPlane(
const Eigen::Vector3d &P, Eigen::Vector2d &p,
Eigen::Matrix<double, 2, 3> &J) const;
//%output p
//%output J
void undistToPlane(const Eigen::Vector2d &p_u, Eigen::Vector2d &p) const;
//%output p
const Eigen::Vector3d &P, Eigen::Vector2d &p, // NOLINT
Eigen::Matrix<double, 2, 3> &J) const; // NOLINT
// %output p
// %output J
template <typename T>
static void spaceToPlane(
const T *const params, const T *const q, const T *const t,
const Eigen::Matrix<T, 3, 1> &P, Eigen::Matrix<T, 2, 1> &p);
const Eigen::Matrix<T, 3, 1> &P, Eigen::Matrix<T, 2, 1> &p); // NOLINT
void initUndistortMap(
cv::Mat &map1, cv::Mat &map2, double fScale = 1.0) const;
cv::Mat &map1, cv::Mat &map2, double fScale = 1.0) const; // NOLINT
cv::Mat initUndistortRectifyMap(
cv::Mat &map1, cv::Mat &map2, float fx = -1.0f, float fy = -1.0f,
cv::Mat &map1, cv::Mat &map2, float fx = -1.0f, float fy = -1.0f, // NOLINT
cv::Size imageSize = cv::Size(0, 0), float cx = -1.0f, float cy = -1.0f,
cv::Mat rmat = cv::Mat::eye(3, 3, CV_32F)) const;
@@ -144,9 +146,7 @@ class EquidistantCamera : public Camera {
void setParameters(const Parameters &parameters);
void readParameters(const std::vector<double> &parameterVec);
void writeParameters(std::vector<double> &parameterVec) const;
void writeParametersToYamlFile(const std::string &filename) const;
void writeParameters(std::vector<double> &parameterVec) const; // NOLINT
std::string parametersToString(void) const;
@@ -156,10 +156,10 @@ class EquidistantCamera : public Camera {
void fitOddPoly(
const std::vector<double> &x, const std::vector<double> &y, int n,
std::vector<double> &coeffs) const;
std::vector<double> &coeffs) const; // NOLINT
void backprojectSymmetric(
const Eigen::Vector2d &p_u, double &theta, double &phi) const;
const Eigen::Vector2d &p_u, double &theta, double &phi) const; // NOLINT
Parameters mParameters;
@@ -227,4 +227,5 @@ void EquidistantCamera::spaceToPlane(
}
}
#endif
#endif // SRC_MYNTEYE_API_CAMODOCAL_INCLUDE_CAMODOCAL_CAMERA_MODELS_EQUIDISTANTCAMERA_H_
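A usage sketch of the EquidistantCamera API declared above (readFromYamlFile, the explicit EquidistantCamera(const Parameters &) constructor, spaceToPlane, liftProjective). The default Parameters constructor and the YAML file name are assumptions not shown in this diff.

// Usage sketch only; see the assumptions noted above.
#include <iostream>
#include "camodocal/camera_models/EquidistantCamera.h"
#include "eigen3/Eigen/Dense"

int main() {
  camodocal::EquidistantCamera::Parameters params;     // assumed default ctor
  if (!params.readFromYamlFile("equidistant.yaml")) {  // hypothetical file name
    return 1;
  }
  camodocal::EquidistantCamera camera(params);

  Eigen::Vector3d P(0.1, -0.2, 1.0);  // point in camera coordinates
  Eigen::Vector2d p;
  camera.spaceToPlane(P, p);          // project to pixel coordinates

  Eigen::Vector3d ray;
  camera.liftProjective(p, ray);      // lift back to a projective ray
  std::cout << p.transpose() << std::endl;
  return 0;
}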

View File

@@ -1,6 +1,20 @@
#ifndef GPL_H
#define GPL_H
// Copyright 2018 Slightech Co., Ltd. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef SRC_MYNTEYE_API_CAMODOCAL_INCLUDE_CAMODOCAL_GPL_GPL_H_
#define SRC_MYNTEYE_API_CAMODOCAL_INCLUDE_CAMODOCAL_GPL_GPL_H_
#include <vector>
#include <algorithm>
#include <cmath>
#include <opencv2/core/core.hpp>
@@ -48,7 +62,7 @@ const T cube(const T &x) {
template <class T>
const T random(const T &a, const T &b) {
return static_cast<double>(rand()) / RAND_MAX * (b - a) + a;
return static_cast<double>(rand()) / RAND_MAX * (b - a) + a; // NOLINT
}
template <class T>
@@ -66,35 +80,36 @@ const T randomNormal(const T &sigma) {
return x1 * w * sigma;
}
unsigned long long timeInMicroseconds(void);
unsigned long long timeInMicroseconds(void); // NOLINT
double timeInSeconds(void);
void colorDepthImage(
cv::Mat &imgDepth, cv::Mat &imgColoredDepth, float minRange,
cv::Mat &imgDepth, cv::Mat &imgColoredDepth, float minRange, // NOLINT
float maxRange);
bool colormap(
const std::string &name, unsigned char idx, float &r, float &g, float &b);
const std::string &name, unsigned char idx, float &r, float &g, float &b); // NOLINT
std::vector<cv::Point2i> bresLine(int x0, int y0, int x1, int y1);
std::vector<cv::Point2i> bresCircle(int x0, int y0, int r);
void fitCircle(
const std::vector<cv::Point2d> &points, double &centerX, double &centerY,
double &radius);
const std::vector<cv::Point2d> &points, double &centerX, double &centerY, // NOLINT
double &radius); // NOLINT
std::vector<cv::Point2d> intersectCircles(
double x1, double y1, double r1, double x2, double y2, double r2);
void LLtoUTM(
double latitude, double longitude, double &utmNorthing, double &utmEasting,
std::string &utmZone);
double latitude, double longitude, double &utmNorthing, double &utmEasting, // NOLINT
std::string &utmZone); // NOLINT
void UTMtoLL(
double utmNorthing, double utmEasting, const std::string &utmZone,
double &latitude, double &longitude);
double utmNorthing, double utmEasting, const std::string &utmZone, // NOLINT
double &latitude, double &longitude); // NOLINT
long int timestampDiff(uint64_t t1, uint64_t t2);
long int timestampDiff(uint64_t t1, uint64_t t2); // NOLINT
}
#endif
#endif // SRC_MYNTEYE_API_CAMODOCAL_INCLUDE_CAMODOCAL_GPL_GPL_H_
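A short usage sketch for two of the helpers declared in gpl.h above, relying only on the signatures shown (bresLine, fitCircle) and assuming they live in namespace camodocal, as the closing brace above suggests.

// Usage sketch for gpl.h helpers; example point values are invented.
#include <iostream>
#include <vector>
#include <opencv2/core/core.hpp>
#include "camodocal/gpl/gpl.h"

int main() {
  // Rasterize a line segment with Bresenham's algorithm.
  std::vector<cv::Point2i> line = camodocal::bresLine(0, 0, 10, 4);

  // Fit a circle to 2D points (here, four points on the unit circle).
  std::vector<cv::Point2d> pts = {{1, 0}, {0, 1}, {-1, 0}, {0, -1}};
  double cx = 0.0, cy = 0.0, radius = 0.0;
  camodocal::fitCircle(pts, cx, cy, radius);

  std::cout << line.size() << " pixels, circle radius = " << radius << std::endl;
  return 0;
}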

View File

@@ -1,6 +1,18 @@
#include "camodocal/camera_models/Camera.h"
// Copyright 2018 Slightech Co., Ltd. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <opencv2/calib3d/calib3d.hpp>
#include "camodocal/camera_models/Camera.h"
namespace camodocal {

View File

@@ -1,14 +1,26 @@
#include "camodocal/camera_models/EquidistantCamera.h"
// Copyright 2018 Slightech Co., Ltd. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <iomanip>
#include <iostream>
#include <cstdint>
#include <cmath>
#include <cstdio>
#include "camodocal/camera_models/EquidistantCamera.h"
#include "eigen3/Eigen/Dense"
#include <iomanip>
#include <iostream>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/core/eigen.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "camodocal/gpl/gpl.h"
namespace camodocal {
@@ -19,13 +31,20 @@ float ApproxAtan2(float y, float x)
const float n1 = 0.97239411f;
const float n2 = -0.19194795f;
float result = 0.0f;
if (x != 0.0f)
{
const union { float flVal; std::uint32_t nVal; } tYSign = { y };
const union { float flVal; std::uint32_t nVal; } tXSign = { x };
if (fabsf(x) >= fabsf(y))
{
union { float flVal; std::uint32_t nVal; } tOffset = { PI };
if (x != 0.0f) {
const union {
float flVal;
std::uint32_t nVal;
} tYSign = { y };
const union {
float flVal;
std::uint32_t nVal;
} tXSign = { x };
if (fabsf(x) >= fabsf(y)) {
union {
float flVal;
std::uint32_t nVal;
} tOffset = { PI };
// Add or subtract PI based on y's sign.
tOffset.nVal |= tYSign.nVal & 0x80000000u;
// No offset if x is positive, so multiply by 0 or based on x's sign.
@@ -33,23 +52,20 @@ float ApproxAtan2(float y, float x)
result = tOffset.flVal;
const float z = y / x;
result += (n1 + n2 * z * z) * z;
}
else // Use atan(y/x) = pi/2 - atan(x/y) if |y/x| > 1.
{
union { float flVal; std::uint32_t nVal; } tOffset = { PI_2 };
} else { // Use atan(y/x) = pi/2 - atan(x/y) if |y/x| > 1.
union {
float flVal;
std::uint32_t nVal;
} tOffset = { PI_2 };
// Add or subtract PI/2 based on y's sign.
tOffset.nVal |= tYSign.nVal & 0x80000000u;
result = tOffset.flVal;
const float z = x / y;
result -= (n1 + n2 * z * z) * z;
}
}
else if (y > 0.0f)
{
} else if (y > 0.0f) {
result = PI_2;
}
else if (y < 0.0f)
{
} else if (y < 0.0f) {
result = -PI_2;
}
return result;
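ApproxAtan2 above reduces atan2 to atan(z) with |z| <= 1 and evaluates the two-term polynomial atan(z) ~ (n1 + n2*z*z)*z, patching the quadrant in via the sign bits. A stand-alone sketch of the same approximation without the union-based sign tricks, for comparison (behaviour at exactly (0, 0) is simplified):

// Minimal sketch of the polynomial atan2 approximation used above.
#include <cmath>
#include <cstdio>

static float approx_atan2_sketch(float y, float x) {
  const float n1 = 0.97239411f;
  const float n2 = -0.19194795f;
  const float pi = 3.14159265f;
  if (x == 0.0f && y == 0.0f) return 0.0f;
  if (std::fabs(x) >= std::fabs(y)) {
    const float z = y / x;                     // |z| <= 1
    float r = (n1 + n2 * z * z) * z;           // atan(z) approximation
    if (x < 0.0f) r += (y < 0.0f ? -pi : pi);  // shift into correct quadrant
    return r;
  }
  const float z = x / y;                       // |z| < 1
  const float r = (n1 + n2 * z * z) * z;
  return (y > 0.0f ? pi / 2 : -pi / 2) - r;    // atan2 = +/- pi/2 - atan(x/y)
}

int main() {
  std::printf("%f vs %f\n", approx_atan2_sketch(1.0f, -1.0f),
              std::atan2(1.0f, -1.0f));
  return 0;
}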
@@ -143,60 +159,6 @@ double EquidistantCamera::Parameters::v0(void) const {
return m_v0;
}
bool EquidistantCamera::Parameters::readFromYamlFile(
const std::string &filename) {
cv::FileStorage fs(filename, cv::FileStorage::READ);
if (!fs.isOpened()) {
return false;
}
if (!fs["model_type"].isNone()) {
std::string sModelType;
fs["model_type"] >> sModelType;
if (sModelType.compare("KANNALA_BRANDT") != 0) {
return false;
}
}
m_modelType = KANNALA_BRANDT;
fs["camera_name"] >> m_cameraName;
m_imageWidth = static_cast<int>(fs["image_width"]);
m_imageHeight = static_cast<int>(fs["image_height"]);
cv::FileNode n = fs["projection_parameters"];
m_k2 = static_cast<double>(n["k2"]);
m_k3 = static_cast<double>(n["k3"]);
m_k4 = static_cast<double>(n["k4"]);
m_k5 = static_cast<double>(n["k5"]);
m_mu = static_cast<double>(n["mu"]);
m_mv = static_cast<double>(n["mv"]);
m_u0 = static_cast<double>(n["u0"]);
m_v0 = static_cast<double>(n["v0"]);
return true;
}
void EquidistantCamera::Parameters::writeToYamlFile(
const std::string &filename) const {
cv::FileStorage fs(filename, cv::FileStorage::WRITE);
fs << "model_type"
<< "KANNALA_BRANDT";
fs << "camera_name" << m_cameraName;
fs << "image_width" << m_imageWidth;
fs << "image_height" << m_imageHeight;
// projection: k2, k3, k4, k5, mu, mv, u0, v0
fs << "projection_parameters";
fs << "{"
<< "k2" << m_k2 << "k3" << m_k3 << "k4" << m_k4 << "k5" << m_k5 << "mu"
<< m_mu << "mv" << m_mv << "u0" << m_u0 << "v0" << m_v0 << "}";
fs.release();
}
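readFromYamlFile()/writeToYamlFile() above fix the on-disk calibration layout for the KANNALA_BRANDT model: model_type, camera_name, image size, and a projection_parameters map with k2..k5, mu, mv, u0, v0. A sketch of that layout parsed from an in-memory string with cv::FileStorage; the numeric values are placeholders, not real calibration data.

// Sketch: the YAML layout implied by the read/write code above.
#include <iostream>
#include <string>
#include <opencv2/core/core.hpp>

int main() {
  const std::string yaml =
      "%YAML:1.0\n"
      "model_type: KANNALA_BRANDT\n"
      "camera_name: left\n"
      "image_width: 640\n"
      "image_height: 400\n"
      "projection_parameters:\n"
      "   k2: -0.01\n"
      "   k3: 0.001\n"
      "   k4: 0.0\n"
      "   k5: 0.0\n"
      "   mu: 360.0\n"
      "   mv: 360.0\n"
      "   u0: 320.0\n"
      "   v0: 200.0\n";

  cv::FileStorage fs(yaml, cv::FileStorage::READ | cv::FileStorage::MEMORY);
  cv::FileNode n = fs["projection_parameters"];
  std::cout << "mu = " << static_cast<double>(n["mu"])
            << ", u0 = " << static_cast<double>(n["u0"]) << std::endl;
  return 0;
}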
EquidistantCamera::Parameters &EquidistantCamera::Parameters::operator=(
const EquidistantCamera::Parameters &other) {
if (this != &other) {
@@ -313,7 +275,7 @@ void EquidistantCamera::estimateIntrinsics(
double f0 = 0.0;
for (size_t i = 0; i < imagePoints.size(); ++i) {
std::vector<Eigen::Vector2d> center(boardSize.height);
double radius[boardSize.height];
double radius[boardSize.height]; // NOLINT
for (int r = 0; r < boardSize.height; ++r) {
std::vector<cv::Point2d> circle;
for (int c = 0; c < boardSize.width; ++c) {
@@ -373,17 +335,6 @@ void EquidistantCamera::estimateIntrinsics(
setParameters(params);
}
/**
* \brief Lifts a point from the image plane to the unit sphere
*
* \param p image coordinates
* \param P coordinates of the point on the sphere
*/
void EquidistantCamera::liftSphere(
const Eigen::Vector2d &p, Eigen::Vector3d &P) const {
liftProjective(p, P);
}
/**
* \brief Lifts a point from the image plane to its projective ray
*
@@ -413,15 +364,15 @@ void EquidistantCamera::liftProjective(
*/
void EquidistantCamera::spaceToPlane(
const Eigen::Vector3d &P, Eigen::Vector2d &p) const {
// double theta = acos(0.5);
//double theta = 0.5;
// double theta = acos(0.5);
// double theta = 0.5;
// double phi = 0.5;
// Eigen::Vector2d p_u = r(mParameters.k2(), mParameters.k3(), mParameters.k4(),
// mParameters.k5(), theta) *
// Eigen::Vector2d(cos(0.5), sin(0.5));
double theta = acos(P(2) / P.norm());
double phi = atan2(P(1), P(0));
//double phi = ApproxAtan2(P(1), P(0));
// double phi = ApproxAtan2(P(1), P(0));
Eigen::Vector2d p_u = r(mParameters.k2(), mParameters.k3(), mParameters.k4(),
mParameters.k5(), theta) *
@@ -453,33 +404,6 @@ void EquidistantCamera::spaceToPlane(
mParameters.mv() * p_u(1) + mParameters.v0();
}
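spaceToPlane above computes theta = acos(P_z / |P|) and phi = atan2(P_y, P_x), scales the unit direction (cos phi, sin phi) by the radial function r(k2..k5, theta), and applies the affine part (mu, mv, u0, v0). A compact sketch of that pipeline follows; the diff does not show r(), so the usual Kannala-Brandt odd polynomial r(theta) = theta + k2*theta^3 + k3*theta^5 + k4*theta^7 + k5*theta^9 is an assumption here.

// Sketch of the equidistant (Kannala-Brandt) projection used by spaceToPlane.
#include <cmath>
#include "eigen3/Eigen/Dense"

// Assumed radial polynomial; evaluated in Horner form.
static double r_poly(double k2, double k3, double k4, double k5, double theta) {
  const double t2 = theta * theta;
  return theta * (1.0 + t2 * (k2 + t2 * (k3 + t2 * (k4 + t2 * k5))));
}

static Eigen::Vector2d project_kb(
    const Eigen::Vector3d &P, double k2, double k3, double k4, double k5,
    double mu, double mv, double u0, double v0) {
  const double theta = std::acos(P(2) / P.norm());  // angle from optical axis
  const double phi = std::atan2(P(1), P(0));        // azimuth in the image plane
  const Eigen::Vector2d p_u = r_poly(k2, k3, k4, k5, theta) *
                              Eigen::Vector2d(std::cos(phi), std::sin(phi));
  return Eigen::Vector2d(mu * p_u(0) + u0, mv * p_u(1) + v0);  // affine part
}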
/**
* \brief Projects an undistorted 2D point p_u to the image plane
*
* \param p_u 2D point coordinates
* \return image point coordinates
*/
void EquidistantCamera::undistToPlane(
const Eigen::Vector2d &p_u, Eigen::Vector2d &p) const {
// Eigen::Vector2d p_d;
//
// if (m_noDistortion)
// {
// p_d = p_u;
// }
// else
// {
// // Apply distortion
// Eigen::Vector2d d_u;
// distortion(p_u, d_u);
// p_d = p_u + d_u;
// }
//
// // Apply generalised projection matrix
// p << mParameters.gamma1() * p_d(0) + mParameters.u0(),
// mParameters.gamma2() * p_d(1) + mParameters.v0();
}
void EquidistantCamera::initUndistortMap(
cv::Mat &map1, cv::Mat &map2, double fScale) const {
cv::Size imageSize(mParameters.imageWidth(), mParameters.imageHeight());
@@ -613,11 +537,6 @@ void EquidistantCamera::writeParameters(
parameterVec.at(7) = mParameters.v0();
}
void EquidistantCamera::writeParametersToYamlFile(
const std::string &filename) const {
mParameters.writeToYamlFile(filename);
}
std::string EquidistantCamera::parametersToString(void) const {
std::ostringstream oss;
oss << mParameters;
@@ -730,4 +649,4 @@ void EquidistantCamera::backprojectSymmetric(
}
}
}
}
} // namespace camodocal

View File

@@ -1,3 +1,16 @@
// Copyright 2018 Slightech Co., Ltd. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "camodocal/gpl/gpl.h"
#include <set>
@@ -112,15 +125,15 @@ int clock_gettime(int X, struct timespec *tp) {
if (usePerformanceCounter) {
QueryPerformanceCounter(&offset);
frequencyToMicroseconds =
(double)performanceFrequency.QuadPart / 1000000.;
static_cast<double>(performanceFrequency.QuadPart / 1000000.);
} else {
offset = getFILETIMEoffset();
frequencyToMicroseconds = 10.;
}
}
if (usePerformanceCounter)
if (usePerformanceCounter) {
QueryPerformanceCounter(&t);
else {
} else {
GetSystemTimeAsFileTime(&f);
t.QuadPart = f.dwHighDateTime;
t.QuadPart <<= 32;
@@ -128,7 +141,7 @@ int clock_gettime(int X, struct timespec *tp) {
}
t.QuadPart -= offset.QuadPart;
microseconds = (double)t.QuadPart / frequencyToMicroseconds;
microseconds = static_cast<double>(t.QuadPart / frequencyToMicroseconds);
t.QuadPart = microseconds;
tp->tv_sec = t.QuadPart / 1000000;
tp->tv_nsec = (t.QuadPart % 1000000) * 1000;
@@ -136,7 +149,7 @@ int clock_gettime(int X, struct timespec *tp) {
}
#endif
unsigned long long timeInMicroseconds(void) {
unsigned long long timeInMicroseconds(void) { // NOLINT
struct timespec tp;
#ifdef __APPLE__
tp = orwl_gettime();
@@ -271,7 +284,7 @@ float colormapJet[128][3] = {
{0.53125f, 0.0f, 0.0f}, {0.5f, 0.0f, 0.0f}};
void colorDepthImage(
cv::Mat &imgDepth, cv::Mat &imgColoredDepth, float minRange,
cv::Mat &imgDepth, cv::Mat &imgColoredDepth, float minRange, // NOLINT
float maxRange) {
imgColoredDepth = cv::Mat::zeros(imgDepth.size(), CV_8UC3);
@@ -295,7 +308,7 @@ void colorDepthImage(
}
bool colormap(
const std::string &name, unsigned char idx, float &r, float &g, float &b) {
const std::string &name, unsigned char idx, float &r, float &g, float &b) { // NOLINT
if (name.compare("jet") == 0) {
float *color = colormapJet[idx];
@@ -434,8 +447,8 @@ std::vector<cv::Point2i> bresCircle(int x0, int y0, int r) {
}
void fitCircle(
const std::vector<cv::Point2d> &points, double &centerX, double &centerY,
double &radius) {
const std::vector<cv::Point2d> &points, double &centerX, double &centerY, // NOLINT
double &radius) { // NOLINT
// D. Umbach, and K. Jones, A Few Methods for Fitting Circles to Data,
// IEEE Transactions on Instrumentation and Measurement, 2000
// We use the modified least squares method.
@@ -573,8 +586,8 @@ char UTMLetterDesignator(double latitude) {
}
void LLtoUTM(
double latitude, double longitude, double &utmNorthing, double &utmEasting,
std::string &utmZone) {
double latitude, double longitude, double &utmNorthing, double &utmEasting, // NOLINT
std::string &utmZone) { // NOLINT
// converts lat/long to UTM coords. Equations from USGS Bulletin 1532
// East Longitudes are positive, West longitudes are negative.
// North latitudes are positive, South latitudes are negative
@@ -610,7 +623,7 @@ void LLtoUTM(
ZoneNumber = 37;
}
LongOrigin = static_cast<double>(
(ZoneNumber - 1) * 6 - 180 + 3); //+3 puts origin in middle of zone
(ZoneNumber - 1) * 6 - 180 + 3); // +3 puts origin in middle of zone
LongOriginRad = LongOrigin * M_PI / 180.0;
// compute the UTM Zone from the latitude and longitude
@@ -659,7 +672,7 @@ void LLtoUTM(
void UTMtoLL(
double utmNorthing, double utmEasting, const std::string &utmZone,
double &latitude, double &longitude) {
double &latitude, double &longitude) { // NOLINT
// converts UTM coords to lat/long. Equations from USGS Bulletin 1532
// East Longitudes are positive, West longitudes are negative.
// North latitudes are positive, South latitudes are negative
@@ -691,7 +704,7 @@ void UTMtoLL(
}
LongOrigin = (ZoneNumber - 1.0) * 6.0 - 180.0 +
3.0; //+3 puts origin in middle of zone
3.0; // +3 puts origin in middle of zone
eccPrimeSquared = WGS84_ECCSQ / (1.0 - WGS84_ECCSQ);
@@ -732,22 +745,22 @@ void UTMtoLL(
longitude = LongOrigin + longitude / M_PI * 180.0;
}
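LLtoUTM()/UTMtoLL() above convert between geodetic and UTM coordinates (USGS Bulletin 1532 equations, per the comments). A round-trip usage sketch relying only on the signatures declared in gpl.h, with arbitrary example coordinates and the namespace camodocal assumed:

// Round-trip usage sketch for LLtoUTM()/UTMtoLL().
#include <iostream>
#include <string>
#include "camodocal/gpl/gpl.h"

int main() {
  double northing = 0.0, easting = 0.0;
  std::string zone;
  camodocal::LLtoUTM(31.2304, 121.4737, northing, easting, zone);  // example lat/long

  double lat = 0.0, lon = 0.0;
  camodocal::UTMtoLL(northing, easting, zone, lat, lon);  // should recover ~31.23, 121.47
  std::cout << zone << ": " << lat << ", " << lon << std::endl;
  return 0;
}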
long int timestampDiff(uint64_t t1, uint64_t t2) {
long int timestampDiff(uint64_t t1, uint64_t t2) { // NOLINT
if (t2 > t1) {
uint64_t d = t2 - t1;
if (d > std::numeric_limits<long int>::max()) {
return std::numeric_limits<long int>::max();
if (d > std::numeric_limits<long int>::max()) { // NOLINT
return std::numeric_limits<long int>::max(); // NOLINT
} else {
return d;
}
} else {
uint64_t d = t1 - t2;
if (d > std::numeric_limits<long int>::max()) {
return std::numeric_limits<long int>::min();
if (d > std::numeric_limits<long int>::max()) { // NOLINT
return std::numeric_limits<long int>::min(); // NOLINT
} else {
return -static_cast<long int>(d);
return -static_cast<long int>(d); // NOLINT
}
}
}
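timestampDiff() above returns t2 - t1 as a signed long int, saturating at the long int limits when the unsigned difference does not fit. Below is an alternative sketch of the same saturating logic using <cstdint> fixed-width types instead of the NOLINT'd long int; it is not what the file does, only an equivalent formulation.

// Alternative sketch of the saturating timestamp difference above.
#include <cassert>
#include <cstdint>
#include <limits>

static std::int64_t timestamp_diff_sketch(std::uint64_t t1, std::uint64_t t2) {
  const std::uint64_t hi =
      static_cast<std::uint64_t>(std::numeric_limits<std::int64_t>::max());
  if (t2 >= t1) {
    const std::uint64_t d = t2 - t1;
    return d > hi ? std::numeric_limits<std::int64_t>::max()
                  : static_cast<std::int64_t>(d);
  }
  const std::uint64_t d = t1 - t2;
  return d > hi ? std::numeric_limits<std::int64_t>::min()
                : -static_cast<std::int64_t>(d);
}

int main() {
  assert(timestamp_diff_sketch(100, 350) == 250);   // forward difference
  assert(timestamp_diff_sketch(350, 100) == -250);  // backward difference
  assert(timestamp_diff_sketch(0, ~0ULL) ==
         std::numeric_limits<std::int64_t>::max());  // saturates at the maximum
  return 0;
}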