feat(points_processor.cc): implement equidistant model for point cloud

Osenberg 2019-01-09 16:00:34 +08:00
parent 9474528972
commit fb1ffb81ca


@@ -14,6 +14,8 @@
#include "mynteye/api/processor/points_processor.h"
#include <utility>
#include <vector>
#include <limits>
#include <opencv2/calib3d/calib3d.hpp>
@@ -21,6 +23,33 @@
MYNTEYE_BEGIN_NAMESPACE
namespace {
// Encapsulate differences between processing float and uint16_t depths
template<typename T> struct DepthTraits {};
template<>
struct DepthTraits<uint16_t> {
static inline bool valid(uint16_t depth) { return depth != 0; }
static inline float toMeters(uint16_t depth) { return depth * 0.001f; } // originally mm
static inline uint16_t fromMeters(float depth) { return (depth * 1000.0f) + 0.5f; }
static inline void initializeBuffer(std::vector<uint8_t>& buffer) {} // Do nothing - already zero-filled
};
template<>
struct DepthTraits<float> {
static inline bool valid(float depth) { return std::isfinite(depth); }
static inline float toMeters(float depth) { return depth; }
static inline float fromMeters(float depth) { return depth; }
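// Fill the buffer with NaNs so missing points are marked as invalid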
static inline void initializeBuffer(std::vector<uint8_t>& buffer) {
float* start = reinterpret_cast<float*>(&buffer[0]);
float* end = reinterpret_cast<float*>(&buffer[0] + buffer.size());
std::fill(start, end, std::numeric_limits<float>::quiet_NaN());
}
};
}  // namespace
const char PointsProcessor::NAME[] = "PointsProcessor";
PointsProcessor::PointsProcessor(std::int32_t proc_period)
@@ -42,6 +71,46 @@ Object *PointsProcessor::OnCreateOutput() {
bool PointsProcessor::OnProcess(
Object *const in, Object *const out, Processor *const parent) {
MYNTEYE_UNUSED(parent)
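// NOTE: the pinhole intrinsics below (fx, fy, cx, cy) are hardcoded calibration values.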
float fx = 3.6797709792391299e+02;
float fy = 3.6808712539453859e+02;
float cx = 3.7414963027144353e+02;
float cy = 2.3125000326472903e+02;
// Use correct principal point from calibration
float center_x = cx;
float center_y = cy;
// Combine unit conversion (if necessary) with scaling by focal length for computing (X,Y)
double unit_scaling = DepthTraits<float>::toMeters(static_cast<float>(1));
float constant_x = unit_scaling / fx;
float constant_y = unit_scaling / fy;
float bad_point = std::numeric_limits<float>::quiet_NaN();
const ObjMat *input = Object::Cast<ObjMat>(in);
ObjMat *output = Object::Cast<ObjMat>(out);
output->value.create(input->value.size(), CV_MAKETYPE(CV_32F, 3));
int height = static_cast<int>(output->value.rows);
int width = static_cast<int>(output->value.cols);
for (int v = 0; v < height; ++v) {
cv::Vec3f *dptr = output->value.ptr<cv::Vec3f>(v);
for (int u = 0; u < width; ++u) {
float depth = input->value.at<float>(v, u);
// Missing points denoted by NaNs; write them explicitly since the output is uninitialized
if (!DepthTraits<float>::valid(depth)) {
dptr[u] = cv::Vec3f(bad_point, bad_point, bad_point);
continue;
}
dptr[u][0] = (u - center_x) * depth * constant_x;
dptr[u][1] = (v - center_y) * depth * constant_y;
dptr[u][2] = DepthTraits<float>::toMeters(depth);
}
}
return true;
}
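
For reference, the per-pixel computation in OnProcess() above is the standard pinhole back-projection X = (u - cx) * Z / fx, Y = (v - cy) * Z / fy, Z = depth in meters. Below is a minimal standalone sketch, not part of the commit, that applies the same formula to a single pixel; the pixel coordinates and depth value are illustrative assumptions, while the intrinsics are the hardcoded values from this change.

#include <cmath>
#include <cstdio>

int main() {
  // Hardcoded intrinsics from this commit (pixels).
  const float fx = 3.6797709792391299e+02f;
  const float fy = 3.6808712539453859e+02f;
  const float cx = 3.7414963027144353e+02f;
  const float cy = 2.3125000326472903e+02f;
  // Illustrative pixel coordinates and depth in meters (assumed values).
  const int u = 400, v = 240;
  const float depth = 1.25f;
  if (std::isfinite(depth)) {
    // Pinhole back-projection: pixel (u, v) with depth Z maps to 3D point (X, Y, Z).
    float x = (u - cx) * depth / fx;
    float y = (v - cy) * depth / fy;
    float z = depth;
    std::printf("point: (%.3f, %.3f, %.3f) m\n", x, y, z);
  }
  return 0;
}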