42 #include <visp3/core/vpIoTools.h>
43 #include <visp3/vision/vpKeyPoint.h>
45 #if (VISP_HAVE_OPENCV_VERSION >= 0x020101)
47 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
48 #include <opencv2/calib3d/calib3d.hpp>
51 #include <pugixml.hpp>
56 inline cv::DMatch knnToDMatch(
const std::vector<cv::DMatch> &knnMatches)
58 if (knnMatches.size() > 0) {
65 inline vpImagePoint matchRansacToVpImage(
const std::pair<cv::KeyPoint, cv::Point3f> &pair)
83 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
84 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
85 m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
86 m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
87 m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
88 m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
89 m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
90 m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
91 m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0),
92 m_ransacThreshold(0.01), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(),
93 m_useAffineDetection(false),
94 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
95 m_useBruteForceCrossCheck(true),
97 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
98 m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
102 m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
103 m_extractorNames.push_back(m_mapOfDescriptorNames[descriptorType]);
119 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
120 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
121 m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
122 m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
123 m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
124 m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
125 m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
126 m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
127 m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0),
128 m_ransacThreshold(0.01), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(),
129 m_useAffineDetection(false),
130 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
131 m_useBruteForceCrossCheck(true),
133 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
134 m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
138 m_detectorNames.push_back(detectorName);
139 m_extractorNames.push_back(extractorName);
155 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
156 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(detectorNames),
157 m_detectors(), m_extractionTime(0.), m_extractorNames(extractorNames), m_extractors(), m_filteredMatches(),
158 m_filterType(filterType), m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
159 m_matcher(), m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0),
160 m_matchingRatioThreshold(0.85), m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200),
161 m_nbRansacMinInlierCount(100), m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(),
162 m_queryFilteredKeyPoints(), m_queryKeyPoints(), m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(),
163 m_ransacOutliers(), m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
164 m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
165 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
166 m_useBruteForceCrossCheck(true),
168 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
169 m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
183 void vpKeyPoint::affineSkew(
double tilt,
double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai)
188 mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));
190 cv::Mat A = cv::Mat::eye(2, 3, CV_32F);
193 if (std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
198 A = (cv::Mat_<float>(2, 2) << c, -s, s, c);
200 cv::Mat corners = (cv::Mat_<float>(4, 2) << 0, 0, w, 0, w, h, 0, h);
201 cv::Mat tcorners = corners * A.t();
202 cv::Mat tcorners_x, tcorners_y;
203 tcorners.col(0).copyTo(tcorners_x);
204 tcorners.col(1).copyTo(tcorners_y);
205 std::vector<cv::Mat> channels;
206 channels.push_back(tcorners_x);
207 channels.push_back(tcorners_y);
208 cv::merge(channels, tcorners);
210 cv::Rect rect = cv::boundingRect(tcorners);
211 A = (cv::Mat_<float>(2, 3) << c, -s, -rect.x, s, c, -rect.y);
213 cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
216 if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon()) {
217 double s = 0.8 * sqrt(tilt * tilt - 1);
218 cv::GaussianBlur(img, img, cv::Size(0, 0), s, 0.01);
219 cv::resize(img, img, cv::Size(0, 0), 1.0 / tilt, 1.0, cv::INTER_NEAREST);
220 A.row(0) = A.row(0) / tilt;
223 if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon() ||
224 std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
227 cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
229 cv::invertAffineTransform(A, Ai);
258 unsigned int height,
unsigned int width)
273 unsigned int height,
unsigned int width)
289 m_trainPoints.clear();
290 m_mapOfImageId.clear();
291 m_mapOfImages.clear();
292 m_currentImageId = 1;
294 if (m_useAffineDetection) {
295 std::vector<std::vector<cv::KeyPoint> > listOfTrainKeyPoints;
296 std::vector<cv::Mat> listOfTrainDescriptors;
302 m_trainKeyPoints.clear();
303 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfTrainKeyPoints.begin();
304 it != listOfTrainKeyPoints.end(); ++it) {
305 m_trainKeyPoints.insert(m_trainKeyPoints.end(), it->begin(), it->end());
309 for (std::vector<cv::Mat>::const_iterator it = listOfTrainDescriptors.begin(); it != listOfTrainDescriptors.end(); ++it) {
312 it->copyTo(m_trainDescriptors);
314 m_trainDescriptors.push_back(*it);
318 detect(I, m_trainKeyPoints, m_detectionTime, rectangle);
319 extract(I, m_trainKeyPoints, m_trainDescriptors, m_extractionTime);
324 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
325 m_mapOfImageId[it->class_id] = m_currentImageId;
329 m_mapOfImages[m_currentImageId] = I;
338 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
340 return static_cast<unsigned int>(m_trainKeyPoints.size());
368 std::vector<cv::Point3f> &points3f,
bool append,
int class_id)
370 cv::Mat trainDescriptors;
372 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
374 extract(I, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);
376 if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
380 std::map<size_t, size_t> mapOfKeypointHashes;
382 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
384 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
387 std::vector<cv::Point3f> trainPoints_tmp;
388 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
389 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
390 trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
395 points3f = trainPoints_tmp;
398 return (
buildReference(I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
413 std::vector<cv::Point3f> &points3f,
bool append,
int class_id)
415 cv::Mat trainDescriptors;
417 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
419 extract(I_color, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);
421 if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
425 std::map<size_t, size_t> mapOfKeypointHashes;
427 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
429 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
432 std::vector<cv::Point3f> trainPoints_tmp;
433 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
434 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
435 trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
440 points3f = trainPoints_tmp;
443 return (
buildReference(I_color, trainKeyPoints, trainDescriptors, points3f, append, class_id));
460 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
461 bool append,
int class_id)
464 m_trainPoints.clear();
465 m_mapOfImageId.clear();
466 m_mapOfImages.clear();
467 m_currentImageId = 0;
468 m_trainKeyPoints.clear();
473 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
475 if (class_id != -1) {
476 for (std::vector<cv::KeyPoint>::iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
477 it->class_id = class_id;
483 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
484 m_mapOfImageId[it->class_id] = m_currentImageId;
488 m_mapOfImages[m_currentImageId] = I;
491 m_trainKeyPoints.insert(m_trainKeyPoints.end(), trainKeyPoints_tmp.begin(), trainKeyPoints_tmp.end());
493 trainDescriptors.copyTo(m_trainDescriptors);
495 m_trainDescriptors.push_back(trainDescriptors);
497 this->m_trainPoints.insert(m_trainPoints.end(), points3f.begin(), points3f.end());
505 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
509 return static_cast<unsigned int>(m_trainKeyPoints.size());
525 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
526 bool append,
int class_id)
529 return (
buildReference(m_I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
551 std::vector<vpPoint>::const_iterator it_roi = roi.begin();
558 vpPlane Po(pts[0], pts[1], pts[2]);
559 double xc = 0.0, yc = 0.0;
570 point_obj = cMo.
inverse() * point_cam;
571 point = cv::Point3f((
float)point_obj[0], (
float)point_obj[1], (
float)point_obj[2]);
593 std::vector<vpPoint>::const_iterator it_roi = roi.begin();
600 vpPlane Po(pts[0], pts[1], pts[2]);
601 double xc = 0.0, yc = 0.0;
612 point_obj = cMo.
inverse() * point_cam;
633 std::vector<cv::KeyPoint> &candidates,
634 const std::vector<vpPolygon> &polygons,
635 const std::vector<std::vector<vpPoint> > &roisPt,
636 std::vector<cv::Point3f> &points, cv::Mat *descriptors)
638 std::vector<cv::KeyPoint> candidatesToCheck = candidates;
645 std::vector<std::pair<cv::KeyPoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
646 for (
size_t i = 0; i < candidatesToCheck.size(); i++) {
647 pairOfCandidatesToCheck[i] = std::pair<cv::KeyPoint, size_t>(candidatesToCheck[i], i);
651 std::vector<vpPolygon> polygons_tmp = polygons;
652 for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
653 std::vector<std::pair<cv::KeyPoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
655 while (it2 != pairOfCandidatesToCheck.end()) {
656 imPt.
set_ij(it2->first.pt.y, it2->first.pt.x);
657 if (it1->isInside(imPt)) {
658 candidates.push_back(it2->first);
660 points.push_back(pt);
662 if (descriptors != NULL) {
663 desc.push_back(descriptors->row((
int)it2->second));
667 it2 = pairOfCandidatesToCheck.erase(it2);
674 if (descriptors != NULL) {
675 desc.copyTo(*descriptors);
696 std::vector<vpImagePoint> &candidates,
697 const std::vector<vpPolygon> &polygons,
698 const std::vector<std::vector<vpPoint> > &roisPt,
699 std::vector<vpPoint> &points, cv::Mat *descriptors)
701 std::vector<vpImagePoint> candidatesToCheck = candidates;
707 std::vector<std::pair<vpImagePoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
708 for (
size_t i = 0; i < candidatesToCheck.size(); i++) {
709 pairOfCandidatesToCheck[i] = std::pair<vpImagePoint, size_t>(candidatesToCheck[i], i);
713 std::vector<vpPolygon> polygons_tmp = polygons;
714 for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
715 std::vector<std::pair<vpImagePoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
717 while (it2 != pairOfCandidatesToCheck.end()) {
718 if (it1->isInside(it2->first)) {
719 candidates.push_back(it2->first);
721 points.push_back(pt);
723 if (descriptors != NULL) {
724 desc.push_back(descriptors->row((
int)it2->second));
728 it2 = pairOfCandidatesToCheck.erase(it2);
753 const std::vector<vpCylinder> &cylinders,
754 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<cv::Point3f> &points,
755 cv::Mat *descriptors)
757 std::vector<cv::KeyPoint> candidatesToCheck = candidates;
763 size_t cpt_keypoint = 0;
764 for (std::vector<cv::KeyPoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
765 ++it1, cpt_keypoint++) {
766 size_t cpt_cylinder = 0;
769 for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
770 it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
773 for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
775 candidates.push_back(*it1);
779 double xm = 0.0, ym = 0.0;
781 double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
783 if (!
vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
785 point_cam[0] = xm * Z;
786 point_cam[1] = ym * Z;
790 point_obj = cMo.
inverse() * point_cam;
793 points.push_back(cv::Point3f((
float)pt.
get_oX(), (
float)pt.
get_oY(), (
float)pt.
get_oZ()));
795 if (descriptors != NULL) {
796 desc.push_back(descriptors->row((
int)cpt_keypoint));
806 if (descriptors != NULL) {
807 desc.copyTo(*descriptors);
828 const std::vector<vpCylinder> &cylinders,
829 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<vpPoint> &points,
830 cv::Mat *descriptors)
832 std::vector<vpImagePoint> candidatesToCheck = candidates;
838 size_t cpt_keypoint = 0;
839 for (std::vector<vpImagePoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
840 ++it1, cpt_keypoint++) {
841 size_t cpt_cylinder = 0;
844 for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
845 it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
848 for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
850 candidates.push_back(*it1);
854 double xm = 0.0, ym = 0.0;
856 double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
858 if (!
vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
860 point_cam[0] = xm * Z;
861 point_cam[1] = ym * Z;
865 point_obj = cMo.
inverse() * point_cam;
868 points.push_back(pt);
870 if (descriptors != NULL) {
871 desc.push_back(descriptors->row((
int)cpt_keypoint));
881 if (descriptors != NULL) {
882 desc.copyTo(*descriptors);
905 if (imagePoints.size() < 4 || objectPoints.size() < 4 || imagePoints.size() != objectPoints.size()) {
907 std::cerr <<
"Not enough points to compute the pose (at least 4 points "
914 cv::Mat cameraMatrix =
924 cv::Mat distCoeffs = cv::Mat::zeros(1, 5, CV_64F);
927 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
929 cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec,
false, m_nbRansacIterations,
930 (
float)m_ransacReprojectionError,
933 inlierIndex, cv::SOLVEPNP_ITERATIVE);
953 int nbInlierToReachConsensus = m_nbRansacMinInlierCount;
954 if (m_useConsensusPercentage) {
955 nbInlierToReachConsensus = (int)(m_ransacConsensusPercentage / 100.0 * (
double)m_queryFilteredKeyPoints.size());
958 cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec,
false, m_nbRansacIterations,
959 (
float)m_ransacReprojectionError, nbInlierToReachConsensus, inlierIndex);
961 }
catch (cv::Exception &e) {
962 std::cerr << e.what() << std::endl;
966 vpTranslationVector translationVec(tvec.at<
double>(0), tvec.at<
double>(1), tvec.at<
double>(2));
967 vpThetaUVector thetaUVector(rvec.at<
double>(0), rvec.at<
double>(1), rvec.at<
double>(2));
996 std::vector<vpPoint> &inliers,
double &elapsedTime,
bool (*func)(
const vpHomogeneousMatrix &))
998 std::vector<unsigned int> inlierIndex;
999 return computePose(objectVpPoints, cMo, inliers, inlierIndex, elapsedTime, func);
1016 std::vector<vpPoint> &inliers, std::vector<unsigned int> &inlierIndex,
double &elapsedTime,
1021 if (objectVpPoints.size() < 4) {
1031 for (std::vector<vpPoint>::const_iterator it = objectVpPoints.begin(); it != objectVpPoints.end(); ++it) {
1035 unsigned int nbInlierToReachConsensus = (
unsigned int)m_nbRansacMinInlierCount;
1036 if (m_useConsensusPercentage) {
1037 nbInlierToReachConsensus =
1038 (
unsigned int)(m_ransacConsensusPercentage / 100.0 * (
double)m_queryFilteredKeyPoints.size());
1048 bool isRansacPoseEstimationOk =
false;
1055 if (m_computeCovariance) {
1059 std::cerr <<
"e=" << e.
what() << std::endl;
1077 return isRansacPoseEstimationOk;
1094 double vpKeyPoint::computePoseEstimationError(
const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
1097 if (matchKeyPoints.size() == 0) {
1103 std::vector<double> errors(matchKeyPoints.size());
1106 for (std::vector<std::pair<cv::KeyPoint, cv::Point3f> >::const_iterator it = matchKeyPoints.begin();
1107 it != matchKeyPoints.end(); ++it, cpt++) {
1112 double u = 0.0, v = 0.0;
1114 errors[cpt] = std::sqrt((u - it->first.pt.x) * (u - it->first.pt.x) + (v - it->first.pt.y) * (v - it->first.pt.y));
1117 return std::accumulate(errors.begin(), errors.end(), 0.0) / errors.size();
1169 unsigned int nbImg = (
unsigned int)(m_mapOfImages.size() + 1);
1171 if (m_mapOfImages.empty()) {
1172 std::cerr <<
"There is no training image loaded !" << std::endl;
1182 unsigned int nbImgSqrt = (
unsigned int)
vpMath::round(std::sqrt((
double)nbImg));
1185 unsigned int nbWidth = nbImgSqrt;
1187 unsigned int nbHeight = nbImgSqrt;
1190 if (nbImgSqrt * nbImgSqrt < nbImg) {
1194 unsigned int maxW = ICurrent.
getWidth();
1195 unsigned int maxH = ICurrent.
getHeight();
1196 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1198 if (maxW < it->second.getWidth()) {
1199 maxW = it->second.getWidth();
1202 if (maxH < it->second.getHeight()) {
1203 maxH = it->second.getHeight();
1224 unsigned int nbImg = (
unsigned int)(m_mapOfImages.size() + 1);
1226 if (m_mapOfImages.empty()) {
1227 std::cerr <<
"There is no training image loaded !" << std::endl;
1237 unsigned int nbImgSqrt = (
unsigned int)
vpMath::round(std::sqrt((
double)nbImg));
1240 unsigned int nbWidth = nbImgSqrt;
1242 unsigned int nbHeight = nbImgSqrt;
1245 if (nbImgSqrt * nbImgSqrt < nbImg) {
1249 unsigned int maxW = ICurrent.
getWidth();
1250 unsigned int maxH = ICurrent.
getHeight();
1251 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1253 if (maxW < it->second.getWidth()) {
1254 maxW = it->second.getWidth();
1257 if (maxH < it->second.getHeight()) {
1258 maxH = it->second.getHeight();
1276 detect(I, keyPoints, elapsedTime, rectangle);
1289 detect(I_color, keyPoints, elapsedTime, rectangle);
1300 void vpKeyPoint::detect(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints,
const cv::Mat &mask)
1303 detect(matImg, keyPoints, elapsedTime, mask);
1319 cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);
1322 cv::Point leftTop((
int)rectangle.
getLeft(), (
int)rectangle.
getTop()),
1324 cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
1326 mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
1329 detect(matImg, keyPoints, elapsedTime, mask);
1345 cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);
1348 cv::Point leftTop((
int)rectangle.
getLeft(), (
int)rectangle.
getTop()),
1350 cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
1352 mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
1355 detect(matImg, keyPoints, elapsedTime, mask);
1367 void vpKeyPoint::detect(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints,
double &elapsedTime,
1368 const cv::Mat &mask)
1373 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
1374 it != m_detectors.end(); ++it) {
1375 std::vector<cv::KeyPoint> kp;
1376 it->second->detect(matImg, kp, mask);
1377 keyPoints.insert(keyPoints.end(), kp.begin(), kp.end());
1392 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1394 std::vector<vpImagePoint> vpTrainImageKeyPoints;
1397 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1412 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1414 std::vector<vpImagePoint> vpTrainImageKeyPoints;
1417 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1432 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1435 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1449 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1452 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1469 unsigned int crossSize,
unsigned int lineThickness,
const vpColor &color)
1472 srand((
unsigned int)time(NULL));
1475 std::vector<vpImagePoint> queryImageKeyPoints;
1477 std::vector<vpImagePoint> trainImageKeyPoints;
1481 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1483 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1486 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
1487 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1488 queryImageKeyPoints[(
size_t)it->queryIdx].get_j() + IRef.
getWidth());
1507 unsigned int crossSize,
unsigned int lineThickness,
const vpColor &color)
1510 srand((
unsigned int)time(NULL));
1513 std::vector<vpImagePoint> queryImageKeyPoints;
1515 std::vector<vpImagePoint> trainImageKeyPoints;
1519 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1521 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1524 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
1525 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1526 queryImageKeyPoints[(
size_t)it->queryIdx].get_j() + IRef.
getWidth());
1545 unsigned int crossSize,
unsigned int lineThickness,
const vpColor &color)
1548 srand((
unsigned int)time(NULL));
1551 std::vector<vpImagePoint> queryImageKeyPoints;
1553 std::vector<vpImagePoint> trainImageKeyPoints;
1557 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1559 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1562 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
1563 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1564 queryImageKeyPoints[(
size_t)it->queryIdx].get_j() + IRef.
getWidth());
1583 const std::vector<vpImagePoint> &ransacInliers,
unsigned int crossSize,
1584 unsigned int lineThickness)
1586 if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1588 std::cerr <<
"There is no training image loaded !" << std::endl;
1594 int nbImg = (int)(m_mapOfImages.size() + 1);
1602 int nbWidth = nbImgSqrt;
1603 int nbHeight = nbImgSqrt;
1605 if (nbImgSqrt * nbImgSqrt < nbImg) {
1609 std::map<int, int> mapOfImageIdIndex;
1612 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1614 mapOfImageIdIndex[it->first] = cpt;
1616 if (maxW < it->second.getWidth()) {
1617 maxW = it->second.getWidth();
1620 if (maxH < it->second.getHeight()) {
1621 maxH = it->second.getHeight();
1627 int medianI = nbHeight / 2;
1628 int medianJ = nbWidth / 2;
1629 int medianIndex = medianI * nbWidth + medianJ;
1630 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1632 int current_class_id_index = 0;
1633 if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1634 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
1638 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
1641 int indexI = current_class_id_index / nbWidth;
1642 int indexJ = current_class_id_index - (indexI * nbWidth);
1643 topLeftCorner.
set_ij((
int)maxH * indexI, (int)maxW * indexJ);
1650 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
1651 for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1656 for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
1661 for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
1667 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1668 int current_class_id = 0;
1669 if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(
size_t)it->trainIdx].class_id]] < medianIndex) {
1670 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
1674 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
1677 int indexI = current_class_id / nbWidth;
1678 int indexJ = current_class_id - (indexI * nbWidth);
1680 vpImagePoint end((
int)maxH * indexI + m_trainKeyPoints[(
size_t)it->trainIdx].pt.y,
1681 (
int)maxW * indexJ + m_trainKeyPoints[(
size_t)it->trainIdx].pt.x);
1682 vpImagePoint start((
int)maxH * medianI + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.y,
1683 (
int)maxW * medianJ + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.x);
1704 const std::vector<vpImagePoint> &ransacInliers,
unsigned int crossSize,
1705 unsigned int lineThickness)
1707 if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1709 std::cerr <<
"There is no training image loaded !" << std::endl;
1715 int nbImg = (int)(m_mapOfImages.size() + 1);
1723 int nbWidth = nbImgSqrt;
1724 int nbHeight = nbImgSqrt;
1726 if (nbImgSqrt * nbImgSqrt < nbImg) {
1730 std::map<int, int> mapOfImageIdIndex;
1733 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1735 mapOfImageIdIndex[it->first] = cpt;
1737 if (maxW < it->second.getWidth()) {
1738 maxW = it->second.getWidth();
1741 if (maxH < it->second.getHeight()) {
1742 maxH = it->second.getHeight();
1748 int medianI = nbHeight / 2;
1749 int medianJ = nbWidth / 2;
1750 int medianIndex = medianI * nbWidth + medianJ;
1751 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1753 int current_class_id_index = 0;
1754 if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1755 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
1759 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
1762 int indexI = current_class_id_index / nbWidth;
1763 int indexJ = current_class_id_index - (indexI * nbWidth);
1764 topLeftCorner.
set_ij((
int)maxH * indexI, (int)maxW * indexJ);
1771 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
1772 for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1777 for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
1782 for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
1788 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1789 int current_class_id = 0;
1790 if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(
size_t)it->trainIdx].class_id]] < medianIndex) {
1791 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
1795 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
1798 int indexI = current_class_id / nbWidth;
1799 int indexJ = current_class_id - (indexI * nbWidth);
1801 vpImagePoint end((
int)maxH * indexI + m_trainKeyPoints[(
size_t)it->trainIdx].pt.y,
1802 (
int)maxW * indexJ + m_trainKeyPoints[(
size_t)it->trainIdx].pt.x);
1803 vpImagePoint start((
int)maxH * medianI + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.y,
1804 (
int)maxW * medianJ + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.x);
1824 std::vector<cv::Point3f> *trainPoints)
1827 extract(I, keyPoints, descriptors, elapsedTime, trainPoints);
1841 std::vector<cv::Point3f> *trainPoints)
1844 extract(I_color, keyPoints, descriptors, elapsedTime, trainPoints);
1858 std::vector<cv::Point3f> *trainPoints)
1861 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1876 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1880 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1895 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1899 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1914 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1919 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator itd = m_extractors.begin();
1920 itd != m_extractors.end(); ++itd) {
1924 if (trainPoints != NULL && !trainPoints->empty()) {
1927 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1930 itd->second->compute(matImg, keyPoints, descriptors);
1932 if (keyPoints.size() != keyPoints_tmp.size()) {
1936 std::map<size_t, size_t> mapOfKeypointHashes;
1938 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1940 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1943 std::vector<cv::Point3f> trainPoints_tmp;
1944 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1945 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1946 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1951 *trainPoints = trainPoints_tmp;
1955 itd->second->compute(matImg, keyPoints, descriptors);
1960 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1964 itd->second->compute(matImg, keyPoints, desc);
1966 if (keyPoints.size() != keyPoints_tmp.size()) {
1970 std::map<size_t, size_t> mapOfKeypointHashes;
1972 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1974 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1977 std::vector<cv::Point3f> trainPoints_tmp;
1978 cv::Mat descriptors_tmp;
1979 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1980 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1981 if (trainPoints != NULL && !trainPoints->empty()) {
1982 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1985 if (!descriptors.empty()) {
1986 descriptors_tmp.push_back(descriptors.row((
int)mapOfKeypointHashes[myKeypointHash(*it)]));
1991 if (trainPoints != NULL) {
1993 *trainPoints = trainPoints_tmp;
1996 descriptors_tmp.copyTo(descriptors);
2000 if (descriptors.empty()) {
2001 desc.copyTo(descriptors);
2003 cv::hconcat(descriptors, desc, descriptors);
2008 if (keyPoints.size() != (
size_t)descriptors.rows) {
2009 std::cerr <<
"keyPoints.size() != (size_t) descriptors.rows" << std::endl;
// Filter the raw matches (m_knnMatches or m_matches) into
// m_filteredMatches / m_objectFilteredPoints / m_queryFilteredKeyPoints.
// NOTE(review): this chunk is a lossy extraction — several original lines
// (branch conditions, the accumulation of `mean`, closing braces) are
// elided, so comments below only describe what the visible lines establish.
2017 void vpKeyPoint::filterMatches()
2019 std::vector<cv::KeyPoint> queryKpts;
2020 std::vector<cv::Point3f> trainPts;
2021 std::vector<cv::DMatch> m;
// --- k-NN branch (operates on m_knnMatches; the guarding condition is
// elided here — presumably selected by m_useKnn / m_filterType; confirm
// against the full source). ---
2027 double min_dist = DBL_MAX;
2029 std::vector<double> distance_vec(m_knnMatches.size());
// Collect the best (index 0) distance of every k-NN match and track the
// minimum; the `mean` accumulation line is elided in this extraction.
2032 for (
size_t i = 0; i < m_knnMatches.size(); i++) {
2033 double dist = m_knnMatches[i][0].distance;
2035 distance_vec[i] = dist;
2037 if (dist < min_dist) {
2044 mean /= m_queryDescriptors.rows;
// Standard deviation from sum of squares: sqrt(E[d^2] - mean^2);
// threshold keeps matches close to the best distance.
2047 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
2048 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
2049 double threshold = min_dist + stdev;
// Second pass: keep matches passing the (ratio and/or distance) test.
// The acceptance condition using `ratio`/`dist` is elided here.
2051 for (
size_t i = 0; i < m_knnMatches.size(); i++) {
2052 if (m_knnMatches[i].size() >= 2) {
// Lowe-style ratio of best to second-best distance.
2055 float ratio = m_knnMatches[i][0].distance / m_knnMatches[i][1].distance;
2060 double dist = m_knnMatches[i][0].distance;
// Re-base queryIdx to the position in the filtered keypoint list so that
// m_filteredMatches indexes m_queryFilteredKeyPoints directly.
2063 m.push_back(cv::DMatch((
int)queryKpts.size(), m_knnMatches[i][0].trainIdx, m_knnMatches[i][0].distance));
2065 if (!m_trainPoints.empty()) {
2066 trainPts.push_back(m_trainPoints[(
size_t)m_knnMatches[i][0].trainIdx]);
2068 queryKpts.push_back(m_queryKeyPoints[(
size_t)m_knnMatches[i][0].queryIdx]);
// --- plain-match branch (operates on m_matches): same min/mean/stdev
// statistics computed over single best matches. ---
2076 double min_dist = DBL_MAX;
2078 std::vector<double> distance_vec(m_matches.size());
2079 for (
size_t i = 0; i < m_matches.size(); i++) {
2080 double dist = m_matches[i].distance;
2082 distance_vec[i] = dist;
2084 if (dist < min_dist) {
2091 mean /= m_queryDescriptors.rows;
2093 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
2094 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
// `threshold` used below is computed in elided lines (presumably from
// min_dist / m_matchingFactorThreshold depending on m_filterType — verify).
2104 for (
size_t i = 0; i < m_matches.size(); i++) {
2105 if (m_matches[i].distance <= threshold) {
2106 m.push_back(cv::DMatch((
int)queryKpts.size(), m_matches[i].trainIdx, m_matches[i].distance));
2108 if (!m_trainPoints.empty()) {
2109 trainPts.push_back(m_trainPoints[(
size_t)m_matches[i].trainIdx]);
2111 queryKpts.push_back(m_queryKeyPoints[(
size_t)m_matches[i].queryIdx]);
// Optional single-match filter: drop any match whose train keypoint was
// matched more than once (mapOfTrainIdx counts occurrences per trainIdx).
2116 if (m_useSingleMatchFilter) {
2119 std::vector<cv::DMatch> mTmp;
2120 std::vector<cv::Point3f> trainPtsTmp;
2121 std::vector<cv::KeyPoint> queryKptsTmp;
2123 std::map<int, int> mapOfTrainIdx;
// First pass: count how many filtered matches reference each train index.
2125 for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
2126 mapOfTrainIdx[it->trainIdx]++;
// Second pass: keep only uniquely-matched train keypoints, again
// re-basing queryIdx onto the compacted keypoint list.
2130 for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
2131 if (mapOfTrainIdx[it->trainIdx] == 1) {
2132 mTmp.push_back(cv::DMatch((
int)queryKptsTmp.size(), it->trainIdx, it->distance));
2134 if (!m_trainPoints.empty()) {
2135 trainPtsTmp.push_back(m_trainPoints[(
size_t)it->trainIdx]);
2137 queryKptsTmp.push_back(queryKpts[(
size_t)it->queryIdx]);
// Publish the filtered results into the member containers.
2141 m_filteredMatches = mTmp;
2142 m_objectFilteredPoints = trainPtsTmp;
2143 m_queryFilteredKeyPoints = queryKptsTmp;
2145 m_filteredMatches = m;
2146 m_objectFilteredPoints = trainPts;
2147 m_queryFilteredKeyPoints = queryKpts;
2160 objectPoints = m_objectFilteredPoints;
2186 keyPoints = m_queryFilteredKeyPoints;
2189 keyPoints = m_queryKeyPoints;
// Initialize the keypoint machinery: load the OpenCV non-free module when
// required (OpenCV 2.4.x only), then instantiate the configured detectors
// and extractors. (End of function elided in this extraction.)
2245 void vpKeyPoint::init()
// SIFT/SURF live in the non-free module on OpenCV >= 2.4.0 and < 3.0.0;
// initModule_nonfree() must succeed for them to be usable.
2248 #if defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && (VISP_HAVE_OPENCV_VERSION < 0x030000)
2250 if (!cv::initModule_nonfree()) {
2251 std::cerr <<
"Cannot init module non free, SIFT or SURF cannot be used." << std::endl;
// Build the detector/extractor instances from the configured name lists.
2261 initDetectors(m_detectorNames);
2262 initExtractors(m_extractorNames);
2271 void vpKeyPoint::initDetector(
const std::string &detectorName)
2273 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
2274 m_detectors[detectorName] = cv::FeatureDetector::create(detectorName);
2276 if (m_detectors[detectorName] == NULL) {
2277 std::stringstream ss_msg;
2278 ss_msg <<
"Fail to initialize the detector: " << detectorName
2279 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
2283 std::string detectorNameTmp = detectorName;
2284 std::string pyramid =
"Pyramid";
2285 std::size_t pos = detectorName.find(pyramid);
2286 bool usePyramid =
false;
2287 if (pos != std::string::npos) {
2288 detectorNameTmp = detectorName.substr(pos + pyramid.size());
2292 if (detectorNameTmp ==
"SIFT") {
2293 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2294 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2296 cv::Ptr<cv::FeatureDetector> siftDetector;
2297 if (m_maxFeatures > 0) {
2298 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2299 siftDetector = cv::SIFT::create(m_maxFeatures);
2301 siftDetector = cv::xfeatures2d::SIFT::create(m_maxFeatures);
2304 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2305 siftDetector = cv::SIFT::create();
2307 siftDetector = cv::xfeatures2d::SIFT::create();
2311 m_detectors[detectorNameTmp] = siftDetector;
2313 std::cerr <<
"You should not use SIFT with Pyramid feature detection!" << std::endl;
2314 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
2317 std::stringstream ss_msg;
2318 ss_msg <<
"Fail to initialize the detector: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2319 <<
" was not build with xFeatures2d module.";
2322 }
else if (detectorNameTmp ==
"SURF") {
2323 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2324 cv::Ptr<cv::FeatureDetector> surfDetector = cv::xfeatures2d::SURF::create();
2326 m_detectors[detectorNameTmp] = surfDetector;
2328 std::cerr <<
"You should not use SURF with Pyramid feature detection!" << std::endl;
2329 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(surfDetector);
2332 std::stringstream ss_msg;
2333 ss_msg <<
"Fail to initialize the detector: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2334 <<
" was not build with xFeatures2d module.";
2337 }
else if (detectorNameTmp ==
"FAST") {
2338 cv::Ptr<cv::FeatureDetector> fastDetector = cv::FastFeatureDetector::create();
2340 m_detectors[detectorNameTmp] = fastDetector;
2342 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
2344 }
else if (detectorNameTmp ==
"MSER") {
2345 cv::Ptr<cv::FeatureDetector> fastDetector = cv::MSER::create();
2347 m_detectors[detectorNameTmp] = fastDetector;
2349 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
2351 }
else if (detectorNameTmp ==
"ORB") {
2352 cv::Ptr<cv::FeatureDetector> orbDetector;
2353 if (m_maxFeatures > 0) {
2354 orbDetector = cv::ORB::create(m_maxFeatures);
2357 orbDetector = cv::ORB::create();
2360 m_detectors[detectorNameTmp] = orbDetector;
2362 std::cerr <<
"You should not use ORB with Pyramid feature detection!" << std::endl;
2363 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(orbDetector);
2365 }
else if (detectorNameTmp ==
"BRISK") {
2366 cv::Ptr<cv::FeatureDetector> briskDetector = cv::BRISK::create();
2368 m_detectors[detectorNameTmp] = briskDetector;
2370 std::cerr <<
"You should not use BRISK with Pyramid feature detection!" << std::endl;
2371 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(briskDetector);
2373 }
else if (detectorNameTmp ==
"KAZE") {
2374 cv::Ptr<cv::FeatureDetector> kazeDetector = cv::KAZE::create();
2376 m_detectors[detectorNameTmp] = kazeDetector;
2378 std::cerr <<
"You should not use KAZE with Pyramid feature detection!" << std::endl;
2379 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(kazeDetector);
2381 }
else if (detectorNameTmp ==
"AKAZE") {
2382 cv::Ptr<cv::FeatureDetector> akazeDetector = cv::AKAZE::create();
2384 m_detectors[detectorNameTmp] = akazeDetector;
2386 std::cerr <<
"You should not use AKAZE with Pyramid feature detection!" << std::endl;
2387 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(akazeDetector);
2389 }
else if (detectorNameTmp ==
"GFTT") {
2390 cv::Ptr<cv::FeatureDetector> gfttDetector = cv::GFTTDetector::create();
2392 m_detectors[detectorNameTmp] = gfttDetector;
2394 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(gfttDetector);
2396 }
else if (detectorNameTmp ==
"SimpleBlob") {
2397 cv::Ptr<cv::FeatureDetector> simpleBlobDetector = cv::SimpleBlobDetector::create();
2399 m_detectors[detectorNameTmp] = simpleBlobDetector;
2401 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(simpleBlobDetector);
2403 }
else if (detectorNameTmp ==
"STAR") {
2404 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2405 cv::Ptr<cv::FeatureDetector> starDetector = cv::xfeatures2d::StarDetector::create();
2407 m_detectors[detectorNameTmp] = starDetector;
2409 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(starDetector);
2412 std::stringstream ss_msg;
2413 ss_msg <<
"Fail to initialize the detector: STAR. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2414 <<
" was not build with xFeatures2d module.";
2417 }
else if (detectorNameTmp ==
"AGAST") {
2418 cv::Ptr<cv::FeatureDetector> agastDetector = cv::AgastFeatureDetector::create();
2420 m_detectors[detectorNameTmp] = agastDetector;
2422 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(agastDetector);
2424 }
else if (detectorNameTmp ==
"MSD") {
2425 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100)
2426 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2427 cv::Ptr<cv::FeatureDetector> msdDetector = cv::xfeatures2d::MSDDetector::create();
2429 m_detectors[detectorNameTmp] = msdDetector;
2431 std::cerr <<
"You should not use MSD with Pyramid feature detection!" << std::endl;
2432 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(msdDetector);
2435 std::stringstream ss_msg;
2436 ss_msg <<
"Fail to initialize the detector: MSD. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2437 <<
" was not build with xFeatures2d module.";
2441 std::stringstream ss_msg;
2442 ss_msg <<
"Feature " << detectorName <<
" is not available in OpenCV version: " << std::hex
2443 << VISP_HAVE_OPENCV_VERSION <<
" (require >= OpenCV 3.1).";
2446 std::cerr <<
"The detector:" << detectorNameTmp <<
" is not available." << std::endl;
2449 bool detectorInitialized =
false;
2452 detectorInitialized = !m_detectors[detectorNameTmp].empty();
2455 detectorInitialized = !m_detectors[detectorName].empty();
2458 if (!detectorInitialized) {
2459 std::stringstream ss_msg;
2460 ss_msg <<
"Fail to initialize the detector: " << detectorNameTmp
2461 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// Initialize one feature detector per name in \p detectorNames
// (loop body elided in this extraction — presumably calls initDetector(*it)).
2473 void vpKeyPoint::initDetectors(
const std::vector<std::string> &detectorNames)
2475 for (std::vector<std::string>::const_iterator it = detectorNames.begin(); it != detectorNames.end(); ++it) {
2485 void vpKeyPoint::initExtractor(
const std::string &extractorName)
2487 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
2488 m_extractors[extractorName] = cv::DescriptorExtractor::create(extractorName);
2490 if (extractorName ==
"SIFT") {
2491 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2492 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2494 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2495 m_extractors[extractorName] = cv::SIFT::create();
2497 m_extractors[extractorName] = cv::xfeatures2d::SIFT::create();
2500 std::stringstream ss_msg;
2501 ss_msg <<
"Fail to initialize the extractor: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2502 <<
" was not build with xFeatures2d module.";
2505 }
else if (extractorName ==
"SURF") {
2506 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2508 m_extractors[extractorName] = cv::xfeatures2d::SURF::create(100, 4, 3,
true);
2510 std::stringstream ss_msg;
2511 ss_msg <<
"Fail to initialize the extractor: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2512 <<
" was not build with xFeatures2d module.";
2515 }
else if (extractorName ==
"ORB") {
2516 m_extractors[extractorName] = cv::ORB::create();
2517 }
else if (extractorName ==
"BRISK") {
2518 m_extractors[extractorName] = cv::BRISK::create();
2519 }
else if (extractorName ==
"FREAK") {
2520 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2521 m_extractors[extractorName] = cv::xfeatures2d::FREAK::create();
2523 std::stringstream ss_msg;
2524 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2525 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2528 }
else if (extractorName ==
"BRIEF") {
2529 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2530 m_extractors[extractorName] = cv::xfeatures2d::BriefDescriptorExtractor::create();
2532 std::stringstream ss_msg;
2533 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2534 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2537 }
else if (extractorName ==
"KAZE") {
2538 m_extractors[extractorName] = cv::KAZE::create();
2539 }
else if (extractorName ==
"AKAZE") {
2540 m_extractors[extractorName] = cv::AKAZE::create();
2541 }
else if (extractorName ==
"DAISY") {
2542 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2543 m_extractors[extractorName] = cv::xfeatures2d::DAISY::create();
2545 std::stringstream ss_msg;
2546 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2547 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2550 }
else if (extractorName ==
"LATCH") {
2551 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2552 m_extractors[extractorName] = cv::xfeatures2d::LATCH::create();
2554 std::stringstream ss_msg;
2555 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2556 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2559 }
else if (extractorName ==
"LUCID") {
2560 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2565 std::stringstream ss_msg;
2566 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2567 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2570 }
else if (extractorName ==
"VGG") {
2571 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2572 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2573 m_extractors[extractorName] = cv::xfeatures2d::VGG::create();
2575 std::stringstream ss_msg;
2576 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2577 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2581 std::stringstream ss_msg;
2582 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2583 << VISP_HAVE_OPENCV_VERSION <<
" but requires at least OpenCV 3.2.";
2586 }
else if (extractorName ==
"BoostDesc") {
2587 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2588 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2589 m_extractors[extractorName] = cv::xfeatures2d::BoostDesc::create();
2591 std::stringstream ss_msg;
2592 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2593 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2597 std::stringstream ss_msg;
2598 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2599 << VISP_HAVE_OPENCV_VERSION <<
" but requires at least OpenCV 3.2.";
2603 std::cerr <<
"The extractor:" << extractorName <<
" is not available." << std::endl;
2607 if (!m_extractors[extractorName]) {
2608 std::stringstream ss_msg;
2609 ss_msg <<
"Fail to initialize the extractor: " << extractorName
2610 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
2614 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2615 if (extractorName ==
"SURF") {
2617 m_extractors[extractorName]->set(
"extended", 1);
// Initialize one descriptor extractor per name in \p extractorNames, then
// determine the common descriptor type across all extractors.
2628 void vpKeyPoint::initExtractors(
const std::vector<std::string> &extractorNames)
2630 for (std::vector<std::string>::const_iterator it = extractorNames.begin(); it != extractorNames.end(); ++it) {
// Default to floating-point descriptors; the first extractor fixes the
// reference type and later ones are checked against it (the mismatch
// handling at line 2642 is elided in this extraction).
2634 int descriptorType = CV_32F;
2635 bool firstIteration =
true;
2636 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2637 it != m_extractors.end(); ++it) {
2638 if (firstIteration) {
2639 firstIteration =
false;
2640 descriptorType = it->second->descriptorType();
2642 if (descriptorType != it->second->descriptorType()) {
// Populate m_mapOfDetectorNames / m_mapOfDescriptorNames: map the
// detector/descriptor enum values to the OpenCV string names available in
// the compiled OpenCV version. Most map-filling lines inside the #if groups
// are elided in this extraction; only STAR, MSD, FREAK, BRIEF, DAISY,
// LATCH, VGG and BoostDesc entries are visible below.
2649 void vpKeyPoint::initFeatureNames()
// --- detector names ---
2652 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2659 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2660 m_mapOfDetectorNames[DETECTOR_STAR] =
"STAR";
2662 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2663 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2666 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2669 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
// MSD detector requires OpenCV >= 3.1 with the xfeatures2d contrib module.
2674 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2675 m_mapOfDetectorNames[DETECTOR_MSD] =
"MSD";
// --- descriptor names ---
2679 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2682 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2683 m_mapOfDescriptorNames[DESCRIPTOR_FREAK] =
"FREAK";
2684 m_mapOfDescriptorNames[DESCRIPTOR_BRIEF] =
"BRIEF";
2686 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2687 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2690 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2693 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2696 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2697 m_mapOfDescriptorNames[DESCRIPTOR_DAISY] =
"DAISY";
2698 m_mapOfDescriptorNames[DESCRIPTOR_LATCH] =
"LATCH";
// VGG and BoostDesc descriptors require OpenCV >= 3.2 with xfeatures2d.
2701 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2702 m_mapOfDescriptorNames[DESCRIPTOR_VGG] =
"VGG";
2703 m_mapOfDescriptorNames[DESCRIPTOR_BoostDesc] =
"BoostDesc";
2715 int descriptorType = CV_32F;
2716 bool firstIteration =
true;
2717 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2718 it != m_extractors.end(); ++it) {
2719 if (firstIteration) {
2720 firstIteration =
false;
2721 descriptorType = it->second->descriptorType();
2723 if (descriptorType != it->second->descriptorType()) {
2729 if (matcherName ==
"FlannBased") {
2730 if (m_extractors.empty()) {
2731 std::cout <<
"Warning: No extractor initialized, by default use "
2732 "floating values (CV_32F) "
2733 "for descriptor type !"
2737 if (descriptorType == CV_8U) {
2738 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2739 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
2741 m_matcher =
new cv::FlannBasedMatcher(
new cv::flann::LshIndexParams(12, 20, 2));
2744 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2745 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::KDTreeIndexParams>());
2747 m_matcher =
new cv::FlannBasedMatcher(
new cv::flann::KDTreeIndexParams());
2751 m_matcher = cv::DescriptorMatcher::create(matcherName);
2754 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2755 if (m_matcher != NULL && !m_useKnn && matcherName ==
"BruteForce") {
2756 m_matcher->set(
"crossCheck", m_useBruteForceCrossCheck);
2761 std::stringstream ss_msg;
2762 ss_msg <<
"Fail to initialize the matcher: " << matcherName
2763 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
2780 IMatching.
insert(IRef, topLeftCorner);
2782 IMatching.
insert(ICurrent, topLeftCorner);
2797 IMatching.
insert(IRef, topLeftCorner);
2799 IMatching.
insert(ICurrent, topLeftCorner);
2813 int nbImg = (int)(m_mapOfImages.size() + 1);
2815 if (m_mapOfImages.empty()) {
2816 std::cerr <<
"There is no training image loaded !" << std::endl;
2826 int nbWidth = nbImgSqrt;
2827 int nbHeight = nbImgSqrt;
2829 if (nbImgSqrt * nbImgSqrt < nbImg) {
2834 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2836 if (maxW < it->second.getWidth()) {
2837 maxW = it->second.getWidth();
2840 if (maxH < it->second.getHeight()) {
2841 maxH = it->second.getHeight();
2847 int medianI = nbHeight / 2;
2848 int medianJ = nbWidth / 2;
2849 int medianIndex = medianI * nbWidth + medianJ;
2852 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2854 int local_cpt = cpt;
2855 if (cpt >= medianIndex) {
2860 int indexI = local_cpt / nbWidth;
2861 int indexJ = local_cpt - (indexI * nbWidth);
2862 vpImagePoint topLeftCorner((
int)maxH * indexI, (
int)maxW * indexJ);
2864 IMatching.
insert(it->second, topLeftCorner);
2867 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
2868 IMatching.
insert(ICurrent, topLeftCorner);
2883 int nbImg = (int)(m_mapOfImages.size() + 1);
2885 if (m_mapOfImages.empty()) {
2886 std::cerr <<
"There is no training image loaded !" << std::endl;
2898 int nbWidth = nbImgSqrt;
2899 int nbHeight = nbImgSqrt;
2901 if (nbImgSqrt * nbImgSqrt < nbImg) {
2906 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2908 if (maxW < it->second.getWidth()) {
2909 maxW = it->second.getWidth();
2912 if (maxH < it->second.getHeight()) {
2913 maxH = it->second.getHeight();
2919 int medianI = nbHeight / 2;
2920 int medianJ = nbWidth / 2;
2921 int medianIndex = medianI * nbWidth + medianJ;
2924 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2926 int local_cpt = cpt;
2927 if (cpt >= medianIndex) {
2932 int indexI = local_cpt / nbWidth;
2933 int indexJ = local_cpt - (indexI * nbWidth);
2934 vpImagePoint topLeftCorner((
int)maxH * indexI, (
int)maxW * indexJ);
2938 IMatching.
insert(IRef, topLeftCorner);
2941 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
2942 IMatching.
insert(ICurrent, topLeftCorner);
2957 m_detectorNames.clear();
2958 m_extractorNames.clear();
2959 m_detectors.clear();
2960 m_extractors.clear();
2962 std::cout <<
" *********** Parsing XML for configuration for vpKeyPoint "
2965 xmlp.
parse(configFile);
3029 int startClassId = 0;
3030 int startImageId = 0;
3032 m_trainKeyPoints.clear();
3033 m_trainPoints.clear();
3034 m_mapOfImageId.clear();
3035 m_mapOfImages.clear();
3038 for (std::map<int, int>::const_iterator it = m_mapOfImageId.begin(); it != m_mapOfImageId.end(); ++it) {
3039 if (startClassId < it->first) {
3040 startClassId = it->first;
3045 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
3047 if (startImageId < it->first) {
3048 startImageId = it->first;
3055 if (!parent.empty()) {
3060 std::ifstream file(filename.c_str(), std::ifstream::binary);
3061 if (!file.is_open()) {
3069 #if !defined(VISP_HAVE_MODULE_IO)
3071 std::cout <<
"Warning: The learning file contains image data that will "
3072 "not be loaded as visp_io module "
3073 "is not available !"
3078 for (
int i = 0; i < nbImgs; i++) {
3086 char *path =
new char[length + 1];
3088 for (
int cpt = 0; cpt < length; cpt++) {
3090 file.read((
char *)(&c),
sizeof(c));
3093 path[length] =
'\0';
3096 #ifdef VISP_HAVE_MODULE_IO
3104 m_mapOfImages[
id + startImageId] = I;
3112 int have3DInfoInt = 0;
3114 bool have3DInfo = have3DInfoInt != 0;
3125 int descriptorType = 5;
3128 cv::Mat trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
3129 for (
int i = 0; i < nRows; i++) {
3131 float u, v, size, angle, response;
3132 int octave, class_id, image_id;
3141 cv::KeyPoint keyPoint(cv::Point2f(u, v), size, angle, response, octave, (class_id + startClassId));
3142 m_trainKeyPoints.push_back(keyPoint);
3144 if (image_id != -1) {
3145 #ifdef VISP_HAVE_MODULE_IO
3147 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
3157 m_trainPoints.push_back(cv::Point3f(oX, oY, oZ));
3160 for (
int j = 0; j < nCols; j++) {
3162 switch (descriptorType) {
3164 unsigned char value;
3165 file.read((
char *)(&value),
sizeof(value));
3166 trainDescriptorsTmp.at<
unsigned char>(i, j) = value;
3171 file.read((
char *)(&value),
sizeof(value));
3172 trainDescriptorsTmp.at<
char>(i, j) = value;
3176 unsigned short int value;
3178 trainDescriptorsTmp.at<
unsigned short int>(i, j) = value;
3184 trainDescriptorsTmp.at<
short int>(i, j) = value;
3190 trainDescriptorsTmp.at<
int>(i, j) = value;
3196 trainDescriptorsTmp.at<
float>(i, j) = value;
3202 trainDescriptorsTmp.at<
double>(i, j) = value;
3208 trainDescriptorsTmp.at<
float>(i, j) = value;
3214 if (!append || m_trainDescriptors.empty()) {
3215 trainDescriptorsTmp.copyTo(m_trainDescriptors);
3217 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
3222 pugi::xml_document doc;
3225 if (!doc.load_file(filename.c_str())) {
3229 pugi::xml_node root_element = doc.document_element();
3231 int descriptorType = CV_32F;
3232 int nRows = 0, nCols = 0;
3235 cv::Mat trainDescriptorsTmp;
3237 for (pugi::xml_node first_level_node = root_element.first_child(); first_level_node; first_level_node = first_level_node.next_sibling()) {
3239 std::string name(first_level_node.name());
3240 if (first_level_node.type() == pugi::node_element && name ==
"TrainingImageInfo") {
3242 for (pugi::xml_node image_info_node = first_level_node.first_child(); image_info_node; image_info_node = image_info_node.next_sibling()) {
3243 name = std::string(image_info_node.name());
3245 if (name ==
"trainImg") {
3247 int id = image_info_node.attribute(
"image_id").as_int();
3250 #ifdef VISP_HAVE_MODULE_IO
3251 std::string path(image_info_node.text().as_string());
3260 m_mapOfImages[
id + startImageId] = I;
3264 }
else if (first_level_node.type() == pugi::node_element && name ==
"DescriptorsInfo") {
3265 for (pugi::xml_node descriptors_info_node = first_level_node.first_child(); descriptors_info_node;
3266 descriptors_info_node = descriptors_info_node.next_sibling()) {
3267 if (descriptors_info_node.type() == pugi::node_element) {
3268 name = std::string(descriptors_info_node.name());
3270 if (name ==
"nrows") {
3271 nRows = descriptors_info_node.text().as_int();
3272 }
else if (name ==
"ncols") {
3273 nCols = descriptors_info_node.text().as_int();
3274 }
else if (name ==
"type") {
3275 descriptorType = descriptors_info_node.text().as_int();
3280 trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
3281 }
else if (first_level_node.type() == pugi::node_element && name ==
"DescriptorInfo") {
3282 double u = 0.0, v = 0.0, size = 0.0, angle = 0.0, response = 0.0;
3283 int octave = 0, class_id = 0, image_id = 0;
3284 double oX = 0.0, oY = 0.0, oZ = 0.0;
3286 std::stringstream ss;
3288 for (pugi::xml_node point_node = first_level_node.first_child(); point_node; point_node = point_node.next_sibling()) {
3289 if (point_node.type() == pugi::node_element) {
3290 name = std::string(point_node.name());
3294 u = point_node.text().as_double();
3295 }
else if (name ==
"v") {
3296 v = point_node.text().as_double();
3297 }
else if (name ==
"size") {
3298 size = point_node.text().as_double();
3299 }
else if (name ==
"angle") {
3300 angle = point_node.text().as_double();
3301 }
else if (name ==
"response") {
3302 response = point_node.text().as_double();
3303 }
else if (name ==
"octave") {
3304 octave = point_node.text().as_int();
3305 }
else if (name ==
"class_id") {
3306 class_id = point_node.text().as_int();
3307 cv::KeyPoint keyPoint(cv::Point2f((
float)u, (
float)v), (
float)size, (
float)angle, (
float)response, octave,
3308 (class_id + startClassId));
3309 m_trainKeyPoints.push_back(keyPoint);
3310 }
else if (name ==
"image_id") {
3311 image_id = point_node.text().as_int();
3312 if (image_id != -1) {
3313 #ifdef VISP_HAVE_MODULE_IO
3315 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
3318 }
else if (name ==
"oX") {
3319 oX = point_node.text().as_double();
3320 }
else if (name ==
"oY") {
3321 oY = point_node.text().as_double();
3322 }
else if (name ==
"oZ") {
3323 oZ = point_node.text().as_double();
3324 m_trainPoints.push_back(cv::Point3f((
float)oX, (
float)oY, (
float)oZ));
3325 }
else if (name ==
"desc") {
3328 for (pugi::xml_node descriptor_value_node = point_node.first_child(); descriptor_value_node;
3329 descriptor_value_node = descriptor_value_node.next_sibling()) {
3331 if (descriptor_value_node.type() == pugi::node_element) {
3333 std::string parseStr(descriptor_value_node.text().as_string());
3338 switch (descriptorType) {
3343 trainDescriptorsTmp.at<
unsigned char>(i, j) = (
unsigned char)parseValue;
3350 trainDescriptorsTmp.at<
char>(i, j) = (
char)parseValue;
3354 ss >> trainDescriptorsTmp.at<
unsigned short int>(i, j);
3358 ss >> trainDescriptorsTmp.at<
short int>(i, j);
3362 ss >> trainDescriptorsTmp.at<
int>(i, j);
3366 ss >> trainDescriptorsTmp.at<
float>(i, j);
3370 ss >> trainDescriptorsTmp.at<
double>(i, j);
3374 ss >> trainDescriptorsTmp.at<
float>(i, j);
3378 std::cerr <<
"Error when converting:" << ss.str() << std::endl;
3391 if (!append || m_trainDescriptors.empty()) {
3392 trainDescriptorsTmp.copyTo(m_trainDescriptors);
3394 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
3404 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
3410 m_currentImageId = (int)m_mapOfImages.size();
// vpKeyPoint::match() — fills `matches` by comparing query descriptors against
// the train descriptors, and reports timing through `elapsedTime`.
// NOTE(review): this excerpt is line-mangled and several brace/else lines were
// dropped by extraction; comments describe the visible statements only.
3422 std::vector<cv::DMatch> &matches,
double &elapsedTime)
3427 m_knnMatches.clear();
// k-NN path — presumably guarded by m_useKnn (guard line not visible; confirm).
3429 if (m_useMatchTrainToQuery) {
3430 std::vector<std::vector<cv::DMatch> > knnMatchesTmp;
// Match train -> query with a cloned matcher (clone(true) = without train
// data per OpenCV DescriptorMatcher::clone), then swap the indices so the
// result reads as query -> train.
3433 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(
true);
3434 matcherTmp->knnMatch(trainDescriptors, queryDescriptors, knnMatchesTmp, 2);
3436 for (std::vector<std::vector<cv::DMatch> >::const_iterator it1 = knnMatchesTmp.begin();
3437 it1 != knnMatchesTmp.end(); ++it1) {
3438 std::vector<cv::DMatch> tmp;
3439 for (std::vector<cv::DMatch>::const_iterator it2 = it1->begin(); it2 != it1->end(); ++it2) {
// Index swap: trainIdx / queryIdx are exchanged on purpose.
3440 tmp.push_back(cv::DMatch(it2->trainIdx, it2->queryIdx, it2->distance));
3442 m_knnMatches.push_back(tmp);
// Reduce each k-NN list to its best match via knnToDMatch.
3445 matches.resize(m_knnMatches.size());
3446 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
// Direct query -> train k-NN matching with k = 2.
3449 m_matcher->knnMatch(queryDescriptors, m_knnMatches, 2);
3450 matches.resize(m_knnMatches.size());
3451 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
// Plain 1-NN path (non-kNN), with the same train->query index-swap trick.
3456 if (m_useMatchTrainToQuery) {
3457 std::vector<cv::DMatch> matchesTmp;
3459 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(
true);
3460 matcherTmp->match(trainDescriptors, queryDescriptors, matchesTmp);
3462 for (std::vector<cv::DMatch>::const_iterator it = matchesTmp.begin(); it != matchesTmp.end(); ++it) {
3463 matches.push_back(cv::DMatch(it->trainIdx, it->queryIdx, it->distance));
3467 m_matcher->match(queryDescriptors, matches);
// vpKeyPoint::matchPoint(I, rectangle) fragment — detect + extract on the
// query image then delegate to matchPoint(keypoints, descriptors).
// Guard: nothing to match against if no reference descriptors were learned.
3533 if (m_trainDescriptors.empty()) {
3534 std::cerr <<
"Reference is empty." << std::endl;
3536 std::cerr <<
"Reference is not computed." << std::endl;
3538 std::cerr <<
"Atching is not possible." << std::endl;
3543 if (m_useAffineDetection) {
// ASIFT-style path: detect/extract over multiple affine-simulated views,
// then flatten all per-view keypoints/descriptors into the query sets.
3544 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3545 std::vector<cv::Mat> listOfQueryDescriptors;
3551 m_queryKeyPoints.clear();
3552 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3553 it != listOfQueryKeyPoints.end(); ++it) {
3554 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
// First descriptor Mat replaces m_queryDescriptors, the rest are appended
// (the first-iteration guard line is not visible in this excerpt).
3558 for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3562 it->copyTo(m_queryDescriptors);
3564 m_queryDescriptors.push_back(*it);
// Standard path: single detection + extraction on I (optionally ROI-limited).
3568 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3569 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3572 return matchPoint(m_queryKeyPoints, m_queryDescriptors);
// vpKeyPoint::matchPoint(queryKeyPoints, queryDescriptors) fragment — runs the
// descriptor matching and (filter lines not visible here) fills the filtered
// keypoint / 3D-point / match containers. Returns the number of kept matches.
3585 m_queryKeyPoints = queryKeyPoints;
3586 m_queryDescriptors = queryDescriptors;
3588 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3591 m_queryFilteredKeyPoints.clear();
3592 m_objectFilteredPoints.clear();
3593 m_filteredMatches.clear();
// No-filter branch (the m_filterType check is not visible in this excerpt):
3597 if (m_useMatchTrainToQuery) {
3599 m_queryFilteredKeyPoints.clear();
3600 m_filteredMatches.clear();
// Re-index matches so queryIdx runs 0..N over the filtered keypoint list.
3601 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3602 m_filteredMatches.push_back(cv::DMatch((
int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3603 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(
size_t)it->queryIdx]);
3606 m_queryFilteredKeyPoints = m_queryKeyPoints;
3607 m_filteredMatches = m_matches;
// Keep the 3D object points corresponding to each surviving match.
3610 if (!m_trainPoints.empty()) {
3611 m_objectFilteredPoints.clear();
3615 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3617 m_objectFilteredPoints.push_back(m_trainPoints[(
size_t)it->trainIdx]);
3626 return static_cast<unsigned int>(m_filteredMatches.size());
// Convenience pose-estimation wrappers (gray and color overloads): discard the
// error / elapsed-time outputs and forward to the full matchPoint overload.
3658 double error, elapsedTime;
3659 return matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
3677 double error, elapsedTime;
3678 return matchPoint(I_color, cam, cMo, error, elapsedTime, func, rectangle);
// vpKeyPoint::matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle)
// fragment — full pipeline: detect, extract, match, filter, then RANSAC pose
// estimation (either ViSP VVS on vpPoint or OpenCV solvePnPRansac-style).
// NOTE(review): many closing-brace / else lines were dropped by extraction.
3701 if (m_trainDescriptors.empty()) {
3702 std::cerr <<
"Reference is empty." << std::endl;
3704 std::cerr <<
"Reference is not computed." << std::endl;
3706 std::cerr <<
"Matching is not possible." << std::endl;
// Affine-simulation (ASIFT-like) detection path, mirroring the 2D overload.
3711 if (m_useAffineDetection) {
3712 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3713 std::vector<cv::Mat> listOfQueryDescriptors;
3719 m_queryKeyPoints.clear();
3720 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3721 it != listOfQueryKeyPoints.end(); ++it) {
3722 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3726 for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3730 it->copyTo(m_queryDescriptors);
3732 m_queryDescriptors.push_back(*it);
3736 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3737 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3740 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
// elapsedTime accumulates all stages; pose time is added at the end.
3742 elapsedTime = m_detectionTime + m_extractionTime + m_matchingTime;
3745 m_queryFilteredKeyPoints.clear();
3746 m_objectFilteredPoints.clear();
3747 m_filteredMatches.clear();
3751 if (m_useMatchTrainToQuery) {
3753 m_queryFilteredKeyPoints.clear();
3754 m_filteredMatches.clear();
// Re-index so queryIdx follows the filtered keypoint list.
3755 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3756 m_filteredMatches.push_back(cv::DMatch((
int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3757 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(
size_t)it->queryIdx]);
3760 m_queryFilteredKeyPoints = m_queryKeyPoints;
3761 m_filteredMatches = m_matches;
3764 if (!m_trainPoints.empty()) {
3765 m_objectFilteredPoints.clear();
3769 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3771 m_objectFilteredPoints.push_back(m_trainPoints[(
size_t)it->trainIdx]);
3783 m_ransacInliers.clear();
3784 m_ransacOutliers.clear();
// Branch 1: ViSP virtual visual servoing pose with RANSAC (vpPose).
3786 if (m_useRansacVVS) {
3787 std::vector<vpPoint> objectVpPoints(m_objectFilteredPoints.size());
// Build vpPoint list: 3D object coordinates + normalized image coordinates.
3791 for (std::vector<cv::Point3f>::const_iterator it = m_objectFilteredPoints.begin();
3792 it != m_objectFilteredPoints.end(); ++it, cpt++) {
// vpImagePoint takes (row=v, col=u), hence (pt.y, pt.x).
3796 vpImagePoint imP(m_queryFilteredKeyPoints[cpt].pt.y, m_queryFilteredKeyPoints[cpt].pt.x);
3798 double x = 0.0, y = 0.0;
3803 objectVpPoints[cpt] = pt;
3806 std::vector<vpPoint> inliers;
3807 std::vector<unsigned int> inlierIndex;
3809 bool res =
computePose(objectVpPoints, cMo, inliers, inlierIndex, m_poseTime, func);
3811 std::map<unsigned int, bool> mapOfInlierIndex;
3812 m_matchRansacKeyPointsToPoints.clear();
// Record inlier (keypoint, 3D point) pairs and remember their indices.
3814 for (std::vector<unsigned int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3815 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3816 m_queryFilteredKeyPoints[(
size_t)(*it)], m_objectFilteredPoints[(
size_t)(*it)]));
3817 mapOfInlierIndex[*it] =
true;
// Everything not flagged as inlier becomes a RANSAC outlier.
3820 for (
size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3821 if (mapOfInlierIndex.find((
unsigned int)i) == mapOfInlierIndex.end()) {
3822 m_ransacOutliers.push_back(
vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3826 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3828 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3829 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3830 m_ransacInliers.begin(), matchRansacToVpImage);
3832 elapsedTime += m_poseTime;
// Branch 2: OpenCV-based RANSAC pose on 2D/3D correspondences.
3836 std::vector<cv::Point2f> imageFilteredPoints;
3837 cv::KeyPoint::convert(m_queryFilteredKeyPoints, imageFilteredPoints);
3838 std::vector<int> inlierIndex;
3839 bool res =
computePose(imageFilteredPoints, m_objectFilteredPoints, cam, cMo, inlierIndex, m_poseTime);
3841 std::map<int, bool> mapOfInlierIndex;
3842 m_matchRansacKeyPointsToPoints.clear();
3844 for (std::vector<int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3845 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3846 m_queryFilteredKeyPoints[(
size_t)(*it)], m_objectFilteredPoints[(
size_t)(*it)]));
3847 mapOfInlierIndex[*it] =
true;
3850 for (
size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3851 if (mapOfInlierIndex.find((
int)i) == mapOfInlierIndex.end()) {
3852 m_ransacOutliers.push_back(
vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3856 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3858 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3859 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3860 m_ransacInliers.begin(), matchRansacToVpImage);
3862 elapsedTime += m_poseTime;
// Tail of the color-image overload: the color frame was converted into the
// internal gray buffer m_I (conversion lines not visible), then forwarded.
3888 return (
matchPoint(m_I, cam, cMo, error, elapsedTime, func, rectangle));
// vpKeyPoint::matchPointAndDetect fragment — matches keypoints, then decides
// whether the learned object is visible using either a homography (planar
// object) or a fundamental-matrix RANSAC consistency check, and outputs the
// inliers' center of gravity.
// NOTE(review): "¢erOfGravity" below is a mojibake of "&centerOfGravity"
// (HTML-entity corruption of "&c"); left byte-identical here — fix upstream.
3913 vpImagePoint ¢erOfGravity,
const bool isPlanarObject,
3914 std::vector<vpImagePoint> *imPts1, std::vector<vpImagePoint> *imPts2,
3915 double *meanDescriptorDistance,
double *detection_score,
const vpRect &rectangle)
// Optional output lists are cleared first (clear() calls not visible).
3917 if (imPts1 != NULL && imPts2 != NULL) {
// Detection score = nb matches / mean descriptor distance: more matches and
// smaller distances -> higher score.
3924 double meanDescriptorDistanceTmp = 0.0;
3925 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
3926 meanDescriptorDistanceTmp += (double)it->distance;
3929 meanDescriptorDistanceTmp /= (double)m_filteredMatches.size();
3930 double score = (double)m_filteredMatches.size() / meanDescriptorDistanceTmp;
3932 if (meanDescriptorDistance != NULL) {
3933 *meanDescriptorDistance = meanDescriptorDistanceTmp;
3935 if (detection_score != NULL) {
3936 *detection_score = score;
// Geometry check needs at least 4 correspondences (homography minimum).
3939 if (m_filteredMatches.size() >= 4) {
3941 std::vector<cv::Point2f> points1(m_filteredMatches.size());
3943 std::vector<cv::Point2f> points2(m_filteredMatches.size());
3945 for (
size_t i = 0; i < m_filteredMatches.size(); i++) {
3946 points1[i] = cv::Point2f(m_trainKeyPoints[(
size_t)m_filteredMatches[i].trainIdx].pt);
3947 points2[i] = cv::Point2f(m_queryFilteredKeyPoints[(
size_t)m_filteredMatches[i].queryIdx].pt);
3950 std::vector<vpImagePoint> inliers;
3951 if (isPlanarObject) {
// OpenCV < 3.0 spells the RANSAC flag CV_RANSAC, >= 3.0 cv::RANSAC.
3952 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
3953 cv::Mat homographyMatrix = cv::findHomography(points1, points2, CV_RANSAC);
3955 cv::Mat homographyMatrix = cv::findHomography(points1, points2, cv::RANSAC);
// Keep matches whose homography reprojection error is below 6 pixels.
3958 for (
size_t i = 0; i < m_filteredMatches.size(); i++) {
3960 cv::Mat realPoint = cv::Mat(3, 1, CV_64F);
3961 realPoint.at<
double>(0, 0) = points1[i].x;
3962 realPoint.at<
double>(1, 0) = points1[i].y;
3963 realPoint.at<
double>(2, 0) = 1.f;
3965 cv::Mat reprojectedPoint = homographyMatrix * realPoint;
3966 double err_x = (reprojectedPoint.at<
double>(0, 0) / reprojectedPoint.at<
double>(2, 0)) - points2[i].x;
3967 double err_y = (reprojectedPoint.at<
double>(1, 0) / reprojectedPoint.at<
double>(2, 0)) - points2[i].y;
3968 double reprojectionError = std::sqrt(err_x * err_x + err_y * err_y);
3970 if (reprojectionError < 6.0) {
3971 inliers.push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3972 if (imPts1 != NULL) {
3973 imPts1->push_back(
vpImagePoint((
double)points1[i].y, (
double)points1[i].x));
3976 if (imPts2 != NULL) {
3977 imPts2->push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
// Non-planar object: fundamental matrix needs >= 8 correspondences.
3981 }
else if (m_filteredMatches.size() >= 8) {
3982 cv::Mat fundamentalInliers;
3983 cv::Mat fundamentalMatrix = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3, 0.99, fundamentalInliers);
3985 for (
size_t i = 0; i < (size_t)fundamentalInliers.rows; i++) {
3986 if (fundamentalInliers.at<uchar>((
int)i, 0)) {
3987 inliers.push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3989 if (imPts1 != NULL) {
3990 imPts1->push_back(
vpImagePoint((
double)points1[i].y, (
double)points1[i].x));
3993 if (imPts2 != NULL) {
3994 imPts2->push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
// Center of gravity of the geometric inliers (mean u, mean v).
4000 if (!inliers.empty()) {
4007 double meanU = 0.0, meanV = 0.0;
4008 for (std::vector<vpImagePoint>::const_iterator it = inliers.begin(); it != inliers.end(); ++it) {
4009 meanU += it->get_u();
4010 meanV += it->get_v();
4013 meanU /= (double)inliers.size();
4014 meanV /= (double)inliers.size();
4016 centerOfGravity.
set_u(meanU);
4017 centerOfGravity.
set_v(meanV);
// Detection verdict: distance threshold or score threshold depending on the
// configured m_detectionMethod (the switch lines are not visible here).
4025 return meanDescriptorDistanceTmp < m_detectionThreshold;
4027 return score > m_detectionScore;
// vpKeyPoint::matchPointAndDetect (pose variant) fragment — estimates the
// pose via matchPoint, projects the learned model points (computation of the
// projected vpImagePoint is in lines not visible here), and reports the
// RANSAC inliers' center of gravity.
4055 bool isMatchOk =
matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
4060 std::vector<vpImagePoint> modelImagePoints(m_trainVpPoints.size());
4062 for (std::vector<vpPoint>::const_iterator it = m_trainVpPoints.begin(); it != m_trainVpPoints.end(); ++it, cpt++) {
4066 modelImagePoints[cpt] = imPt;
// Center of gravity = mean (u, v) of the RANSAC inliers.
4075 double meanU = 0.0, meanV = 0.0;
4076 for (std::vector<vpImagePoint>::const_iterator it = m_ransacInliers.begin(); it != m_ransacInliers.end(); ++it) {
4077 meanU += it->get_u();
4078 meanV += it->get_v();
4081 meanU /= (double)m_ransacInliers.size();
4082 meanV /= (double)m_ransacInliers.size();
4084 centerOfGravity.
set_u(meanU);
4085 centerOfGravity.
set_v(meanV);
// vpKeyPoint::detectExtractAffine fragment — ASIFT-style detection/extraction:
// simulates a grid of affine viewpoints (tilt t, rotation phi), runs all
// configured detectors + the extractor on each warped image, and maps the
// keypoints back to the original image frame with the inverse transform Ai.
// Two implementations appear below: a sequential loop and an (apparently
// OpenMP-parallelised) precomputed-parameter version — the preprocessor lines
// selecting between them are not visible in this excerpt.
4106 std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
4107 std::vector<cv::Mat> &listOfDescriptors,
4113 listOfKeypoints.clear();
4114 listOfDescriptors.clear();
// Tilt levels 1..5: t = 2^(0.5*tl); rotation step shrinks as tilt grows.
4116 for (
int tl = 1; tl < 6; tl++) {
4117 double t = pow(2, 0.5 * tl);
4118 for (
int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
4119 std::vector<cv::KeyPoint> keypoints;
4120 cv::Mat descriptors;
4122 cv::Mat timg, mask, Ai;
// Warp the image for this (t, phi) and get the inverse affine matrix Ai.
4125 affineSkew(t, phi, timg, mask, Ai);
4128 if(listOfAffineI != NULL) {
4130 bitwise_and(mask, timg, img_disp);
4133 listOfAffineI->push_back(tI);
// Debug display of the skewed image (guard lines not visible).
4137 cv::bitwise_and(mask, timg, img_disp);
4138 cv::namedWindow(
"Skew", cv::WINDOW_AUTOSIZE );
4139 cv::imshow(
"Skew", img_disp );
// Run every configured detector on the warped image, restricted by mask.
4143 for(std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
4144 it != m_detectors.end(); ++it) {
4145 std::vector<cv::KeyPoint> kp;
4146 it->second->detect(timg, kp, mask);
4147 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
4151 extract(timg, keypoints, descriptors, elapsedTime);
// Back-project keypoints into the original image frame via Ai.
4153 for(
unsigned int i = 0; i < keypoints.size(); i++) {
4154 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
4155 cv::Mat kpt_t = Ai * cv::Mat(kpt);
4156 keypoints[i].pt.x = kpt_t.at<
float>(0, 0);
4157 keypoints[i].pt.y = kpt_t.at<
float>(1, 0);
4160 listOfKeypoints.push_back(keypoints);
4161 listOfDescriptors.push_back(descriptors);
// Parallel variant: precompute all (t, phi) pairs so iterations can be
// distributed independently, then index results by cpt.
4170 std::vector<std::pair<double, int> > listOfAffineParams;
4171 for (
int tl = 1; tl < 6; tl++) {
4172 double t = pow(2, 0.5 * tl);
4173 for (
int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
4174 listOfAffineParams.push_back(std::pair<double, int>(t, phi));
4178 listOfKeypoints.resize(listOfAffineParams.size());
4179 listOfDescriptors.resize(listOfAffineParams.size());
4181 if (listOfAffineI != NULL) {
4182 listOfAffineI->resize(listOfAffineParams.size());
4185 #ifdef VISP_HAVE_OPENMP
4186 #pragma omp parallel for
4188 for (
int cpt = 0; cpt < static_cast<int>(listOfAffineParams.size()); cpt++) {
4189 std::vector<cv::KeyPoint> keypoints;
4190 cv::Mat descriptors;
4192 cv::Mat timg, mask, Ai;
4195 affineSkew(listOfAffineParams[(
size_t)cpt].first, listOfAffineParams[(
size_t)cpt].second, timg, mask, Ai);
4197 if (listOfAffineI != NULL) {
4199 bitwise_and(mask, timg, img_disp);
// Indexed (not push_back) so parallel iterations do not race on the vector.
4202 (*listOfAffineI)[(size_t)cpt] = tI;
4207 cv::bitwise_and(mask, timg, img_disp);
4208 cv::namedWindow(
"Skew", cv::WINDOW_AUTOSIZE );
4209 cv::imshow(
"Skew", img_disp );
4213 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
4214 it != m_detectors.end(); ++it) {
4215 std::vector<cv::KeyPoint> kp;
4216 it->second->detect(timg, kp, mask);
4217 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
4221 extract(timg, keypoints, descriptors, elapsedTime);
4223 for (
size_t i = 0; i < keypoints.size(); i++) {
4224 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
4225 cv::Mat kpt_t = Ai * cv::Mat(kpt);
4226 keypoints[i].pt.x = kpt_t.at<
float>(0, 0);
4227 keypoints[i].pt.y = kpt_t.at<
float>(1, 0);
4230 listOfKeypoints[(size_t)cpt] = keypoints;
4231 listOfDescriptors[(size_t)cpt] = descriptors;
// vpKeyPoint::reset() fragment — restores every data member to a default and
// re-seeds ORB as the default detector/extractor pair (matching the
// "BruteForce-Hamming" matcher, which expects binary descriptors).
4247 m_computeCovariance =
false;
4249 m_currentImageId = 0;
4251 m_detectionScore = 0.15;
4252 m_detectionThreshold = 100.0;
4253 m_detectionTime = 0.0;
4254 m_detectorNames.clear();
4255 m_detectors.clear();
4256 m_extractionTime = 0.0;
4257 m_extractorNames.clear();
4258 m_extractors.clear();
4259 m_filteredMatches.clear();
4262 m_knnMatches.clear();
4263 m_mapOfImageId.clear();
4264 m_mapOfImages.clear();
4265 m_matcher = cv::Ptr<cv::DescriptorMatcher>();
4266 m_matcherName =
"BruteForce-Hamming";
4268 m_matchingFactorThreshold = 2.0;
4269 m_matchingRatioThreshold = 0.85;
4270 m_matchingTime = 0.0;
4271 m_matchRansacKeyPointsToPoints.clear();
4272 m_nbRansacIterations = 200;
4273 m_nbRansacMinInlierCount = 100;
4274 m_objectFilteredPoints.clear();
4276 m_queryDescriptors = cv::Mat();
4277 m_queryFilteredKeyPoints.clear();
4278 m_queryKeyPoints.clear();
4279 m_ransacConsensusPercentage = 20.0;
4281 m_ransacInliers.clear();
4282 m_ransacOutliers.clear();
// NOTE(review): the constructor initializes m_ransacParallel to false while
// reset() sets it to true — confirm which default is intended.
4283 m_ransacParallel =
true;
4284 m_ransacParallelNbThreads = 0;
4285 m_ransacReprojectionError = 6.0;
4286 m_ransacThreshold = 0.01;
4287 m_trainDescriptors = cv::Mat();
4288 m_trainKeyPoints.clear();
4289 m_trainPoints.clear();
4290 m_trainVpPoints.clear();
4291 m_useAffineDetection =
false;
// Cross-check flag only exists for OpenCV 2.4.x builds.
4292 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
4293 m_useBruteForceCrossCheck =
true;
4295 m_useConsensusPercentage =
false;
4297 m_useMatchTrainToQuery =
false;
4298 m_useRansacVVS =
true;
4299 m_useSingleMatchFilter =
true;
// Default feature pipeline after a reset: ORB detector + ORB descriptor.
4301 m_detectorNames.push_back(
"ORB");
4302 m_extractorNames.push_back(
"ORB");
// vpKeyPoint::saveLearningData fragment — serializes the learned model
// (training images, keypoints, optional 3D points, descriptors) either to a
// binary file or to an XML file via pugixml. The branch selection between the
// two formats is in lines not visible in this excerpt.
4318 if (!parent.empty()) {
// Optionally dump training images next to the learning file; each one is
// named train_image_NNN with the configured image format extension.
4322 std::map<int, std::string> mapOfImgPath;
4323 if (saveTrainingImages) {
4324 #ifdef VISP_HAVE_MODULE_IO
4326 unsigned int cpt = 0;
4328 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
4334 std::stringstream ss;
4335 ss <<
"train_image_" << std::setfill(
'0') << std::setw(3) << cpt;
4337 switch (m_imageFormat) {
4359 std::string imgFilename = ss.str();
4360 mapOfImgPath[it->first] = imgFilename;
4361 vpImageIo::write(it->second, parent + (!parent.empty() ?
"/" :
"") + imgFilename);
4364 std::cout <<
"Warning: in vpKeyPoint::saveLearningData() training images "
4365 "are not saved because "
4366 "visp_io module is not available !"
// 3D info is saved only when every keypoint has a matching 3D point.
4371 bool have3DInfo = m_trainPoints.size() > 0;
4372 if (have3DInfo && m_trainPoints.size() != m_trainKeyPoints.size()) {
// ---- Binary format ----
4378 std::ofstream file(filename.c_str(), std::ofstream::binary);
4379 if (!file.is_open()) {
4384 int nbImgs = (int)mapOfImgPath.size();
4387 #ifdef VISP_HAVE_MODULE_IO
// Image table: id, path length, then the path one char at a time.
4388 for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
4394 std::string path = it->second;
4395 int length = (int)path.length();
4398 for (
int cpt = 0; cpt < length; cpt++) {
4399 file.write((
char *)(&path[(
size_t)cpt]),
sizeof(path[(
size_t)cpt]));
4405 int have3DInfoInt = have3DInfo ? 1 : 0;
4408 int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
4409 int descriptorType = m_trainDescriptors.type();
// Per-keypoint record: u, v, size, angle, response, octave, class_id,
// image_id, optional (oX, oY, oZ), then the descriptor row.
4420 for (
int i = 0; i < nRows; i++) {
4421 unsigned int i_ = (
unsigned int)i;
4423 float u = m_trainKeyPoints[i_].pt.x;
4427 float v = m_trainKeyPoints[i_].pt.y;
4431 float size = m_trainKeyPoints[i_].size;
4435 float angle = m_trainKeyPoints[i_].angle;
4439 float response = m_trainKeyPoints[i_].response;
4443 int octave = m_trainKeyPoints[i_].octave;
4447 int class_id = m_trainKeyPoints[i_].class_id;
4451 #ifdef VISP_HAVE_MODULE_IO
4452 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
// image_id is -1 when images are not saved or no mapping exists.
4453 int image_id = (saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1;
4462 float oX = m_trainPoints[i_].x, oY = m_trainPoints[i_].y, oZ = m_trainPoints[i_].z;
// Descriptor values written element by element, sized by the cv::Mat type.
4473 for (
int j = 0; j < nCols; j++) {
4475 switch (descriptorType) {
4477 file.write((
char *)(&m_trainDescriptors.at<
unsigned char>(i, j)),
4478 sizeof(m_trainDescriptors.at<
unsigned char>(i, j)));
4482 file.write((
char *)(&m_trainDescriptors.at<
char>(i, j)),
sizeof(m_trainDescriptors.at<
char>(i, j)));
// ---- XML format (pugixml) ----
4514 pugi::xml_document doc;
4515 pugi::xml_node node = doc.append_child(pugi::node_declaration);
4516 node.append_attribute(
"version") =
"1.0";
4517 node.append_attribute(
"encoding") =
"UTF-8";
4523 pugi::xml_node root_node = doc.append_child(
"LearningData");
4526 pugi::xml_node image_node = root_node.append_child(
"TrainingImageInfo");
4528 #ifdef VISP_HAVE_MODULE_IO
4529 for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
4530 pugi::xml_node image_info_node = image_node.append_child(
"trainImg");
4531 image_info_node.append_child(pugi::node_pcdata).set_value(it->second.c_str());
4532 std::stringstream ss;
4534 image_info_node.append_attribute(
"image_id") = ss.str().c_str();
// Descriptor matrix dimensions and OpenCV type id.
4539 pugi::xml_node descriptors_info_node = root_node.append_child(
"DescriptorsInfo");
4541 int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
4542 int descriptorType = m_trainDescriptors.type();
4545 descriptors_info_node.append_child(
"nrows").append_child(pugi::node_pcdata).text() = nRows;
4548 descriptors_info_node.append_child(
"ncols").append_child(pugi::node_pcdata).text() = nCols;
4551 descriptors_info_node.append_child(
"type").append_child(pugi::node_pcdata).text() = descriptorType;
// One <DescriptorInfo> node per training keypoint.
4553 for (
int i = 0; i < nRows; i++) {
4554 unsigned int i_ = (
unsigned int)i;
4555 pugi::xml_node descriptor_node = root_node.append_child(
"DescriptorInfo");
4557 descriptor_node.append_child(
"u").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.x;
4558 descriptor_node.append_child(
"v").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.y;
4559 descriptor_node.append_child(
"size").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].size;
4560 descriptor_node.append_child(
"angle").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].angle;
4561 descriptor_node.append_child(
"response").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].response;
4562 descriptor_node.append_child(
"octave").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].octave;
4563 descriptor_node.append_child(
"class_id").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].class_id;
4565 #ifdef VISP_HAVE_MODULE_IO
4566 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
4567 descriptor_node.append_child(
"image_id").append_child(pugi::node_pcdata).text() =
4568 ((saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1);
4570 descriptor_node.append_child(
"image_id").append_child(pugi::node_pcdata).text() = -1;
4574 descriptor_node.append_child(
"oX").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].x;
4575 descriptor_node.append_child(
"oY").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].y;
4576 descriptor_node.append_child(
"oZ").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].z;
4579 pugi::xml_node desc_node = descriptor_node.append_child(
"desc");
// Descriptor values as <val> children; 8-bit types go through an int
// temporary so pugixml writes a number rather than a character.
4581 for (
int j = 0; j < nCols; j++) {
4582 switch (descriptorType) {
4588 int val_tmp = m_trainDescriptors.at<
unsigned char>(i, j);
4589 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = val_tmp;
4597 int val_tmp = m_trainDescriptors.at<
char>(i, j);
4598 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = val_tmp;
4602 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() =
4603 m_trainDescriptors.at<
unsigned short int>(i, j);
4607 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() =
4608 m_trainDescriptors.at<
short int>(i, j);
4612 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() =
4613 m_trainDescriptors.at<
int>(i, j);
4617 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() =
4618 m_trainDescriptors.at<
float>(i, j);
4622 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() =
4623 m_trainDescriptors.at<
double>(i, j);
// Pretty-printed UTF-8 output.
4633 doc.save_file(filename.c_str(), PUGIXML_TEXT(
" "), pugi::format_default, pugi::encoding_utf8);
4637 #if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x030000)
4638 #ifndef DOXYGEN_SHOULD_SKIP_THIS
// Helper predicates + KeyPointsFilter::retainBest — keep only the n_points
// strongest-response keypoints (OpenCV-3-only code path).
// Predicate: keypoint response is at least a given threshold.
4640 struct KeypointResponseGreaterThanThreshold {
4641 KeypointResponseGreaterThanThreshold(
float _value) : value(_value) {}
4642 inline bool operator()(
const cv::KeyPoint &kpt)
const {
return kpt.response >= value; }
// Strict-weak ordering by descending response, for nth_element below.
4646 struct KeypointResponseGreater {
4647 inline bool operator()(
const cv::KeyPoint &kp1,
const cv::KeyPoint &kp2)
const {
return kp1.response > kp2.response; }
// Keep the n_points best keypoints (and all keypoints tied with the n-th
// best response); no-op when the list is already small enough.
4651 void vpKeyPoint::KeyPointsFilter::retainBest(std::vector<cv::KeyPoint> &keypoints,
int n_points)
4655 if (n_points >= 0 && keypoints.size() > (
size_t)n_points) {
4656 if (n_points == 0) {
// Partial sort: the first n_points entries are the strongest.
4662 std::nth_element(keypoints.begin(), keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreater());
// Response of the weakest kept keypoint — ties with it are also kept.
4664 float ambiguous_response = keypoints[(size_t)(n_points - 1)].response;
4667 std::vector<cv::KeyPoint>::const_iterator new_end = std::partition(
4668 keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreaterThanThreshold(ambiguous_response));
4671 keypoints.resize((
size_t)(new_end - keypoints.begin()));
// Predicate: true when the keypoint lies OUTSIDE the given rectangle
// (used with remove_if, so "outside" points are erased).
4675 struct RoiPredicate {
4676 RoiPredicate(
const cv::Rect &_r) : r(_r) {}
4678 bool operator()(
const cv::KeyPoint &keyPt)
const {
return !r.contains(keyPt.pt); }
// KeyPointsFilter::runByImageBorder — drop keypoints closer than borderSize
// pixels to any image edge (everything is dropped if the image is too small;
// the clear() call for that case is not visible in this excerpt).
4683 void vpKeyPoint::KeyPointsFilter::runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize,
4686 if (borderSize > 0) {
4687 if (imageSize.height <= borderSize * 2 || imageSize.width <= borderSize * 2)
4690 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(),
4691 RoiPredicate(cv::Rect(
4692 cv::Point(borderSize, borderSize),
4693 cv::Point(imageSize.width - borderSize, imageSize.height - borderSize)))),
// Predicate: true when the keypoint size falls outside [minSize, maxSize]
// (used with remove_if below, so out-of-range keypoints are erased).
4698 struct SizePredicate {
4699 SizePredicate(
float _minSize,
float _maxSize) : minSize(_minSize), maxSize(_maxSize) {}
4701 bool operator()(
const cv::KeyPoint &keyPt)
const
4703 float size = keyPt.size;
4704 return (size < minSize) || (size > maxSize);
4707 float minSize, maxSize;
// KeyPointsFilter::runByKeypointSize — keep only keypoints whose size lies in
// [minSize, maxSize]; both bounds must be non-negative and ordered.
4710 void vpKeyPoint::KeyPointsFilter::runByKeypointSize(std::vector<cv::KeyPoint> &keypoints,
float minSize,
float maxSize)
4712 CV_Assert(minSize >= 0);
4713 CV_Assert(maxSize >= 0);
4714 CV_Assert(minSize <= maxSize);
// Erase-remove idiom.
4716 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), SizePredicate(minSize, maxSize)), keypoints.end());
// MaskPredicate — true when the mask pixel at the keypoint's (rounded)
// position is zero, i.e. the keypoint should be removed. Copy-assignment is
// declared but not defined to forbid it (pre-C++11 "= delete" idiom).
4722 MaskPredicate(
const cv::Mat &_mask) : mask(_mask) {}
4723 bool operator()(
const cv::KeyPoint &key_pt)
const
// +0.5f rounds the float keypoint coordinates to the nearest pixel.
4725 return mask.at<uchar>((int)(key_pt.pt.y + 0.5f), (int)(key_pt.pt.x + 0.5f)) == 0;
4730 MaskPredicate &operator=(
const MaskPredicate &);
// KeyPointsFilter::runByPixelsMask — erase keypoints falling on zero mask
// pixels (an empty-mask early-return likely sits in lines not visible here).
4733 void vpKeyPoint::KeyPointsFilter::runByPixelsMask(std::vector<cv::KeyPoint> &keypoints,
const cv::Mat &mask)
4738 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), MaskPredicate(mask)), keypoints.end());
4741 struct KeyPoint_LessThan {
4742 KeyPoint_LessThan(
const std::vector<cv::KeyPoint> &_kp) : kp(&_kp) {}
4743 bool operator()(
size_t i,
size_t j)
const
4745 const cv::KeyPoint &kp1 = (*kp)[ i];
4746 const cv::KeyPoint &kp2 = (*kp)[ j];
4748 std::numeric_limits<float>::epsilon())) {
4750 return kp1.pt.x < kp2.pt.x;
4754 std::numeric_limits<float>::epsilon())) {
4756 return kp1.pt.y < kp2.pt.y;
4760 std::numeric_limits<float>::epsilon())) {
4762 return kp1.size > kp2.size;
4766 std::numeric_limits<float>::epsilon())) {
4768 return kp1.angle < kp2.angle;
4772 std::numeric_limits<float>::epsilon())) {
4774 return kp1.response > kp2.response;
4777 if (kp1.octave != kp2.octave) {
4778 return kp1.octave > kp2.octave;
4781 if (kp1.class_id != kp2.class_id) {
4782 return kp1.class_id > kp2.class_id;
4787 const std::vector<cv::KeyPoint> *kp;
4790 void vpKeyPoint::KeyPointsFilter::removeDuplicated(std::vector<cv::KeyPoint> &keypoints)
4792 size_t i, j, n = keypoints.size();
4793 std::vector<size_t> kpidx(n);
4794 std::vector<uchar> mask(n, (uchar)1);
4796 for (i = 0; i < n; i++) {
4799 std::sort(kpidx.begin(), kpidx.end(), KeyPoint_LessThan(keypoints));
4800 for (i = 1, j = 0; i < n; i++) {
4801 cv::KeyPoint &kp1 = keypoints[kpidx[i]];
4802 cv::KeyPoint &kp2 = keypoints[kpidx[j]];
4805 if (!
vpMath::equal(kp1.pt.x, kp2.pt.x, std::numeric_limits<float>::epsilon()) ||
4806 !
vpMath::equal(kp1.pt.y, kp2.pt.y, std::numeric_limits<float>::epsilon()) ||
4807 !
vpMath::equal(kp1.size, kp2.size, std::numeric_limits<float>::epsilon()) ||
4808 !
vpMath::equal(kp1.angle, kp2.angle, std::numeric_limits<float>::epsilon())) {
4815 for (i = j = 0; i < n; i++) {
4818 keypoints[j] = keypoints[i];
4823 keypoints.resize(j);
4829 vpKeyPoint::PyramidAdaptedFeatureDetector::PyramidAdaptedFeatureDetector(
const cv::Ptr<cv::FeatureDetector> &_detector,
4831 : detector(_detector), maxLevel(_maxLevel)
4835 bool vpKeyPoint::PyramidAdaptedFeatureDetector::empty()
const
4837 return detector.empty() || (cv::FeatureDetector *)detector->empty();
4840 void vpKeyPoint::PyramidAdaptedFeatureDetector::detect(cv::InputArray image,
4841 CV_OUT std::vector<cv::KeyPoint> &keypoints, cv::InputArray mask)
4843 detectImpl(image.getMat(), keypoints, mask.getMat());
4846 void vpKeyPoint::PyramidAdaptedFeatureDetector::detectImpl(
const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
4847 const cv::Mat &mask)
const
4849 cv::Mat src = image;
4850 cv::Mat src_mask = mask;
4852 cv::Mat dilated_mask;
4853 if (!mask.empty()) {
4854 cv::dilate(mask, dilated_mask, cv::Mat());
4855 cv::Mat mask255(mask.size(), CV_8UC1, cv::Scalar(0));
4856 mask255.setTo(cv::Scalar(255), dilated_mask != 0);
4857 dilated_mask = mask255;
4860 for (
int l = 0, multiplier = 1; l <= maxLevel; ++l, multiplier *= 2) {
4862 std::vector<cv::KeyPoint> new_pts;
4863 detector->detect(src, new_pts, src_mask);
4864 std::vector<cv::KeyPoint>::iterator it = new_pts.begin(), end = new_pts.end();
4865 for (; it != end; ++it) {
4866 it->pt.x *= multiplier;
4867 it->pt.y *= multiplier;
4868 it->size *= multiplier;
4871 keypoints.insert(keypoints.end(), new_pts.begin(), new_pts.end());
4880 resize(dilated_mask, src_mask, src.size(), 0, 0, CV_INTER_AREA);
4885 vpKeyPoint::KeyPointsFilter::runByPixelsMask(keypoints, mask);
4890 #elif !defined(VISP_BUILD_SHARED_LIBS)
// No-op symbol emitted when the OpenCV-dependent code above is compiled out,
// presumably so the translation unit is not empty in a static build — TODO
// confirm against the surrounding #if/#elif guards.
// Fix: drop the stray semicolon after the function body (pedantic extra-';').
void dummy_vpKeyPoint() { }
bool _reference_computed
flag to indicate if the reference has been built.
std::vector< vpImagePoint > currentImagePointsList
std::vector< unsigned int > matchedReferencePoints
std::vector< vpImagePoint > referenceImagePointsList
Generic class defining intrinsic camera parameters.
Implementation of column vector and the associated operations.
Class to define RGB colors available for display functionalities.
static const vpColor none
static const vpColor green
static void convertFromOpenCV(const cv::KeyPoint &from, vpImagePoint &to)
static void displayLine(const vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness=1, bool segment=true)
static void displayCross(const vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness=1)
static void displayCircle(const vpImage< unsigned char > &I, const vpImagePoint &center, unsigned int radius, const vpColor &color, bool fill=false, unsigned int thickness=1)
Error that can be emitted by ViSP classes.
@ badValue
Used to indicate that a value is not in the allowed range.
const char * what() const
Implementation of an homogeneous matrix and operations on such kind of matrices.
vpHomogeneousMatrix inverse() const
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
static void read(vpImage< unsigned char > &I, const std::string &filename)
static void write(const vpImage< unsigned char > &I, const std::string &filename)
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
void set_ij(double ii, double jj)
unsigned int getWidth() const
void insert(const vpImage< Type > &src, const vpImagePoint &topLeft)
unsigned int getHeight() const
unsigned int matchPoint(const vpImage< unsigned char > &I)
void getTrainKeyPoints(std::vector< cv::KeyPoint > &keyPoints) const
void initMatcher(const std::string &matcherName)
bool computePose(const std::vector< cv::Point2f > &imagePoints, const std::vector< cv::Point3f > &objectPoints, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector< int > &inlierIndex, double &elapsedTime, bool(*func)(const vpHomogeneousMatrix &)=NULL)
void display(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, unsigned int size=3)
void match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors, std::vector< cv::DMatch > &matches, double &elapsedTime)
void detectExtractAffine(const vpImage< unsigned char > &I, std::vector< std::vector< cv::KeyPoint > > &listOfKeypoints, std::vector< cv::Mat > &listOfDescriptors, std::vector< vpImage< unsigned char > > *listOfAffineI=NULL)
static void compute3D(const cv::KeyPoint &candidate, const std::vector< vpPoint > &roi, const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point)
void createImageMatching(vpImage< unsigned char > &IRef, vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
void loadLearningData(const std::string &filename, bool binaryMode=false, bool append=false)
void extract(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, cv::Mat &descriptors, std::vector< cv::Point3f > *trainPoints=NULL)
void detect(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, const vpRect &rectangle=vpRect())
static void compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpCylinder > &cylinders, const std::vector< std::vector< std::vector< vpImagePoint > > > &vectorOfCylinderRois, std::vector< cv::Point3f > &points, cv::Mat *descriptors=NULL)
void getQueryKeyPoints(std::vector< cv::KeyPoint > &keyPoints, bool matches=true) const
static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpPolygon > &polygons, const std::vector< std::vector< vpPoint > > &roisPt, std::vector< cv::Point3f > &points, cv::Mat *descriptors=NULL)
bool matchPointAndDetect(const vpImage< unsigned char > &I, vpRect &boundingBox, vpImagePoint &centerOfGravity, const bool isPlanarObject=true, std::vector< vpImagePoint > *imPts1=NULL, std::vector< vpImagePoint > *imPts2=NULL, double *meanDescriptorDistance=NULL, double *detectionScore=NULL, const vpRect &rectangle=vpRect())
void saveLearningData(const std::string &filename, bool binaryMode=false, bool saveTrainingImages=true)
vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType, const std::string &matcherName, const vpFilterMatchingType &filterType=ratioDistanceThreshold)
@ stdAndRatioDistanceThreshold
@ constantFactorDistanceThreshold
void displayMatching(const vpImage< unsigned char > &IRef, vpImage< unsigned char > &IMatching, unsigned int crossSize, unsigned int lineThickness=1, const vpColor &color=vpColor::green)
unsigned int buildReference(const vpImage< unsigned char > &I)
void loadConfigFile(const std::string &configFile)
void getTrainPoints(std::vector< cv::Point3f > &points) const
void getObjectPoints(std::vector< cv::Point3f > &objectPoints) const
void insertImageMatching(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
static bool isNaN(double value)
static bool equal(double x, double y, double s=0.001)
static int round(double x)
Implementation of a matrix and operations on matrices.
static void convertPoint(const vpCameraParameters &cam, const double &x, const double &y, double &u, double &v)
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
This class defines the container for a plane geometrical structure.
Class that defines a 3D point in the object frame and allows forward projection of a 3D point in the ...
double get_oX() const
Get the point oX coordinate in the object frame.
void set_x(double x)
Set the point x coordinate in the image plane.
double get_y() const
Get the point y coordinate in the image plane.
double get_oZ() const
Get the point oZ coordinate in the object frame.
void set_oY(double oY)
Set the point oY coordinate in the object frame.
double get_x() const
Get the point x coordinate in the image plane.
void set_oZ(double oZ)
Set the point oZ coordinate in the object frame.
void set_oX(double oX)
Set the point oX coordinate in the object frame.
double get_oY() const
Get the point oY coordinate in the object frame.
void setWorldCoordinates(double oX, double oY, double oZ)
void set_y(double y)
Set the point y coordinate in the image plane.
Defines a generic 2D polygon.
vpRect getBoundingBox() const
bool isInside(const vpImagePoint &iP, const PointInPolygonMethod &method=PnPolyRayCasting) const
Class used for pose computation from N points (pose from point only). Some of the algorithms implemen...
void setRansacMaxTrials(const int &rM)
void addPoint(const vpPoint &P)
void setRansacNbInliersToReachConsensus(const unsigned int &nbC)
vpMatrix getCovarianceMatrix() const
void setCovarianceComputation(const bool &flag)
std::vector< unsigned int > getRansacInlierIndex() const
void setRansacFilterFlag(const RANSAC_FILTER_FLAGS &flag)
bool computePose(vpPoseMethodType method, vpHomogeneousMatrix &cMo, bool(*func)(const vpHomogeneousMatrix &)=NULL)
void setUseParallelRansac(bool use)
std::vector< vpPoint > getRansacInliers() const
void setNbParallelRansacThreads(int nb)
void setRansacThreshold(const double &t)
Defines a rectangle in the plane.
Implementation of a rotation vector as axis-angle minimal representation.
Class that consider the case of a translation vector.
std::string getDetectorName() const
double getMatchingRatioThreshold() const
std::string getExtractorName() const
void parse(const std::string &filename)
double getRansacReprojectionError() const
bool getUseRansacVVSPoseEstimation() const
double getMatchingFactorThreshold() const
int getNbRansacMinInlierCount() const
bool getUseRansacConsensusPercentage() const
std::string getMatcherName() const
int getNbRansacIterations() const
double getRansacConsensusPercentage() const
@ stdAndRatioDistanceThreshold
@ constantFactorDistanceThreshold
vpMatchingMethodEnum getMatchingMethod() const
double getRansacThreshold() const
VISP_EXPORT double measureTimeMs()