constantFactorDistanceThreshold,
stdDistanceThreshold,
ratioDistanceThreshold,
stdAndRatioDistanceThreshold,
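// The filter strategies above: constantFactorDistanceThreshold keeps matches whose descriptor
// distance stays below a constant factor of the minimum distance, stdDistanceThreshold uses a
// threshold derived from the minimum distance and the standard deviation of all distances,
// ratioDistanceThreshold applies the nearest-neighbour ratio test, and
// stdAndRatioDistanceThreshold requires both of the last two criteria to hold.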
#if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
#if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
    (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
#if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
#if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
#if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
    (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
#if defined(VISP_HAVE_OPENCV_XFEATURES2D)
#if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
DESCRIPTOR_BoostDesc,
vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType,
           const std::string &matcherName, const vpFilterMatchingType &filterType = ratioDistanceThreshold);
vpKeyPoint(const std::string &detectorName = "ORB", const std::string &extractorName = "ORB",
           const std::string &matcherName = "BruteForce-Hamming",
           const vpFilterMatchingType &filterType = ratioDistanceThreshold);
vpKeyPoint(const std::vector<std::string> &detectorNames, const std::vector<std::string> &extractorNames,
           const std::string &matcherName = "BruteForce",
           const vpFilterMatchingType &filterType = ratioDistanceThreshold);
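// A minimal usage sketch (assumes grayscale images Iref and Icur are already loaded;
// detector, extractor and matcher names are the defaults shown above):
//   vpKeyPoint keypoint("ORB", "ORB", "BruteForce-Hamming");
//   keypoint.buildReference(Iref);                    // detect and describe the reference keypoints
//   unsigned int nbMatch = keypoint.matchPoint(Icur); // detect, describe and match the current image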
std::vector<cv::Point3f> &points3f, bool append = false, int class_id = -1);
const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
bool append = false, int class_id = -1);
std::vector<cv::Point3f> &points3f, bool append = false, int class_id = -1);
const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
bool append = false, int class_id = -1);
static void compute3D(const cv::KeyPoint &candidate, const std::vector<vpPoint> &roi, const vpCameraParameters &cam,
std::vector<cv::KeyPoint> &candidates, const std::vector<vpPolygon> &polygons,
const std::vector<std::vector<vpPoint> > &roisPt,
std::vector<cv::Point3f> &points, cv::Mat *descriptors = NULL);
std::vector<vpImagePoint> &candidates, const std::vector<vpPolygon> &polygons,
const std::vector<std::vector<vpPoint> > &roisPt,
std::vector<vpPoint> &points, cv::Mat *descriptors = NULL);
std::vector<cv::KeyPoint> &candidates, const std::vector<vpCylinder> &cylinders,
const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
std::vector<cv::Point3f> &points, cv::Mat *descriptors = NULL);
std::vector<vpImagePoint> &candidates, const std::vector<vpCylinder> &cylinders,
const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
std::vector<vpPoint> &points, cv::Mat *descriptors = NULL);
bool computePose(const std::vector<cv::Point2f> &imagePoints, const std::vector<cv::Point3f> &objectPoints,
bool computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo, std::vector<vpPoint> &inliers,
bool computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo, std::vector<vpPoint> &inliers,
                 std::vector<unsigned int> &inlierIndex, double &elapsedTime,
void detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, const cv::Mat &mask = cv::Mat());
void detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
void detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
            const cv::Mat &mask = cv::Mat());
void detectExtractAffine(const vpImage<unsigned char> &I, std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
                         std::vector<cv::Mat> &listOfDescriptors,
const std::vector<vpImagePoint> &ransacInliers = std::vector<vpImagePoint>(),
unsigned int crossSize = 3, unsigned int lineThickness = 1);
const std::vector<vpImagePoint> &ransacInliers = std::vector<vpImagePoint>(),
unsigned int crossSize = 3, unsigned int lineThickness = 1);
std::vector<cv::Point3f> *trainPoints = NULL);
void extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
             std::vector<cv::Point3f> *trainPoints = NULL);
void extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
             std::vector<cv::Point3f> *trainPoints = NULL);
double &elapsedTime, std::vector<cv::Point3f> *trainPoints = NULL);
void extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
             double &elapsedTime, std::vector<cv::Point3f> *trainPoints = NULL);
void extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
             double &elapsedTime, std::vector<cv::Point3f> *trainPoints = NULL);
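// The detect()/extract() overloads split what matchPoint() does in a single call: detect()
// finds keypoints (optionally restricted by a mask), extract() computes their descriptors.
// The optional trainPoints pointer is presumably there so that a list of associated 3D points
// can be kept in sync when the extractor drops keypoints it cannot describe.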
if (!m_computeCovariance) {
  std::cout << "Warning : The covariance matrix has not been computed. "
               "See setCovarianceComputation() to do it."
if (m_computeCovariance && !m_useRansacVVS) {
  std::cout << "Warning : The covariance matrix can only be computed "
               "with a Virtual Visual Servoing approach."
            << "Use setUseRansacVVS(true) to choose to use a pose "
               "estimation method based on a Virtual Visual Servoing "
return m_covarianceMatrix;
std::map<vpFeatureDetectorType, std::string>::const_iterator it_name = m_mapOfDetectorNames.find(type);
if (it_name == m_mapOfDetectorNames.end()) {
  std::cerr << "Internal problem with the feature type and the "
               "corresponding name!"
std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator findDetector =
    m_detectors.find(it_name->second);
if (findDetector != m_detectors.end()) {
  return findDetector->second;
std::cerr << "Cannot find: " << it_name->second << std::endl;
return cv::Ptr<cv::FeatureDetector>();
inline cv::Ptr<cv::FeatureDetector> getDetector(const std::string &name) const
{
  std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator findDetector = m_detectors.find(name);
  if (findDetector != m_detectors.end()) {
    return findDetector->second;
  }

  std::cerr << "Cannot find: " << name << std::endl;
  return cv::Ptr<cv::FeatureDetector>();
}
inline std::map<vpFeatureDetectorType, std::string> getDetectorNames() const { return m_mapOfDetectorNames; }
std::map<vpFeatureDescriptorType, std::string>::const_iterator it_name = m_mapOfDescriptorNames.find(type);
if (it_name == m_mapOfDescriptorNames.end()) {
  std::cerr << "Internal problem with the feature type and the "
               "corresponding name!"
std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator findExtractor =
    m_extractors.find(it_name->second);
if (findExtractor != m_extractors.end()) {
  return findExtractor->second;
std::cerr << "Cannot find: " << it_name->second << std::endl;
return cv::Ptr<cv::DescriptorExtractor>();
inline cv::Ptr<cv::DescriptorExtractor> getExtractor(const std::string &name) const
{
  std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator findExtractor = m_extractors.find(name);
  if (findExtractor != m_extractors.end()) {
    return findExtractor->second;
  }

  std::cerr << "Cannot find: " << name << std::endl;
  return cv::Ptr<cv::DescriptorExtractor>();
}
inline std::map<vpFeatureDescriptorType, std::string> getExtractorNames() const { return m_mapOfDescriptorNames; }
inline cv::Ptr<cv::DescriptorMatcher> getMatcher() const { return m_matcher; }
inline std::vector<cv::DMatch> getMatches() const { return m_filteredMatches; }
std::vector<std::pair<cv::KeyPoint, cv::KeyPoint> > matchQueryToTrainKeyPoints;
matchQueryToTrainKeyPoints.reserve(m_filteredMatches.size());
for (size_t i = 0; i < m_filteredMatches.size(); i++) {
  matchQueryToTrainKeyPoints.push_back(
      std::pair<cv::KeyPoint, cv::KeyPoint>(m_queryFilteredKeyPoints[(size_t)m_filteredMatches[i].queryIdx],
                                            m_trainKeyPoints[(size_t)m_filteredMatches[i].trainIdx]));
}
return matchQueryToTrainKeyPoints;
inline unsigned int getNbImages() const { return static_cast<unsigned int>(m_mapOfImages.size()); }
void getObjectPoints(std::vector<cv::Point3f> &objectPoints) const;
void getObjectPoints(std::vector<vpPoint> &objectPoints) const;
void getQueryKeyPoints(std::vector<cv::KeyPoint> &keyPoints, bool matches = true) const;
void getQueryKeyPoints(std::vector<vpImagePoint> &keyPoints, bool matches = true) const;
void getTrainKeyPoints(std::vector<cv::KeyPoint> &keyPoints) const;
void getTrainKeyPoints(std::vector<vpImagePoint> &keyPoints) const;
void getTrainPoints(std::vector<cv::Point3f> &points) const;
void getTrainPoints(std::vector<vpPoint> &points) const;
void initMatcher(const std::string &matcherName);
void loadConfigFile(const std::string &configFile);
void loadLearningData(const std::string &filename, bool binaryMode = false, bool append = false);
void match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors, std::vector<cv::DMatch> &matches,
           double &elapsedTime);
unsigned int matchPoint(const std::vector<cv::KeyPoint> &queryKeyPoints, const cv::Mat &queryDescriptors);
const bool isPlanarObject = true, std::vector<vpImagePoint> *imPts1 = NULL,
std::vector<vpImagePoint> *imPts2 = NULL, double *meanDescriptorDistance = NULL,
double *detectionScore = NULL, const vpRect &rectangle = vpRect());
double &error, double &elapsedTime, vpRect &boundingBox, vpImagePoint &centerOfGravity,
void saveLearningData(const std::string &filename, bool binaryMode = false, bool saveTrainingImages = true);
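// Learning data (keypoints, descriptors and, when available, the associated 3D points and
// training images) can be written to disk and reloaded later instead of rebuilding the
// reference, e.g.:
//   keypoint.saveLearningData("learning-data.bin", true); // true: binary mode
//   keypoint.loadLearningData("learning-data.bin", true); // reload, replacing the current data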
m_computeCovariance = flag;
if (!m_useRansacVVS) {
  std::cout << "Warning : The covariance matrix can only be computed "
               "with a Virtual Visual Servoing approach."
            << "Use setUseRansacVVS(true) to choose to use a pose "
               "estimation method based on a Virtual "
               "Visual Servoing approach."
m_detectorNames.clear();
m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
initDetector(m_mapOfDetectorNames[detectorType]);
m_detectorNames.clear();
m_detectorNames.push_back(detectorName);
initDetector(detectorName);
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
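// The generic set(parameterName, value) interface on detectors and extractors is part of the
// OpenCV 2.4.x cv::Algorithm API and was removed in OpenCV 3, hence the version guard around
// the parameter-setting helpers.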
template <typename T1, typename T2, typename T3>
inline void setDetectorParameter(const T1 detectorName, const T2 parameterName, const T3 value)
{
  if (m_detectors.find(detectorName) != m_detectors.end()) {
    m_detectors[detectorName]->set(parameterName, value);
  }
}
inline void setDetectors(const std::vector<std::string> &detectorNames)
  m_detectorNames.clear();
  m_detectorNames = detectorNames;
  initDetectors(m_detectorNames);
m_extractorNames.clear();
m_extractorNames.push_back(m_mapOfDescriptorNames[extractorType]);
m_extractors.clear();
initExtractor(m_mapOfDescriptorNames[extractorType]);
m_extractorNames.clear();
m_extractorNames.push_back(extractorName);
m_extractors.clear();
initExtractor(extractorName);
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
template <typename T1, typename T2, typename T3>
inline void setExtractorParameter(const T1 extractorName, const T2 parameterName, const T3 value)
{
  if (m_extractors.find(extractorName) != m_extractors.end()) {
    m_extractors[extractorName]->set(parameterName, value);
  }
}
m_extractorNames.clear();
m_extractorNames = extractorNames;
m_extractors.clear();
initExtractors(m_extractorNames);
m_matcherName = matcherName;
initMatcher(m_matcherName);
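// Ratio-based filters rely on k-nearest-neighbour matching, so when such a filter is selected
// the code below turns off crossCheck on a BruteForce matcher and restores the user-chosen
// crossCheck setting otherwise (OpenCV 2.4.x only, where the generic set() interface exists).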
m_filterType = filterType;
if (filterType == ratioDistanceThreshold || filterType == stdAndRatioDistanceThreshold) {
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
  if (m_matcher != NULL && m_matcherName == "BruteForce") {
    m_matcher->set("crossCheck", false);
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
  if (m_matcher != NULL && m_matcherName == "BruteForce") {
    m_matcher->set("crossCheck", m_useBruteForceCrossCheck);
m_matchingFactorThreshold = factor;
if (ratio > 0.0 && (ratio < 1.0 || std::fabs(ratio - 1.0) < std::numeric_limits<double>::epsilon())) {
  m_matchingRatioThreshold = ratio;
if (percentage > 0.0 &&
    (percentage < 100.0 || std::fabs(percentage - 100.0) < std::numeric_limits<double>::epsilon())) {
  m_ransacConsensusPercentage = percentage;
m_nbRansacIterations = nbIter;
if (reprojectionError > 0.0) {
  m_ransacReprojectionError = reprojectionError;
              "threshold must be positive "
              "as we deal with distance.");
m_nbRansacMinInlierCount = minCount;
if (threshold > 0.0) {
  m_ransacThreshold = threshold;
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
inline void setUseBruteForceCrossCheck(bool useCrossCheck)
  if (m_matcher != NULL && !m_useKnn && m_matcherName == "BruteForce") {
    m_matcher->set("crossCheck", useCrossCheck);
  } else if (m_matcher != NULL && m_useKnn && m_matcherName == "BruteForce") {
    std::cout << "Warning, you try to set the crossCheck parameter with a "
                 "BruteForce matcher but knn is enabled";
    std::cout << " (the filtering method uses a ratio constraint)" << std::endl;
bool m_computeCovariance;
int m_currentImageId;
vpDetectionMethodType m_detectionMethod;
double m_detectionScore;
double m_detectionThreshold;
double m_detectionTime;
std::vector<std::string> m_detectorNames;
std::map<std::string, cv::Ptr<cv::FeatureDetector> > m_detectors;
double m_extractionTime;
std::vector<std::string> m_extractorNames;
std::map<std::string, cv::Ptr<cv::DescriptorExtractor> > m_extractors;
std::vector<cv::DMatch> m_filteredMatches;
vpFilterMatchingType m_filterType;
vpImageFormatType m_imageFormat;
std::vector<std::vector<cv::DMatch> > m_knnMatches;
std::map<vpFeatureDescriptorType, std::string> m_mapOfDescriptorNames;
std::map<vpFeatureDetectorType, std::string> m_mapOfDetectorNames;
std::map<int, int> m_mapOfImageId;
std::map<int, vpImage<unsigned char> > m_mapOfImages;
cv::Ptr<cv::DescriptorMatcher> m_matcher;
std::string m_matcherName;
std::vector<cv::DMatch> m_matches;
double m_matchingFactorThreshold;
double m_matchingRatioThreshold;
double m_matchingTime;
std::vector<std::pair<cv::KeyPoint, cv::Point3f> > m_matchRansacKeyPointsToPoints;
int m_nbRansacIterations;
int m_nbRansacMinInlierCount;
std::vector<cv::Point3f> m_objectFilteredPoints;
cv::Mat m_queryDescriptors;
std::vector<cv::KeyPoint> m_queryFilteredKeyPoints;
std::vector<cv::KeyPoint> m_queryKeyPoints;
double m_ransacConsensusPercentage;
std::vector<vpImagePoint> m_ransacInliers;
std::vector<vpImagePoint> m_ransacOutliers;
bool m_ransacParallel;
unsigned int m_ransacParallelNbThreads;
double m_ransacReprojectionError;
double m_ransacThreshold;
cv::Mat m_trainDescriptors;
std::vector<cv::KeyPoint> m_trainKeyPoints;
std::vector<cv::Point3f> m_trainPoints;
std::vector<vpPoint> m_trainVpPoints;
bool m_useAffineDetection;
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
bool m_useBruteForceCrossCheck;
bool m_useConsensusPercentage;
bool m_useMatchTrainToQuery;
bool m_useRansacVVS;
bool m_useSingleMatchFilter;
void affineSkew(double tilt, double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai);
double computePoseEstimationError(const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
void filterMatches();
void initDetector(const std::string &detectorNames);
void initDetectors(const std::vector<std::string> &detectorNames);
void initExtractor(const std::string &extractorName);
void initExtractors(const std::vector<std::string> &extractorNames);
void initFeatureNames();
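// myKeypointHash() below folds the cv::KeyPoint fields into a single value with an FNV-1a
// style multiply/XOR loop; 2166136261 and 16777619 are the 32-bit FNV offset basis and prime.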
inline size_t myKeypointHash(const cv::KeyPoint &kp)
  size_t _Val = 2166136261U, scale = 16777619U;
  _Val = (scale * _Val) ^ u.u;
  _Val = (scale * _Val) ^ u.u;
  _Val = (scale * _Val) ^ u.u;
  _Val = (scale * _Val) ^ u.u;
  _Val = (scale * _Val) ^ ((size_t)kp.octave);
  _Val = (scale * _Val) ^ ((size_t)kp.class_id);
#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
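// PyramidAdaptedFeatureDetector below is a local port of the adapter that OpenCV removed from
// its public API in 3.x: it runs the wrapped detector on each level of a Gaussian image pyramid
// and maps the detected keypoints back to the original image resolution.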
class PyramidAdaptedFeatureDetector : public cv::FeatureDetector
  PyramidAdaptedFeatureDetector(const cv::Ptr<cv::FeatureDetector> &detector, int maxLevel = 2);
  virtual bool empty() const;
  virtual void detect(cv::InputArray image, CV_OUT std::vector<cv::KeyPoint> &keypoints,
                      cv::InputArray mask = cv::noArray());
  virtual void detectImpl(const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
                          const cv::Mat &mask = cv::Mat()) const;
  cv::Ptr<cv::FeatureDetector> detector;
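// KeyPointsFilter below collects keypoint pruning helpers modelled on cv::KeyPointsFilter:
// drop keypoints too close to the image border, outside a [minSize, maxSize] range, outside a
// pixel mask, duplicated, or beyond the npoints best ones.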
class KeyPointsFilter
  KeyPointsFilter() {}
  static void runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize, int borderSize);
  static void runByKeypointSize(std::vector<cv::KeyPoint> &keypoints, float minSize, float maxSize = FLT_MAX);
  static void runByPixelsMask(std::vector<cv::KeyPoint> &keypoints, const cv::Mat &mask);
  static void removeDuplicated(std::vector<cv::KeyPoint> &keypoints);
  static void retainBest(std::vector<cv::KeyPoint> &keypoints, int npoints);