Visual Servoing Platform version 3.6.0
testKeyPoint-2.cpp
/*
 * ViSP, open source Visual Servoing Platform software.
 * Copyright (C) 2005 - 2023 by Inria. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact Inria about acquiring a ViSP Professional
 * Edition License.
 *
 * See https://visp.inria.fr for more information.
 *
 * This software was developed at:
 * Inria Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 *
 * If you have questions regarding the use of this file, please contact
 * Inria at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Description:
 * Test keypoint matching and pose estimation.
 */

#include <iostream>

#include <visp3/core/vpConfig.h>

#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_FEATURES2D) && defined(HAVE_OPENCV_VIDEO)

#include <visp3/core/vpImage.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayGTK.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/io/vpParseArgv.h>
#include <visp3/io/vpVideoReader.h>
#include <visp3/mbt/vpMbEdgeTracker.h>
#include <visp3/vision/vpKeyPoint.h>

// List of allowed command line options
#define GETOPTARGS "cdph"

void usage(const char *name, const char *badparam);
bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display, bool &use_parallel_ransac);

void usage(const char *name, const char *badparam)
{
  fprintf(stdout, "\n\
Test keypoint matching and pose estimation.\n\
\n\
SYNOPSIS\n\
  %s [-c] [-d] [-p] [-h]\n",
          name);

  fprintf(stdout, "\n\
OPTIONS: \n\
\n\
  -c\n\
     Disable the mouse click. Useful to automate the \n\
     execution of this program without human intervention.\n\
\n\
  -d \n\
     Turn off the display.\n\
\n\
  -p \n\
     Use parallel RANSAC.\n\
\n\
  -h\n\
     Print the help.\n");

  if (badparam)
    fprintf(stdout, "\nERROR: Bad parameter [%s]\n", badparam);
}

bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display, bool &use_parallel_ransac)
{
  const char *optarg_;
  int c;
  while ((c = vpParseArgv::parse(argc, argv, GETOPTARGS, &optarg_)) > 1) {

    switch (c) {
    case 'c':
      click_allowed = false;
      break;
    case 'd':
      display = false;
      break;
    case 'p':
      use_parallel_ransac = true;
      break;
    case 'h':
      usage(argv[0], NULL);
      return false;
      break;

    default:
      usage(argv[0], optarg_);
      return false;
      break;
    }
  }

  if ((c == 1) || (c == -1)) {
    // standalone param or error
    usage(argv[0], NULL);
    std::cerr << "ERROR: " << std::endl;
    std::cerr << "  Bad argument " << optarg_ << std::endl << std::endl;
    return false;
  }

  return true;
}

template <typename Type>
void run_test(const std::string &env_ipath, bool opt_click_allowed, bool opt_display, bool use_parallel_ransac,
              vpImage<Type> &I, vpImage<Type> &IMatching)
{
#if VISP_HAVE_DATASET_VERSION >= 0x030600
  std::string ext("png");
#else
  std::string ext("pgm");
#endif
  // Set the path location of the image sequence
  std::string dirname = vpIoTools::createFilePath(env_ipath, "mbt/cube");

  // Build the name of the image files
  std::string filenameRef = vpIoTools::createFilePath(dirname, "image0000." + ext);
  vpImageIo::read(I, filenameRef);
  std::string filenameCur = vpIoTools::createFilePath(dirname, "image%04d." + ext);

#if defined(VISP_HAVE_X11)
  vpDisplayX display;
#elif defined(VISP_HAVE_GTK)
  vpDisplayGTK display;
#elif defined(VISP_HAVE_GDI)
  vpDisplayGDI display;
#elif defined(HAVE_OPENCV_HIGHGUI)
  vpDisplayOpenCV display;
#endif

  if (opt_display) {
    display.setDownScalingFactor(vpDisplay::SCALE_AUTO);
    display.init(I, 0, 0, "ORB keypoints matching and pose estimation");
  }
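  // Note: the vpDisplay backend above is selected at compile time among the
  // available GUI SDKs (X11, GTK, GDI or OpenCV highgui), so the same test
  // runs unchanged on Unix-like systems and on Windows.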

  vpCameraParameters cam;
  vpMbEdgeTracker tracker;
  // Load config for tracker
  std::string tracker_config_file = vpIoTools::createFilePath(env_ipath, "mbt/cube.xml");

  tracker.loadConfigFile(tracker_config_file);
  tracker.getCameraParameters(cam);
#if 0
  // Corresponding parameters manually set to have an example code
  vpMe me;
  me.setMaskSize(5);
  me.setMaskNumber(180);
  me.setRange(8);
  me.setLikelihoodThresholdType(vpMe::NORMALIZED_THRESHOLD);
  me.setThreshold(20);
  me.setMu1(0.5);
  me.setMu2(0.5);
  me.setSampleStep(4);
  me.setNbTotalSample(250);
  tracker.setMovingEdge(me);
  cam.initPersProjWithoutDistortion(547.7367575, 542.0744058, 338.7036994, 234.5083345);
  tracker.setCameraParameters(cam);
  tracker.setNearClippingDistance(0.01);
  tracker.setFarClippingDistance(100.0);
  tracker.setClipping(tracker.getClipping() | vpMbtPolygon::FOV_CLIPPING);
#endif

  tracker.setAngleAppear(vpMath::rad(89));
  tracker.setAngleDisappear(vpMath::rad(89));

  // Load CAO model
  std::string cao_model_file = vpIoTools::createFilePath(env_ipath, "mbt/cube.cao");
  tracker.loadModel(cao_model_file);

  // Initialize the pose
  std::string init_file = vpIoTools::createFilePath(env_ipath, "mbt/cube.init");
  if (opt_display && opt_click_allowed) {
    tracker.initClick(I, init_file);
  }
  else {
    vpHomogeneousMatrix cMoi(0.02044769891, 0.1101505452, 0.5078963719, 2.063603907, 1.110231561, -0.4392789872);
    tracker.initFromPose(I, cMoi);
  }

  // Get the init pose
  vpHomogeneousMatrix cMo;
  tracker.getPose(cMo);

  // Init keypoints
  vpKeyPoint keypoints("ORB", "ORB", "BruteForce-Hamming");
  keypoints.setRansacParallel(use_parallel_ransac);
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400)
  // Bug when using LSH index with FLANN and OpenCV 2.3.1.
  // see http://code.opencv.org/issues/1741 (Bug #1741)
  keypoints.setMatcher("FlannBased");
#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
  keypoints.setDetectorParameter("ORB", "nLevels", 1);
#else
  cv::Ptr<cv::ORB> orb_detector = keypoints.getDetector("ORB").dynamicCast<cv::ORB>();
  if (orb_detector) {
    orb_detector->setNLevels(1);
  }
#endif
#endif

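  // Note on the configuration above: ORB is used both as detector and as
  // descriptor extractor, matched with the Hamming distance. When OpenCV
  // >= 2.4 is available the matcher is switched to FlannBased, and ORB is
  // restricted to a single pyramid level (nLevels = 1), i.e. single-scale
  // detection.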
  // Detect keypoints on the current image
  std::vector<cv::KeyPoint> trainKeyPoints;
  double elapsedTime;
  keypoints.detect(I, trainKeyPoints, elapsedTime);

  // Keep only keypoints on the cube
  std::vector<vpPolygon> polygons;
  std::vector<std::vector<vpPoint> > roisPt;
  std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair =
      tracker.getPolygonFaces(true); // To detect an issue with CI
  polygons = pair.first;
  roisPt = pair.second;

  // Compute the 3D coordinates
  std::vector<cv::Point3f> points3f;
  vpKeyPoint::compute3DForPointsInPolygons(cMo, cam, trainKeyPoints, polygons, roisPt, points3f);

  // Build the reference keypoints
  keypoints.buildReference(I, trainKeyPoints, points3f, false, 1);

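  // The reference database is then extended with two more training views of
  // the cube (images 0150 and 0200 below): buildReference() is called again
  // with append = true and a different class id (2, then 3) for each view.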
  // Read image 150
  filenameRef = vpIoTools::createFilePath(dirname, "image0150." + ext);
  vpImageIo::read(I, filenameRef);

  // Init pose at image 150
  cMo.buildFrom(0.02651282185, -0.03713587374, 0.6873765919, 2.314744454, 0.3492296488, -0.1226054828);
  tracker.initFromPose(I, cMo);

  // Detect keypoints on the image 150
  keypoints.detect(I, trainKeyPoints, elapsedTime);

  // Keep only keypoints on the cube
  pair = tracker.getPolygonFaces(true, true,
                                 true); // To detect an issue with CI
  polygons = pair.first;
  roisPt = pair.second;

  // Compute the 3D coordinates
  vpKeyPoint::compute3DForPointsInPolygons(cMo, cam, trainKeyPoints, polygons, roisPt, points3f);

  // Build the reference keypoints
  keypoints.buildReference(I, trainKeyPoints, points3f, true, 2);

  // Read image 200
  filenameRef = vpIoTools::createFilePath(dirname, "image0200." + ext);
  vpImageIo::read(I, filenameRef);

  // Init pose at image 200
  cMo.buildFrom(0.02965448956, -0.07283091786, 0.7253526051, 2.300529617, -0.4286674806, 0.1788761025);
  tracker.initFromPose(I, cMo);

  // Detect keypoints on the image 200
  keypoints.detect(I, trainKeyPoints, elapsedTime);

  // Keep only keypoints on the cube
  pair = tracker.getPolygonFaces(false); // To detect an issue with CI
  polygons = pair.first;
  roisPt = pair.second;

  // Compute the 3D coordinates
  vpKeyPoint::compute3DForPointsInPolygons(cMo, cam, trainKeyPoints, polygons, roisPt, points3f);

  // Build the reference keypoints
  keypoints.buildReference(I, trainKeyPoints, points3f, true, 3);

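  // At this point the reference database contains ORB keypoints detected on
  // three training views, each associated with its 3D coordinates in the
  // object frame. This is the data matchPoint() relies on below to estimate
  // the pose of the cube by RANSAC in every frame of the sequence.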
  // Init reader for getting the input image sequence
  vpVideoReader g;
  g.setFileName(filenameCur);
  g.open(I);
  g.acquire(I);

#if defined(VISP_HAVE_X11)
  vpDisplayX display2;
#elif defined(VISP_HAVE_GTK)
  vpDisplayGTK display2;
#elif defined(VISP_HAVE_GDI)
  vpDisplayGDI display2;
#elif defined(HAVE_OPENCV_HIGHGUI)
  vpDisplayOpenCV display2;
#endif

  keypoints.createImageMatching(I, IMatching);

  if (opt_display) {
    display2.init(IMatching, 0, (int)I.getHeight() / vpDisplay::getDownScalingFactor(I) + 80, "IMatching");
  }

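  // createImageMatching() allocates IMatching, a larger canvas holding both
  // the training views and the current frame; insertImageMatching() copies
  // the current image into it at each iteration, and the (+width, +height)
  // offsets applied to the inliers/outliers below shift points drawn on I
  // into the sub-image of IMatching that contains the current frame.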
  bool opt_click = false;
  double error;
  vpMouseButton::vpMouseButtonType button;
  std::vector<double> times_vec;
  while ((opt_display && !g.end()) || (!opt_display && g.getFrameIndex() < 30)) {
    g.acquire(I);

    if (opt_display) {
      vpDisplay::display(I);

      // Display image matching
      keypoints.insertImageMatching(I, IMatching);

      vpDisplay::display(IMatching);
    }

    // Match keypoints and estimate the pose
    if (keypoints.matchPoint(I, cam, cMo, error, elapsedTime)) {
      times_vec.push_back(elapsedTime);

      tracker.setCameraParameters(cam);
      tracker.setPose(I, cMo);

      if (opt_display) {
        tracker.display(I, cMo, cam, vpColor::red, 2);
        vpDisplay::displayFrame(I, cMo, cam, 0.025, vpColor::none, 3);

        std::vector<vpImagePoint> ransacInliers = keypoints.getRansacInliers();
        std::vector<vpImagePoint> ransacOutliers = keypoints.getRansacOutliers();

        for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
          vpDisplay::displayCircle(I, *it, 4, vpColor::green);
          vpImagePoint imPt(*it);
          imPt.set_u(imPt.get_u() + I.getWidth());
          imPt.set_v(imPt.get_v() + I.getHeight());
          vpDisplay::displayCircle(IMatching, imPt, 4, vpColor::green);
        }

        for (std::vector<vpImagePoint>::const_iterator it = ransacOutliers.begin(); it != ransacOutliers.end(); ++it) {
          vpDisplay::displayCircle(I, *it, 4, vpColor::red);
          vpImagePoint imPt(*it);
          imPt.set_u(imPt.get_u() + I.getWidth());
          imPt.set_v(imPt.get_v() + I.getHeight());
          vpDisplay::displayCircle(IMatching, imPt, 4, vpColor::red);
        }

        keypoints.displayMatching(I, IMatching);

        // Display model in the correct sub-image in IMatching
        vpCameraParameters cam2;
        cam2.initPersProjWithoutDistortion(cam.get_px(), cam.get_py(), cam.get_u0() + I.getWidth(),
                                           cam.get_v0() + I.getHeight());
        tracker.setCameraParameters(cam2);
        tracker.setPose(IMatching, cMo);
        tracker.display(IMatching, cMo, cam2, vpColor::red, 2);
        vpDisplay::displayFrame(IMatching, cMo, cam2, 0.025, vpColor::none, 3);
      }
    }

    if (opt_display) {
      vpDisplay::flush(I);
      vpDisplay::flush(IMatching);
    }

    if (opt_click_allowed && opt_display) {
      // Click requested to process next image
      if (opt_click) {
        vpDisplay::getClick(I, button, true);
        if (button == vpMouseButton::button3) {
          opt_click = false;
        }
      }
      else {
        // Use right click to enable/disable step by step tracking
        if (vpDisplay::getClick(I, button, false)) {
          if (button == vpMouseButton::button3) {
            opt_click = true;
          }
          else if (button == vpMouseButton::button1) {
            break;
          }
        }
      }
    }
  }

  if (!times_vec.empty()) {
    std::cout << "Computation time, Mean: " << vpMath::getMean(times_vec)
              << " ms ; Median: " << vpMath::getMedian(times_vec) << " ms ; Std: " << vpMath::getStdev(times_vec)
              << std::endl;
  }
}
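
// run_test() is instantiated below from main() for both grayscale
// (vpImage<unsigned char>) and color (vpImage<vpRGBa>) images.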

int main(int argc, const char **argv)
{
  try {
    std::string env_ipath;
    bool opt_click_allowed = true;
    bool opt_display = true;
    bool use_parallel_ransac = false;

    // Read the command line options
    if (getOptions(argc, argv, opt_click_allowed, opt_display, use_parallel_ransac) == false) {
      return EXIT_FAILURE;
    }

    // Get the visp-images-data package path or VISP_INPUT_IMAGE_PATH
    // environment variable value
    env_ipath = vpIoTools::getViSPImagesDataPath();

    if (env_ipath.empty()) {
      std::cerr << "Please set the VISP_INPUT_IMAGE_PATH environment "
                   "variable value."
                << std::endl;
      return EXIT_FAILURE;
    }

    {
      vpImage<unsigned char> I, IMatching;

      std::cout << "-- Test on gray level images" << std::endl;

      run_test(env_ipath, opt_click_allowed, opt_display, use_parallel_ransac, I, IMatching);
    }
    {
      vpImage<vpRGBa> I, IMatching;

      std::cout << "-- Test on color images" << std::endl;

      run_test(env_ipath, opt_click_allowed, opt_display, use_parallel_ransac, I, IMatching);
    }
  }
  catch (const vpException &e) {
    std::cerr << e.what() << std::endl;
    return EXIT_FAILURE;
  }

  std::cout << "testKeyPoint-2 is ok !" << std::endl;
  return EXIT_SUCCESS;
}
#else
int main()
{
  std::cerr << "You need OpenCV with the imgproc, features2d and video modules to run this test." << std::endl;

  return EXIT_SUCCESS;
}

#endif
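The heart of the tracking loop above is the call to vpKeyPoint::matchPoint(). As a reference for reuse outside this test, the following minimal sketch isolates that step; it assumes a vpKeyPoint instance whose reference database was already built with buildReference() (as done for the three training views above) and a ViSP build with the same OpenCV modules required by this test. The helper name estimatePose() and its parameters are illustrative, not part of the test.

#include <visp3/core/vpCameraParameters.h>
#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpImage.h>
#include <visp3/vision/vpKeyPoint.h>

// Illustrative helper (not part of the test): estimate the pose of the
// learned object in a new image using an already trained vpKeyPoint.
bool estimatePose(vpKeyPoint &keypoints, const vpImage<unsigned char> &I,
                  const vpCameraParameters &cam, vpHomogeneousMatrix &cMo)
{
  double error = 0.;       // pose estimation error returned by matchPoint()
  double elapsedTime = 0.; // detection + matching + pose time, in ms
  // matchPoint() detects and matches keypoints in I against the reference
  // database, then estimates cMo by RANSAC; it returns false on failure.
  return keypoints.matchPoint(I, cam, cMo, error, elapsedTime);
}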