OpenCV C++ feature matching: ORB detection, Lowe's ratio test, and fundamental-matrix (RANSAC) filtering
#include <opencv2/opencv.hpp>
#include <opencv2/features2d.hpp>
//-- Initialize Detector & Matcher
cv::Ptr<cv::FeatureDetector> detector = cv::ORB::create();
cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create(cv::DescriptorMatcher::BRUTEFORCE_HAMMING);
//^ Swap in another detector (SIFT, SURF, BRISK, AKAZE, ...) as needed.
//^ Match the distance metric to the descriptor type: Hamming (BRUTEFORCE_HAMMING) for binary descriptors
//  such as ORB/BRISK/AKAZE, L2 (BRUTEFORCE_SL2 or FLANNBASED) for float descriptors such as SIFT/SURF.
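//-- Illustrative alternative (a sketch only, not used below): a float-descriptor pipeline.
//   cv::SIFT is in the main module from OpenCV 4.4 onward; older versions need opencv_contrib (cv::xfeatures2d).
// cv::Ptr<cv::FeatureDetector> detector = cv::SIFT::create();
// cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create(cv::DescriptorMatcher::FLANNBASED);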
//-- Extract Features
// Input images: cv::Mat image_1_gray, image_2_gray
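// A minimal way to obtain them (file names below are placeholders):
cv::Mat image_1_gray = cv::imread( "image_1.png", cv::IMREAD_GRAYSCALE );
cv::Mat image_2_gray = cv::imread( "image_2.png", cv::IMREAD_GRAYSCALE );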
std::vector<cv::KeyPoint> keypoints1, keypoints2;
cv::Mat descriptors1, descriptors2;
detector->detectAndCompute( image_1_gray, cv::Mat(), keypoints1, descriptors1 );
detector->detectAndCompute( image_2_gray, cv::Mat(), keypoints2, descriptors2 );
//-- Matcher
std::vector< std::vector< cv::DMatch> > knn_matches;
matcher->knnMatch( descriptors1, descriptors2, knn_matches, 2 );
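//-- Alternative (sketch): a single best match per descriptor with cross-checking, instead of knnMatch + ratio test.
// cv::BFMatcher bf_matcher( cv::NORM_HAMMING, /*crossCheck=*/ true );
// std::vector<cv::DMatch> cross_checked_matches;
// bf_matcher.match( descriptors1, descriptors2, cross_checked_matches );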
//-- Lowe's Ratio Test
const float ratio_thresh = 0.7f;
std::vector< cv::DMatch > good_matches; // result of nearest-neighbour search + Lowe's ratio test
for( size_t i = 0 ; i < knn_matches.size() ; i++ )
{
    if( knn_matches[i].size() < 2 )
        continue; // fewer than 2 neighbours returned, ratio test not applicable
    if( knn_matches[i][0].distance < ratio_thresh * knn_matches[i][1].distance )
    {
        good_matches.push_back( knn_matches[i][0] );
    }
}
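//-- Optional: visualize the ratio-test survivors (a sketch; assumes an OpenCV build with highgui).
// cv::Mat img_good_matches;
// cv::drawMatches( image_1_gray, keypoints1, image_2_gray, keypoints2, good_matches, img_good_matches );
// cv::imshow( "good_matches", img_good_matches );
// cv::waitKey( 0 );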
//-- Fundamental Matrix Test
//   (CvPlottingUtils, defined at the bottom of this snippet, must be declared before this point to compile.)
std::vector< cv::Point2f > matched_1, matched_2;
std::vector<uchar> status;
if( good_matches.size() > 20 )
{
    CvPlottingUtils::dmatch_2_cvpoint2f( keypoints1, keypoints2, good_matches, matched_1, matched_2 );
    // Only the RANSAC inlier mask (status) is used below; the returned 3x3 fundamental matrix is discarded.
    cv::findFundamentalMat( matched_1, matched_2, cv::FM_RANSAC, 3.0, 0.99, status );
}
std::vector< cv::Point2f > matched_1_filtered, matched_2_filtered;
for( size_t y = 0 ; y < status.size() ; y++ )
{
    if( status.at(y) > 0u )
    {
        matched_1_filtered.push_back( matched_1.at(y) );
        matched_2_filtered.push_back( matched_2.at(y) );
    }
}
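//-- Quick sanity check on how much each filter kept (illustrative; needs <iostream>):
// std::cout << knn_matches.size() << " knn matches, " << good_matches.size() << " after ratio test, "
//           << matched_1_filtered.size() << " after RANSAC." << std::endl;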
//-- Done..!
// Utility helper used by the fundamental-matrix test above.
class CvPlottingUtils
{
public:
    // Convert a list of DMatch into two aligned cv::Point2f lists (one per image).
    static void dmatch_2_cvpoint2f( const std::vector<cv::KeyPoint>& kp1, const std::vector<cv::KeyPoint>& kp2,
                                    const std::vector<cv::DMatch>& matches,
                                    std::vector<cv::Point2f>& mp1, std::vector<cv::Point2f>& mp2 )
    {
        CV_Assert( matches.size() > 0 );
        CV_Assert( kp1.size() > 0 );
        CV_Assert( kp2.size() > 0 );

        mp1.clear();
        mp2.clear();
        for( size_t i = 0 ; i < matches.size() ; i++ )
        {
            const int queryIdx = matches.at(i).queryIdx; // index into kp1
            const int trainIdx = matches.at(i).trainIdx; // index into kp2
            CV_Assert( queryIdx >= 0 && queryIdx < (int)kp1.size() );
            CV_Assert( trainIdx >= 0 && trainIdx < (int)kp2.size() );

            mp1.push_back( kp1.at(queryIdx).pt ); // KeyPoint::pt is already a cv::Point2f
            mp2.push_back( kp2.at(trainIdx).pt );
        }
        CV_Assert( mp1.size() == mp2.size() );
    }
};
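// Build sketch (file name and flags are illustrative; adjust for your OpenCV install):
//   g++ -std=c++17 feature_matching.cpp -o feature_matching `pkg-config --cflags --libs opencv4`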