OpenCV ORB not finding matches once rotation/scale variance is introduced

KLowe · Mar 2, 2012 · Viewed 23.8k times

I am working on a project using the ORB feature detector in OpenCV 2.3.1. I am finding matches between 8 different images, 6 of which are very similar (20 cm difference in camera position along a linear slider, so there is no scale or rotational variance), plus 2 images taken from about a 45 degree angle on either side. My code finds plenty of accurate matches between the very similar images, but few to none between the images taken from the more oblique perspectives. I've included what I think are the pertinent parts of my code; please let me know if you need more information.

// set parameters

int numKeyPoints = 1500;
float distThreshold = 15.0;

//instantiate detector, extractor, matcher

cv::Ptr<cv::FeatureDetector> detector = new cv::OrbFeatureDetector(numKeyPoints);
cv::Ptr<cv::DescriptorExtractor> extractor = new cv::OrbDescriptorExtractor;
cv::Ptr<cv::DescriptorMatcher> matcher = new cv::BruteForceMatcher<cv::HammingLUT>;

//Load input image detect keypoints

cv::Mat img1;
std::vector<cv::KeyPoint> img1_keypoints;
cv::Mat img1_descriptors;
cv::Mat img2;
std::vector<cv::KeyPoint> img2_keypoints;
cv::Mat img2_descriptors;
img1 = cv::imread(fList[0].string(), CV_LOAD_IMAGE_GRAYSCALE);
img2 = cv::imread(fList[1].string(), CV_LOAD_IMAGE_GRAYSCALE);
detector->detect(img1, img1_keypoints);
detector->detect(img2, img2_keypoints);
extractor->compute(img1, img1_keypoints, img1_descriptors);
extractor->compute(img2, img2_keypoints, img2_descriptors);

//Match keypoints using knnMatch to find the single best match for each keypoint
//Then cull results that fall below given distance threshold

std::vector<std::vector<cv::DMatch> > matches;
matcher->knnMatch(img1_descriptors, img2_descriptors, matches, 1);
int matchCount=0;
for (size_t n=0; n<matches.size(); ++n) {
    if (matches[n].size() > 0){
        if (matches[n][0].distance > distThreshold){
            matches[n].erase(matches[n].begin());
        }else{
            ++matchCount;
        }
    }
}
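
For reference, the surviving matches can then be flattened into a single vector (a minimal sketch using the variables above; goodMatches is just an illustrative name):

// Collect the matches that survived the distance threshold
// into a flat vector (goodMatches is an illustrative name)
std::vector<cv::DMatch> goodMatches;
for (size_t n=0; n<matches.size(); ++n) {
    if (!matches[n].empty())
        goodMatches.push_back(matches[n][0]);
}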

Answer

KLowe · Mar 27, 2012

I ended up getting enough useful matches by changing my process for filtering matches. My previous method discarded a lot of good matches based solely on their distance value. The RobustMatcher class that I found in the OpenCV 2 Computer Vision Application Programming Cookbook ended up working great. Now that all of my matches are accurate, I've been able to get good enough results by bumping up the number of keypoints the ORB detector looks for. Using the RobustMatcher with SIFT or SURF still gives much better results, but I'm getting usable data with ORB now.

// RobustMatcher class taken from the OpenCV 2 Computer Vision Application Programming Cookbook, Ch. 9
class RobustMatcher {
  private:
     // pointer to the feature point detector object
     cv::Ptr<cv::FeatureDetector> detector;
     // pointer to the feature descriptor extractor object
     cv::Ptr<cv::DescriptorExtractor> extractor;
     // pointer to the matcher object
     cv::Ptr<cv::DescriptorMatcher > matcher;
     float ratio; // max ratio between 1st and 2nd NN
     bool refineF; // if true will refine the F matrix
     double distance; // min distance to epipolar
     double confidence; // confidence level (probability)
  public:
     RobustMatcher() : ratio(0.65f), refineF(true),
                       distance(3.0), confidence(0.99) {
        // ORB is the default feature
        detector= new cv::OrbFeatureDetector();
        extractor= new cv::OrbDescriptorExtractor();
        matcher= new cv::BruteForceMatcher<cv::HammingLUT>;
     }

  // Set the feature detector
  void setFeatureDetector(
         cv::Ptr<cv::FeatureDetector>& detect) {
     detector= detect;
  }
  // Set the descriptor extractor
  void setDescriptorExtractor(
         cv::Ptr<cv::DescriptorExtractor>& desc) {
     extractor= desc;
  }
  // Set the matcher
  void setDescriptorMatcher(
         cv::Ptr<cv::DescriptorMatcher>& match) {
     matcher= match;
  }
  // Set confidence level
  void setConfidenceLevel(
         double conf) {
     confidence= conf;
  }
  //Set MinDistanceToEpipolar
  void setMinDistanceToEpipolar(
         double dist) {
     distance= dist;
  }
  //Set ratio
  void setRatio(
         float rat) {
     ratio= rat;
  }

  // Clear matches for which NN ratio is > than threshold
  // return the number of removed points
  // (corresponding entries being cleared,
  // i.e. size will be 0)
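  // (This is the ratio test from Lowe's SIFT paper: a match is
  // considered ambiguous, and discarded, when the best neighbour
  // is not clearly closer than the second-best.)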
  int ratioTest(std::vector<std::vector<cv::DMatch> >
                                               &matches) {
    int removed=0;
      // for all matches
    for (std::vector<std::vector<cv::DMatch> >::iterator
             matchIterator= matches.begin();
         matchIterator!= matches.end(); ++matchIterator) {
           // if two nearest neighbours have been identified
           if (matchIterator->size() > 1) {
               // check distance ratio
               if ((*matchIterator)[0].distance/
                   (*matchIterator)[1].distance > ratio) {
                  matchIterator->clear(); // remove match
                  removed++;
               }
           } else { // does not have 2 neighbours
               matchIterator->clear(); // remove match
               removed++;
           }
    }
    return removed;
  }

  // Insert symmetrical matches in symMatches vector
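  // (Cross-check: a match is kept only when keypoint A's best
  // match is B and keypoint B's best match is A.)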
  void symmetryTest(
      const std::vector<std::vector<cv::DMatch> >& matches1,
      const std::vector<std::vector<cv::DMatch> >& matches2,
      std::vector<cv::DMatch>& symMatches) {
    // for all matches image 1 -> image 2
    for (std::vector<std::vector<cv::DMatch> >::
             const_iterator matchIterator1= matches1.begin();
         matchIterator1!= matches1.end(); ++matchIterator1) {
       // ignore deleted matches
       if (matchIterator1->size() < 2)
           continue;
       // for all matches image 2 -> image 1
       for (std::vector<std::vector<cv::DMatch> >::
          const_iterator matchIterator2= matches2.begin();
           matchIterator2!= matches2.end();
           ++matchIterator2) {
           // ignore deleted matches
           if (matchIterator2->size() < 2)
              continue;
           // Match symmetry test
           if ((*matchIterator1)[0].queryIdx ==
               (*matchIterator2)[0].trainIdx &&
               (*matchIterator2)[0].queryIdx ==
               (*matchIterator1)[0].trainIdx) {
               // add symmetrical match
                 symMatches.push_back(
                   cv::DMatch((*matchIterator1)[0].queryIdx,
                             (*matchIterator1)[0].trainIdx,
                             (*matchIterator1)[0].distance));
                 break; // next match in image 1 -> image 2
           }
       }
    }
  }

  // Identify good matches using RANSAC
  // Return the fundamental matrix
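  // (Matches are kept only when they are consistent with the
  // epipolar geometry estimated by RANSAC, i.e. each point lies
  // within 'distance' pixels of its epipolar line.)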
  cv::Mat ransacTest(
      const std::vector<cv::DMatch>& matches,
      const std::vector<cv::KeyPoint>& keypoints1,
      const std::vector<cv::KeyPoint>& keypoints2,
      std::vector<cv::DMatch>& outMatches) {
   // Convert keypoints into Point2f
   std::vector<cv::Point2f> points1, points2;
   cv::Mat fundemental;
   for (std::vector<cv::DMatch>::
         const_iterator it= matches.begin();
       it!= matches.end(); ++it) {
       // Get the position of left keypoints
       float x= keypoints1[it->queryIdx].pt.x;
       float y= keypoints1[it->queryIdx].pt.y;
       points1.push_back(cv::Point2f(x,y));
       // Get the position of right keypoints
       x= keypoints2[it->trainIdx].pt.x;
       y= keypoints2[it->trainIdx].pt.y;
       points2.push_back(cv::Point2f(x,y));
    }
   // Compute F matrix using RANSAC
   std::vector<uchar> inliers(points1.size(),0);
   if (points1.size()>0&&points2.size()>0){
      fundemental= cv::findFundamentalMat(
         cv::Mat(points1),cv::Mat(points2), // matching points
          inliers,       // match status (inlier or outlier)
          CV_FM_RANSAC, // RANSAC method
          distance,      // distance to epipolar line
          confidence); // confidence probability
      // extract the surviving (inliers) matches
      std::vector<uchar>::const_iterator
                         itIn= inliers.begin();
      std::vector<cv::DMatch>::const_iterator
                         itM= matches.begin();
      // for all matches
      for ( ;itIn!= inliers.end(); ++itIn, ++itM) {
         if (*itIn) { // it is a valid match
             outMatches.push_back(*itM);
          }
       }
       if (refineF) {
       // The F matrix will be recomputed with
       // all accepted matches
          // Convert keypoints into Point2f
          // for final F computation
          points1.clear();
          points2.clear();
          for (std::vector<cv::DMatch>::
                 const_iterator it= outMatches.begin();
              it!= outMatches.end(); ++it) {
              // Get the position of left keypoints
              float x= keypoints1[it->queryIdx].pt.x;
              float y= keypoints1[it->queryIdx].pt.y;
              points1.push_back(cv::Point2f(x,y));
              // Get the position of right keypoints
              x= keypoints2[it->trainIdx].pt.x;
              y= keypoints2[it->trainIdx].pt.y;
              points2.push_back(cv::Point2f(x,y));
          }
          // Compute 8-point F from all accepted matches
          if (points1.size()>0&&points2.size()>0){
             fundemental= cv::findFundamentalMat(
                cv::Mat(points1),cv::Mat(points2), // matches
                CV_FM_8POINT); // 8-point method
          }
       }
    }
    return fundemental;
  }

  // Match feature points using symmetry test and RANSAC
  // returns the fundamental matrix
  cv::Mat match(cv::Mat& image1,
                cv::Mat& image2, // input images
     // output matches and keypoints
     std::vector<cv::DMatch>& matches,
     std::vector<cv::KeyPoint>& keypoints1,
     std::vector<cv::KeyPoint>& keypoints2) {
   // 1a. Detection of the feature points (ORB by default)
   detector->detect(image1,keypoints1);
   detector->detect(image2,keypoints2);
   // 1b. Extraction of the descriptors
   cv::Mat descriptors1, descriptors2;
   extractor->compute(image1,keypoints1,descriptors1);
   extractor->compute(image2,keypoints2,descriptors2);
   // 2. Match the two image descriptors
   // from image 1 to image 2
   // based on k nearest neighbours (with k=2)
   std::vector<std::vector<cv::DMatch> > matches1;
   matcher->knnMatch(descriptors1,descriptors2,
       matches1, // vector of matches (up to 2 per entry)
       2);        // return 2 nearest neighbours
    // from image 2 to image 1
    // based on k nearest neighbours (with k=2)
    std::vector<std::vector<cv::DMatch> > matches2;
    matcher->knnMatch(descriptors2,descriptors1,
       matches2, // vector of matches (up to 2 per entry)
       2);        // return 2 nearest neighbours
    // 3. Remove matches for which NN ratio is
    // > than threshold
    // clean image 1 -> image 2 matches
     ratioTest(matches1);
     // clean image 2 -> image 1 matches
     ratioTest(matches2);
    // 4. Remove non-symmetrical matches
    std::vector<cv::DMatch> symMatches;
    symmetryTest(matches1,matches2,symMatches);
    // 5. Validate matches using RANSAC
    cv::Mat fundemental= ransacTest(symMatches,
                keypoints1, keypoints2, matches);
     // return the fundamental matrix found
    return fundemental;
  }
};


// set parameters

int numKeyPoints = 1500;

//Instantiate robust matcher

RobustMatcher rmatcher;

//instantiate detector, extractor, matcher

cv::Ptr<cv::FeatureDetector> detector = new cv::OrbFeatureDetector(numKeyPoints);
cv::Ptr<cv::DescriptorExtractor> extractor = new cv::OrbDescriptorExtractor;
cv::Ptr<cv::DescriptorMatcher> matcher = new cv::BruteForceMatcher<cv::HammingLUT>;

rmatcher.setFeatureDetector(detector);
rmatcher.setDescriptorExtractor(extractor);
rmatcher.setDescriptorMatcher(matcher);

//Load input image detect keypoints

cv::Mat img1;
std::vector<cv::KeyPoint> img1_keypoints;
cv::Mat img1_descriptors;
cv::Mat img2;
std::vector<cv::KeyPoint> img2_keypoints;
cv::Mat img2_descriptors;
std::vector<cv::DMatch> matches;
img1 = cv::imread(fList[0].string(), CV_LOAD_IMAGE_GRAYSCALE);
img2 = cv::imread(fList[1].string(), CV_LOAD_IMAGE_GRAYSCALE);

rmatcher.match(img1, img2, matches, img1_keypoints, img2_keypoints);
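
To sanity-check the output, the filtered matches can be visualized with OpenCV's cv::drawMatches (a minimal sketch using the variables above; the output filename is just an example):

// Draw the filtered matches side by side and save the image
// ("matches.png" is only an example filename)
cv::Mat matchImg;
cv::drawMatches(img1, img1_keypoints, img2, img2_keypoints,
                matches, matchImg);
cv::imwrite("matches.png", matchImg);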