HCY_Graduation_Project/src/Tracking.cc


/**
* This file is part of ORB-SLAM2.
*
* Copyright (C) 2014-2016 Raúl Mur-Artal <raulmur at unizar dot es> (University of Zaragoza)
* For more information see <https://github.com/raulmur/ORB_SLAM2>
*
* ORB-SLAM2 is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* ORB-SLAM2 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with ORB-SLAM2. If not, see <http://www.gnu.org/licenses/>.
*/
#include "Tracking.h"
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include "ORBmatcher.h"
#include "FrameDrawer.h"
#include "Converter.h"
#include "Map.h"
#include "Initializer.h"
#include "Optimizer.h"
#include "PnPsolver.h"
#include <unistd.h>
#include <iostream>
#include <mutex>
using namespace std;
namespace ORB_SLAM2
{
Tracking::Tracking(System *pSys, ORBVocabulary *pVoc, FrameDrawer *pFrameDrawer, MapDrawer *pMapDrawer, Map *pMap, KeyFrameDatabase *pKFDB, const string &strSettingPath, const int sensor) : mState(NO_IMAGES_YET), mSensor(sensor), mbOnlyTracking(false), mbVO(false), mpORBVocabulary(pVoc),
mpKeyFrameDB(pKFDB), mpInitializer(static_cast<Initializer *>(NULL)), mpSystem(pSys), mpViewer(NULL),
mpFrameDrawer(pFrameDrawer), mpMapDrawer(pMapDrawer), mpMap(pMap), mnLastRelocFrameId(0)
{
// Load camera parameters from settings file
cv::FileStorage fSettings(strSettingPath, cv::FileStorage::READ);
float fx = fSettings["Camera.fx"];
float fy = fSettings["Camera.fy"];
float cx = fSettings["Camera.cx"];
float cy = fSettings["Camera.cy"];
cv::Mat K = cv::Mat::eye(3, 3, CV_32F);
K.at<float>(0, 0) = fx;
K.at<float>(1, 1) = fy;
K.at<float>(0, 2) = cx;
K.at<float>(1, 2) = cy;
K.copyTo(mK);
cv::Mat DistCoef(4, 1, CV_32F);
DistCoef.at<float>(0) = fSettings["Camera.k1"];
DistCoef.at<float>(1) = fSettings["Camera.k2"];
DistCoef.at<float>(2) = fSettings["Camera.p1"];
DistCoef.at<float>(3) = fSettings["Camera.p2"];
const float k3 = fSettings["Camera.k3"];
if (k3 != 0)
{
DistCoef.resize(5);
DistCoef.at<float>(4) = k3;
}
DistCoef.copyTo(mDistCoef);
mbf = fSettings["Camera.bf"];
float fps = fSettings["Camera.fps"];
if (fps == 0)
fps = 30;
// Max/Min Frames to insert keyframes and to check relocalisation
mMinFrames = 0;
mMaxFrames = fps;
cout << endl
<< "Camera Parameters: " << endl;
cout << "- fx: " << fx << endl;
cout << "- fy: " << fy << endl;
cout << "- cx: " << cx << endl;
cout << "- cy: " << cy << endl;
cout << "- k1: " << DistCoef.at<float>(0) << endl;
cout << "- k2: " << DistCoef.at<float>(1) << endl;
if (DistCoef.rows == 5)
cout << "- k3: " << DistCoef.at<float>(4) << endl;
cout << "- p1: " << DistCoef.at<float>(2) << endl;
cout << "- p2: " << DistCoef.at<float>(3) << endl;
cout << "- fps: " << fps << endl;
int nRGB = fSettings["Camera.RGB"];
mbRGB = nRGB;
if (mbRGB)
cout << "- color order: RGB (ignored if grayscale)" << endl;
else
cout << "- color order: BGR (ignored if grayscale)" << endl;
// Load ORB parameters
m_bev_K = (cv::Mat_<float>(3, 3) << 608.0 / 50.0, 0, 304, 0, 608.0 / 50.0, 608, 0, 0, 1);
m_bev_K_invers = (cv::Mat_<float>(3, 3) << 50.0 / 608.0, 0, -25, 0, 50.0 / 608.0, -50, 0, 0, 1);
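// A note on the hard-coded BEV intrinsics above (the assumed geometry, a 608x608-pixel
// top-down view covering 50 m per 608 px, is not stated in the file but follows from the
// constants): m_bev_K maps BEV camera coordinates (metres) to BEV pixels with
// fx = fy = 608/50 px per metre and principal point (304, 608), i.e. the bottom centre
// of the image. m_bev_K_invers is its closed-form inverse:
//   [ 50/608    0    -304*50/608 ]   [ 50/608    0    -25 ]
//   [   0    50/608  -608*50/608 ] = [   0    50/608  -50 ]
//   [   0       0         1      ]   [   0       0      1 ]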
int nFeatures = fSettings["ORBextractor.nFeatures"];
float fScaleFactor = fSettings["ORBextractor.scaleFactor"];
int nLevels = fSettings["ORBextractor.nLevels"];
int fIniThFAST = fSettings["ORBextractor.iniThFAST"];
int fMinThFAST = fSettings["ORBextractor.minThFAST"];
mpORBextractorLeft = new ORBextractor(nFeatures, fScaleFactor, nLevels, fIniThFAST, fMinThFAST);
if (sensor == System::STEREO)
mpORBextractorRight = new ORBextractor(nFeatures, fScaleFactor, nLevels, fIniThFAST, fMinThFAST);
if (sensor == System::MONOCULAR)
mpIniORBextractor = new ORBextractor(2 * nFeatures, fScaleFactor, nLevels, fIniThFAST, fMinThFAST);
cout << endl
<< "ORB Extractor Parameters: " << endl;
cout << "- Number of Features: " << nFeatures << endl;
cout << "- Scale Levels: " << nLevels << endl;
cout << "- Scale Factor: " << fScaleFactor << endl;
cout << "- Initial Fast Threshold: " << fIniThFAST << endl;
cout << "- Minimum Fast Threshold: " << fMinThFAST << endl;
if (sensor == System::STEREO || sensor == System::RGBD)
{
mThDepth = mbf * (float)fSettings["ThDepth"] / fx;
cout << endl
<< "Depth Threshold (Close/Far Points): " << mThDepth << endl;
}
if (sensor == System::RGBD)
{
mDepthMapFactor = fSettings["DepthMapFactor"];
if (fabs(mDepthMapFactor) < 1e-5)
mDepthMapFactor = 1;
else
mDepthMapFactor = 1.0f / mDepthMapFactor;
}
mMap_car = cv::Mat::zeros(1000, 1000, CV_8UC3);
}
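// For reference, a minimal settings file consumed by the constructor above might look
// like the sketch below. The keys are taken from the reads above; the numeric values
// are hypothetical KITTI-like placeholders, not values shipped with this project:
//
//   %YAML:1.0
//   Camera.fx: 718.856
//   Camera.fy: 718.856
//   Camera.cx: 607.193
//   Camera.cy: 185.216
//   Camera.k1: 0.0
//   Camera.k2: 0.0
//   Camera.p1: 0.0
//   Camera.p2: 0.0
//   Camera.bf: 386.145
//   Camera.fps: 10
//   Camera.RGB: 1
//   ThDepth: 35
//   ORBextractor.nFeatures: 2000
//   ORBextractor.scaleFactor: 1.2
//   ORBextractor.nLevels: 8
//   ORBextractor.iniThFAST: 20
//   ORBextractor.minThFAST: 7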
void Tracking::SetLocalMapper(LocalMapping *pLocalMapper)
{
mpLocalMapper = pLocalMapper;
}
void Tracking::SetLoopClosing(LoopClosing *pLoopClosing)
{
mpLoopClosing = pLoopClosing;
}
void Tracking::SetViewer(Viewer *pViewer)
{
mpViewer = pViewer;
}
cv::Mat Tracking::GrabImageStereo(const cv::Mat &imRectLeft, const cv::Mat &imRectRight, const double &timestamp, vector<vector<double>> &vvLabel, cv::Mat &imbev, double &delta_t)
{
mbcreateKeyFrame = false;
mdelta_t = delta_t;
mImGray = imRectLeft;
mImRGB = imRectLeft;
cv::Mat imGrayRight = imRectRight;
if (mImGray.channels() == 3)
{
if (mbRGB)
{
cvtColor(mImGray, mImGray, CV_RGB2GRAY);
cvtColor(imGrayRight, imGrayRight, CV_RGB2GRAY);
}
else
{
cvtColor(mImGray, mImGray, CV_BGR2GRAY);
cvtColor(imGrayRight, imGrayRight, CV_BGR2GRAY);
}
}
else if (mImGray.channels() == 4)
{
if (mbRGB)
{
cvtColor(mImGray, mImGray, CV_RGBA2GRAY);
cvtColor(imGrayRight, imGrayRight, CV_RGBA2GRAY);
}
else
{
cvtColor(mImGray, mImGray, CV_BGRA2GRAY);
cvtColor(imGrayRight, imGrayRight, CV_BGRA2GRAY);
}
}
// Transform from the previous frame to the current frame
cv::Mat Velocity;
bool state = false;
cv::Mat CamPosePredict = cv::Mat::eye(4, 4, CV_32FC1);
if (mState == NO_IMAGES_YET)
{
mState = NOT_INITIALIZED;
}
if (mState == NOT_INITIALIZED)
state = true;
if (!state)
{
Velocity = Tracking::GetVelocity(); // constant-velocity model: reuse the last inter-frame motion as the current estimate
if (Velocity.empty())
Velocity = cv::Mat::eye(4, 4, CV_32FC1); // no motion estimate yet: fall back to identity so the prediction is just the last pose
CamPosePredict = Velocity * mLastFrame_Tcw;
}
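// A sketch of the constant-velocity prediction used above (poses are 4x4 homogeneous
// world-to-camera transforms; GetVelocity() returns the motion computed in Track()):
//   V       = Tcw(k-1) * Tcw(k-2)^-1   // last inter-frame motion
//   Tcw(k) ~= V * Tcw(k-1)             // prediction passed to the Frame as CamPosePredict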
updateCarState(); // update the state of all tracked vehicles
mCurrentFrame = Frame(mImGray, imGrayRight, timestamp, mpORBextractorLeft, mpORBextractorRight, mpORBVocabulary, mK, mDistCoef, mbf, mThDepth, vvLabel, CamPosePredict, state, mvpCar);
Track();
// cout << "当前位置: " << mCurrentFrame.mTcw.at<float>(0, 3) << " " << mCurrentFrame.mTcw.at<float>(1, 3) << " " << mCurrentFrame.mTcw.at<float>(2, 3) << endl;
mCurrentFrame.mTcw.copyTo(mLastFrame_Tcw);
return mCurrentFrame.mTcw.clone();
}
cv::Mat Tracking::GrabImageRGBD(const cv::Mat &imRGB, const cv::Mat &imD, const double &timestamp, const vector<vector<double>> &vvLabel)
{
mImGray = imRGB;
cv::Mat imDepth = imD;
if (mImGray.channels() == 3)
{
if (mbRGB)
cvtColor(mImGray, mImGray, CV_RGB2GRAY);
else
cvtColor(mImGray, mImGray, CV_BGR2GRAY);
}
else if (mImGray.channels() == 4)
{
if (mbRGB)
cvtColor(mImGray, mImGray, CV_RGBA2GRAY);
else
cvtColor(mImGray, mImGray, CV_BGRA2GRAY);
}
if ((fabs(mDepthMapFactor - 1.0f) > 1e-5) || imDepth.type() != CV_32F)
imDepth.convertTo(imDepth, CV_32F, mDepthMapFactor);
// Obtain the relative pose for template matching --by Zhong
cv::Mat Velocity = cv::Mat::zeros(4, 4, CV_32FC1);
cv::Mat Lastframe = cv::Mat::zeros(imD.size(), CV_8UC1);
bool state = false;
if (mState == NO_IMAGES_YET)
{
mState = NOT_INITIALIZED;
}
if (mState == NOT_INITIALIZED)
state = true;
if (!state)
{
Velocity = Tracking::GetVelocity();
mLastFrame.mframe.copyTo(Lastframe);
}
mCurrentFrame = Frame(mImGray, imDepth, timestamp, mpORBextractorLeft, mpORBVocabulary, mK, mDistCoef, mbf, mThDepth, vvLabel, Velocity, state, Lastframe, mDepthMapFactor, imRGB);
Track();
return mCurrentFrame.mTcw.clone();
}
cv::Mat Tracking::GrabImageMonocular(const cv::Mat &im, const double &timestamp)
{
mImGray = im;
if (mImGray.channels() == 3)
{
if (mbRGB)
cvtColor(mImGray, mImGray, CV_RGB2GRAY);
else
cvtColor(mImGray, mImGray, CV_BGR2GRAY);
}
else if (mImGray.channels() == 4)
{
if (mbRGB)
cvtColor(mImGray, mImGray, CV_RGBA2GRAY);
else
cvtColor(mImGray, mImGray, CV_BGRA2GRAY);
}
if (mState == NOT_INITIALIZED || mState == NO_IMAGES_YET)
mCurrentFrame = Frame(mImGray, timestamp, mpIniORBextractor, mpORBVocabulary, mK, mDistCoef, mbf, mThDepth);
else
mCurrentFrame = Frame(mImGray, timestamp, mpORBextractorLeft, mpORBVocabulary, mK, mDistCoef, mbf, mThDepth);
Track();
return mCurrentFrame.mTcw.clone();
}
void Tracking::Track()
{
if (mState == NO_IMAGES_YET)
{
mState = NOT_INITIALIZED;
}
mLastProcessedState = mState;
// Get Map Mutex -> Map cannot be changed
unique_lock<mutex> lock(mpMap->mMutexMapUpdate);
if (mState == NOT_INITIALIZED)
{
if (mSensor == System::STEREO || mSensor == System::RGBD)
StereoInitialization();
else
MonocularInitialization();
mpFrameDrawer->Update(this);
if (mState != OK)
return;
}
else
{
// System is initialized. Track Frame.
bool bOK;
// Initial camera pose estimation using motion model or relocalization (if tracking is lost)
if (!mbOnlyTracking)
{
// Local Mapping is activated. This is the normal behaviour, unless
// you explicitly activate the "only tracking" mode.
if (mState == OK)
{
// Local Mapping might have changed some MapPoints tracked in last frame
CheckReplacedInLastFrame();
if (mVelocity.empty() || mCurrentFrame.mnId < mnLastRelocFrameId + 2)
{
bOK = TrackReferenceKeyFrame();
}
else
{
bOK = TrackWithMotionModel();
if (!bOK)
bOK = TrackReferenceKeyFrame();
}
}
else
{
bOK = Relocalization();
}
}
else
{
// Localization Mode: Local Mapping is deactivated
if (mState == LOST)
{
bOK = Relocalization();
}
else
{
if (!mbVO)
{
// In last frame we tracked enough MapPoints in the map
if (!mVelocity.empty())
{
bOK = TrackWithMotionModel();
}
else
{
bOK = TrackReferenceKeyFrame();
}
}
else
{
// In last frame we tracked mainly "visual odometry" points.
// We compute two camera poses, one from motion model and one doing relocalization.
// If relocalization is successful we choose that solution, otherwise we retain
// the "visual odometry" solution.
bool bOKMM = false;
bool bOKReloc = false;
vector<MapPoint *> vpMPsMM;
vector<bool> vbOutMM;
cv::Mat TcwMM;
if (!mVelocity.empty())
{
bOKMM = TrackWithMotionModel();
vpMPsMM = mCurrentFrame.mvpMapPoints;
vbOutMM = mCurrentFrame.mvbOutlier;
TcwMM = mCurrentFrame.mTcw.clone();
}
bOKReloc = Relocalization();
if (bOKMM && !bOKReloc)
{
mCurrentFrame.SetPose(TcwMM);
mCurrentFrame.mvpMapPoints = vpMPsMM;
mCurrentFrame.mvbOutlier = vbOutMM;
if (mbVO)
{
for (int i = 0; i < mCurrentFrame.N; i++)
{
if (mCurrentFrame.mvpMapPoints[i] && !mCurrentFrame.mvbOutlier[i])
{
mCurrentFrame.mvpMapPoints[i]->IncreaseFound();
}
}
}
}
else if (bOKReloc)
{
mbVO = false;
}
bOK = bOKReloc || bOKMM;
}
}
}
mCurrentFrame.mpReferenceKF = mpReferenceKF;
// If we have an initial estimation of the camera pose and matching. Track the local map.
if (!mbOnlyTracking)
{
if (bOK)
bOK = TrackLocalMap();
}
else
{
// mbVO true means that there are few matches to MapPoints in the map. We cannot retrieve
// a local map and therefore we do not perform TrackLocalMap(). Once the system relocalizes
// the camera we will use the local map again.
if (bOK && !mbVO)
bOK = TrackLocalMap();
}
if (bOK)
mState = OK;
else
mState = LOST;
// Update drawer
mpFrameDrawer->Update(this);
// If tracking was good, check whether to insert a keyframe
if (bOK)
{
// Update motion model
if (!mLastFrame.mTcw.empty())
{
cv::Mat LastTwc = cv::Mat::eye(4, 4, CV_32F);
mLastFrame.GetRotationInverse().copyTo(LastTwc.rowRange(0, 3).colRange(0, 3));
mLastFrame.GetCameraCenter().copyTo(LastTwc.rowRange(0, 3).col(3));
mVelocity = mCurrentFrame.mTcw * LastTwc;
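// mVelocity = Tcw(current) * Twc(last) maps last-frame camera coordinates into the
// current camera frame; applying it to the next Tcw implements the constant-velocity
// model used by TrackWithMotionModel() and GetVelocity().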
}
else
mVelocity = cv::Mat();
mpMapDrawer->SetCurrentCameraPose(mCurrentFrame.mTcw);
// Clean VO matches
for (int i = 0; i < mCurrentFrame.N; i++)
{
MapPoint *pMP = mCurrentFrame.mvpMapPoints[i];
if (pMP)
if (pMP->Observations() < 1)
{
mCurrentFrame.mvbOutlier[i] = false;
mCurrentFrame.mvpMapPoints[i] = static_cast<MapPoint *>(NULL);
}
}
// Delete temporal MapPoints
for (list<MapPoint *>::iterator lit = mlpTemporalPoints.begin(), lend = mlpTemporalPoints.end(); lit != lend; lit++)
{
MapPoint *pMP = *lit;
delete pMP;
}
mlpTemporalPoints.clear();
Track_Car();
// Check if we need to insert a new keyframe
if (NeedNewKeyFrame())
{
mbcreateKeyFrame = true;
CreateNewKeyFrame();
}
// We allow points with high innovation (considered outliers by the Huber function)
// to pass to the new keyframe, so that bundle adjustment will finally decide
// if they are outliers or not. We don't want the next frame to estimate its position
// with those points so we discard them in the frame.
for (int i = 0; i < mCurrentFrame.N; i++)
{
if (mCurrentFrame.mvpMapPoints[i] && mCurrentFrame.mvbOutlier[i])
mCurrentFrame.mvpMapPoints[i] = static_cast<MapPoint *>(NULL);
}
}
// Reset if the camera gets lost soon after initialization
if (mState == LOST)
{
if (mpMap->KeyFramesInMap() <= 5)
{
cout << "Track lost soon after initialisation, reseting..." << endl;
mpSystem->Reset();
return;
}
}
if (!mCurrentFrame.mpReferenceKF)
mCurrentFrame.mpReferenceKF = mpReferenceKF;
mLastFrame = Frame(mCurrentFrame);
}
// Store frame pose information to retrieve the complete camera trajectory afterwards.
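// Only the pose relative to the reference keyframe is stored:
//   Tcr = Tcw(frame) * Twr(refKF)
// so the absolute trajectory can be recovered later, even after loop-closure
// corrections move the keyframes, as Tcw(frame) = Tcr * Trw(refKF).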
if (!mCurrentFrame.mTcw.empty())
{
cv::Mat Tcr = mCurrentFrame.mTcw * mCurrentFrame.mpReferenceKF->GetPoseInverse();
mlRelativeFramePoses.push_back(Tcr);
mlpReferences.push_back(mpReferenceKF);
mlFrameTimes.push_back(mCurrentFrame.mTimeStamp);
mlbLost.push_back(mState == LOST);
}
else
{
// This can happen if tracking is lost
mlRelativeFramePoses.push_back(mlRelativeFramePoses.back());
mlpReferences.push_back(mlpReferences.back());
mlFrameTimes.push_back(mlFrameTimes.back());
mlbLost.push_back(mState == LOST);
}
}
void Tracking::Track_Car()
{
// Re-read the number of vehicles that need tracking
int n_Car = mvpCar.size();
mvvCarV.clear();
// Add observations and create new vehicle models
for (int i = 0; i < mCurrentFrame.mvvLabel.size(); i++)
{
vector<double> bbox = mCurrentFrame.mvvLabel[i];
if (bbox[0] == -1)
continue;
if (mCurrentFrame.mvCarMatches[i] >= 0)
{
Car *car = mvpCar[mCurrentFrame.mvCarMatches[i]];
// Step 1: update the most recent observation frame
car->updateObservations(mCurrentFrame.mnId);
// Step 2: update the bounding-box correspondence
// pair<int, int> p1(mCurrentFrame.mnId, index);
// car->mv_pair_connect.push_back(p1);
car->setTrackedState(true); // mark the car as tracked in the current frame
// Step 3: update world position and velocity
car->updateState(bbox, mCurrentFrame.mRwc, mCurrentFrame.mOw, mdelta_t);
}
}
// Handle vehicles that lost tracking and update the trajectories of all vehicles in the database
for (int j = 0; j < n_Car; j++)
{
Car *car = mvpCar[j];
if (!car->isBad() && !car->isTracked())
{
car->update_untracked_Car();
}
if (!car->isBad())
{
car->updatetraj();
double v = car->get_Vel(mdelta_t);
// append (u, v, vel) for this car to mvvCarV
Get_Motion_Car_V(car);
}
}
}
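// A note on the association convention assumed throughout this file (inferred from the
// uses of mvCarMatches, not documented here): entry i of mCurrentFrame.mvCarMatches is
// the index of the matched Car in mvpCar when >= 0, and -2 marks a detection with no
// match, which triggers creation of a new Car in StereoInitialization() and
// CreateNewKeyFrame().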
void Tracking::updateCarState()
{
for (int i = 0; i < mvpCar.size(); i++)
{
auto car = mvpCar[i];
if (car->isErro())
{
mvpCar.erase(mvpCar.begin() + i);
mpMap->EraseMapCar(car);
delete car;
i--;
continue;
}
car->InCreaseFound(mLastFrame_Tcw);
car->setTrackedState(false); // reset the tracked flag; tracking for this frame has not started yet
car->updateTrackState(mCurrentFrame.mnId + 1); // update whether tracking has been lost
PredictCarPos(car); // predict the vehicle's position
}
}
void Tracking::StereoInitialization()
{
if (mCurrentFrame.N > 500)
{
// Set Frame pose to the origin
mCurrentFrame.SetPose(cv::Mat::eye(4, 4, CV_32F));
// Create KeyFrame
KeyFrame *pKFini = new KeyFrame(mCurrentFrame, mpMap, mpKeyFrameDB);
// Insert KeyFrame in the map
mpMap->AddKeyFrame(pKFini);
// Create MapPoints and associate to KeyFrame
for (int i = 0; i < mCurrentFrame.N; i++)
{
float z = mCurrentFrame.mvDepth[i];
if (z > 0)
{
cv::Mat x3D = mCurrentFrame.UnprojectStereo(i);
MapPoint *pNewMP = new MapPoint(x3D, pKFini, mpMap);
pNewMP->AddObservation(pKFini, i);
pKFini->AddMapPoint(pNewMP, i);
pNewMP->ComputeDistinctiveDescriptors();
pNewMP->UpdateNormalAndDepth();
mpMap->AddMapPoint(pNewMP);
mCurrentFrame.mvpMapPoints[i] = pNewMP;
}
}
for (int i = 0; i < mCurrentFrame.mvvLabel.size(); i++)
{
if (mCurrentFrame.mvCarMatches[i] == -2)
{
Car *new_car = new Car(mCurrentFrame, mCurrentFrame.mvvLabel[i]);
mvpCar.push_back(new_car);
mpMap->AddMapCar(new_car);
}
}
cout << "New map created with " << mpMap->MapPointsInMap() << " points" << endl;
cout << "New map created with " << mpMap->GetAllMapCars().size() << " Cars" << endl;
mpLocalMapper->InsertKeyFrame(pKFini);
mLastFrame = Frame(mCurrentFrame);
mnLastKeyFrameId = mCurrentFrame.mnId;
mpLastKeyFrame = pKFini;
mvpLocalKeyFrames.push_back(pKFini);
mvpLocalMapPoints = mpMap->GetAllMapPoints();
mpReferenceKF = pKFini;
mCurrentFrame.mpReferenceKF = pKFini;
mpMap->SetReferenceMapPoints(mvpLocalMapPoints);
mpMap->mvpKeyFrameOrigins.push_back(pKFini);
mpMapDrawer->SetCurrentCameraPose(mCurrentFrame.mTcw);
mState = OK;
}
}
void Tracking::MonocularInitialization()
{
if (!mpInitializer)
{
// Set Reference Frame
if (mCurrentFrame.mvKeys.size() > 100)
{
mInitialFrame = Frame(mCurrentFrame);
mLastFrame = Frame(mCurrentFrame);
mvbPrevMatched.resize(mCurrentFrame.mvKeysUn.size());
for (size_t i = 0; i < mCurrentFrame.mvKeysUn.size(); i++)
mvbPrevMatched[i] = mCurrentFrame.mvKeysUn[i].pt;
if (mpInitializer)
delete mpInitializer;
mpInitializer = new Initializer(mCurrentFrame, 1.0, 200);
fill(mvIniMatches.begin(), mvIniMatches.end(), -1);
return;
}
}
else
{
// Try to initialize
if ((int)mCurrentFrame.mvKeys.size() <= 100)
{
delete mpInitializer;
mpInitializer = static_cast<Initializer *>(NULL);
fill(mvIniMatches.begin(), mvIniMatches.end(), -1);
return;
}
// Find correspondences
ORBmatcher matcher(0.9, true);
int nmatches = matcher.SearchForInitialization(mInitialFrame, mCurrentFrame, mvbPrevMatched, mvIniMatches, 100);
// Check if there are enough correspondences
if (nmatches < 100)
{
delete mpInitializer;
mpInitializer = static_cast<Initializer *>(NULL);
return;
}
cv::Mat Rcw; // Current Camera Rotation
cv::Mat tcw; // Current Camera Translation
vector<bool> vbTriangulated; // Triangulated Correspondences (mvIniMatches)
if (mpInitializer->Initialize(mCurrentFrame, mvIniMatches, Rcw, tcw, mvIniP3D, vbTriangulated))
{
for (size_t i = 0, iend = mvIniMatches.size(); i < iend; i++)
{
if (mvIniMatches[i] >= 0 && !vbTriangulated[i])
{
mvIniMatches[i] = -1;
nmatches--;
}
}
// Set Frame Poses
mInitialFrame.SetPose(cv::Mat::eye(4, 4, CV_32F));
cv::Mat Tcw = cv::Mat::eye(4, 4, CV_32F);
Rcw.copyTo(Tcw.rowRange(0, 3).colRange(0, 3));
tcw.copyTo(Tcw.rowRange(0, 3).col(3));
mCurrentFrame.SetPose(Tcw);
CreateInitialMapMonocular();
}
}
}
void Tracking::CreateInitialMapMonocular()
{
// Create KeyFrames
KeyFrame *pKFini = new KeyFrame(mInitialFrame, mpMap, mpKeyFrameDB);
KeyFrame *pKFcur = new KeyFrame(mCurrentFrame, mpMap, mpKeyFrameDB);
pKFini->ComputeBoW();
pKFcur->ComputeBoW();
// Insert KFs in the map
mpMap->AddKeyFrame(pKFini);
mpMap->AddKeyFrame(pKFcur);
// Create MapPoints and associate to keyframes
for (size_t i = 0; i < mvIniMatches.size(); i++)
{
if (mvIniMatches[i] < 0)
continue;
// Create MapPoint.
cv::Mat worldPos(mvIniP3D[i]);
MapPoint *pMP = new MapPoint(worldPos, pKFcur, mpMap);
pKFini->AddMapPoint(pMP, i);
pKFcur->AddMapPoint(pMP, mvIniMatches[i]);
pMP->AddObservation(pKFini, i);
pMP->AddObservation(pKFcur, mvIniMatches[i]);
pMP->ComputeDistinctiveDescriptors();
pMP->UpdateNormalAndDepth();
// Fill Current Frame structure
mCurrentFrame.mvpMapPoints[mvIniMatches[i]] = pMP;
mCurrentFrame.mvbOutlier[mvIniMatches[i]] = false;
// Add to Map
mpMap->AddMapPoint(pMP);
}
// Update Connections
pKFini->UpdateConnections();
pKFcur->UpdateConnections();
// Bundle Adjustment
cout << "New Map created with " << mpMap->MapPointsInMap() << " points" << endl;
Optimizer::GlobalBundleAdjustemnt(mpMap, 20);
// Set median depth to 1
float medianDepth = pKFini->ComputeSceneMedianDepth(2);
float invMedianDepth = 1.0f / medianDepth;
if (medianDepth < 0 || pKFcur->TrackedMapPoints(1) < 100)
{
cout << "Wrong initialization, reseting..." << endl;
Reset();
return;
}
// Scale initial baseline
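// Monocular SLAM is scale-ambiguous, so the map is normalised such that the median
// scene depth of the first keyframe is 1: both the baseline translation and every
// MapPoint are divided by medianDepth, i.e.
//   t' = t * invMedianDepth,  X' = X * invMedianDepth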
cv::Mat Tc2w = pKFcur->GetPose();
Tc2w.col(3).rowRange(0, 3) = Tc2w.col(3).rowRange(0, 3) * invMedianDepth;
pKFcur->SetPose(Tc2w);
// Scale points
vector<MapPoint *> vpAllMapPoints = pKFini->GetMapPointMatches();
for (size_t iMP = 0; iMP < vpAllMapPoints.size(); iMP++)
{
if (vpAllMapPoints[iMP])
{
MapPoint *pMP = vpAllMapPoints[iMP];
pMP->SetWorldPos(pMP->GetWorldPos() * invMedianDepth);
}
}
mpLocalMapper->InsertKeyFrame(pKFini);
mpLocalMapper->InsertKeyFrame(pKFcur);
mCurrentFrame.SetPose(pKFcur->GetPose());
mnLastKeyFrameId = mCurrentFrame.mnId;
mpLastKeyFrame = pKFcur;
mvpLocalKeyFrames.push_back(pKFcur);
mvpLocalKeyFrames.push_back(pKFini);
mvpLocalMapPoints = mpMap->GetAllMapPoints();
mpReferenceKF = pKFcur;
mCurrentFrame.mpReferenceKF = pKFcur;
mLastFrame = Frame(mCurrentFrame);
mpMap->SetReferenceMapPoints(mvpLocalMapPoints);
mpMapDrawer->SetCurrentCameraPose(pKFcur->GetPose());
mpMap->mvpKeyFrameOrigins.push_back(pKFini);
mState = OK;
}
void Tracking::CheckReplacedInLastFrame()
{
for (int i = 0; i < mLastFrame.N; i++)
{
MapPoint *pMP = mLastFrame.mvpMapPoints[i];
if (pMP)
{
MapPoint *pRep = pMP->GetReplaced();
if (pRep)
{
mLastFrame.mvpMapPoints[i] = pRep;
}
}
}
}
bool Tracking::TrackReferenceKeyFrame()
{
// Compute Bag of Words vector
mCurrentFrame.ComputeBoW();
// We perform first an ORB matching with the reference keyframe
// If enough matches are found we setup a PnP solver
ORBmatcher matcher(0.7, true);
vector<MapPoint *> vpMapPointMatches;
int nmatches = matcher.SearchByBoW(mpReferenceKF, mCurrentFrame, vpMapPointMatches);
if (nmatches < 15)
return false;
mCurrentFrame.mvpMapPoints = vpMapPointMatches;
mCurrentFrame.SetPose(mLastFrame.mTcw);
Optimizer::PoseOptimization(&mCurrentFrame);
// Discard outliers
int nmatchesMap = 0;
for (int i = 0; i < mCurrentFrame.N; i++)
{
if (mCurrentFrame.mvpMapPoints[i])
{
if (mCurrentFrame.mvbOutlier[i])
{
MapPoint *pMP = mCurrentFrame.mvpMapPoints[i];
mCurrentFrame.mvpMapPoints[i] = static_cast<MapPoint *>(NULL);
mCurrentFrame.mvbOutlier[i] = false;
pMP->mbTrackInView = false;
pMP->mnLastFrameSeen = mCurrentFrame.mnId;
nmatches--;
}
else if (mCurrentFrame.mvpMapPoints[i]->Observations() > 0)
nmatchesMap++;
}
}
return nmatchesMap >= 10;
}
void Tracking::UpdateLastFrame()
{
// Update pose according to reference keyframe
KeyFrame *pRef = mLastFrame.mpReferenceKF;
cv::Mat Tlr = mlRelativeFramePoses.back();
mLastFrame.SetPose(Tlr * pRef->GetPose());
if (mnLastKeyFrameId == mLastFrame.mnId || mSensor == System::MONOCULAR || !mbOnlyTracking)
return;
// Create "visual odometry" MapPoints
// We sort points according to their measured depth by the stereo/RGB-D sensor
vector<pair<float, int>> vDepthIdx;
vDepthIdx.reserve(mLastFrame.N);
for (int i = 0; i < mLastFrame.N; i++)
{
float z = mLastFrame.mvDepth[i];
if (z > 0)
{
vDepthIdx.push_back(make_pair(z, i));
}
}
if (vDepthIdx.empty())
return;
sort(vDepthIdx.begin(), vDepthIdx.end());
// We insert all close points (depth<mThDepth)
// If less than 100 close points, we insert the 100 closest ones.
int nPoints = 0;
for (size_t j = 0; j < vDepthIdx.size(); j++)
{
int i = vDepthIdx[j].second;
bool bCreateNew = false;
MapPoint *pMP = mLastFrame.mvpMapPoints[i];
if (!pMP)
bCreateNew = true;
else if (pMP->Observations() < 1)
{
bCreateNew = true;
}
if (bCreateNew)
{
cv::Mat x3D = mLastFrame.UnprojectStereo(i);
MapPoint *pNewMP = new MapPoint(x3D, mpMap, &mLastFrame, i);
mLastFrame.mvpMapPoints[i] = pNewMP;
mlpTemporalPoints.push_back(pNewMP);
nPoints++;
}
else
{
nPoints++;
}
if (vDepthIdx[j].first > mThDepth && nPoints > 100)
break;
}
}
bool Tracking::TrackWithMotionModel()
{
ORBmatcher matcher(0.9, true);
// Update last frame pose according to its reference keyframe
// Create "visual odometry" points if in Localization Mode
UpdateLastFrame();
mCurrentFrame.SetPose(mVelocity * mLastFrame.mTcw);
fill(mCurrentFrame.mvpMapPoints.begin(), mCurrentFrame.mvpMapPoints.end(), static_cast<MapPoint *>(NULL));
// Project points seen in previous frame
int th;
if (mSensor != System::STEREO)
th = 15;
else
th = 7;
int nmatches = matcher.SearchByProjection(mCurrentFrame, mLastFrame, th, mSensor == System::MONOCULAR);
// If few matches are found, use a wider window search
if (nmatches < 20)
{
fill(mCurrentFrame.mvpMapPoints.begin(), mCurrentFrame.mvpMapPoints.end(), static_cast<MapPoint *>(NULL));
nmatches = matcher.SearchByProjection(mCurrentFrame, mLastFrame, 2 * th, mSensor == System::MONOCULAR);
}
if (nmatches < 20)
return false;
// Optimize frame pose with all matches
// Optimizer::PoseOptimization(&mCurrentFrame);
Optimizer::PoseOptimizationWithCars(&mCurrentFrame);
// Discard outliers
int nmatchesMap = 0;
for (int i = 0; i < mCurrentFrame.N; i++)
{
if (mCurrentFrame.mvpMapPoints[i])
{
if (mCurrentFrame.mvbOutlier[i])
{
MapPoint *pMP = mCurrentFrame.mvpMapPoints[i];
mCurrentFrame.mvpMapPoints[i] = static_cast<MapPoint *>(NULL);
mCurrentFrame.mvbOutlier[i] = false;
pMP->mbTrackInView = false;
pMP->mnLastFrameSeen = mCurrentFrame.mnId;
nmatches--;
}
else if (mCurrentFrame.mvpMapPoints[i]->Observations() > 0)
nmatchesMap++;
}
}
if (mbOnlyTracking)
{
mbVO = nmatchesMap < 10;
return nmatches > 20;
}
return nmatchesMap >= 10;
}
bool Tracking::TrackLocalMap()
{
// We have an estimation of the camera pose and some map points tracked in the frame.
// We retrieve the local map and try to find matches to points in the local map.
UpdateLocalMap();
SearchLocalPoints();
// Optimize Pose
// Optimizer::PoseOptimization(&mCurrentFrame);
Optimizer::PoseOptimizationWithCars(&mCurrentFrame);
mnMatchesInliers = 0;
// Update MapPoints Statistics
for (int i = 0; i < mCurrentFrame.N; i++)
{
if (mCurrentFrame.mvpMapPoints[i])
{
if (!mCurrentFrame.mvbOutlier[i])
{
mCurrentFrame.mvpMapPoints[i]->IncreaseFound();
if (!mbOnlyTracking)
{
if (mCurrentFrame.mvpMapPoints[i]->Observations() > 0)
mnMatchesInliers++;
}
else
mnMatchesInliers++;
}
else if (mSensor == System::STEREO)
mCurrentFrame.mvpMapPoints[i] = static_cast<MapPoint *>(NULL);
}
}
// Decide if the tracking was successful
// More restrictive if there was a relocalization recently
if (mCurrentFrame.mnId < mnLastRelocFrameId + mMaxFrames && mnMatchesInliers < 50)
return false;
if (mnMatchesInliers < 30)
return false;
else
return true;
}
bool Tracking::NeedNewKeyFrame()
{
if (mbOnlyTracking)
return false;
// If Local Mapping is frozen by a Loop Closure do not insert keyframes
if (mpLocalMapper->isStopped() || mpLocalMapper->stopRequested())
return false;
const int nKFs = mpMap->KeyFramesInMap();
// Do not insert keyframes if not enough frames have passed from last relocalisation
if (mCurrentFrame.mnId < mnLastRelocFrameId + mMaxFrames && nKFs > mMaxFrames)
return false;
// Tracked MapPoints in the reference keyframe
int nMinObs = 3;
if (nKFs <= 2)
nMinObs = 2;
int nRefMatches = mpReferenceKF->TrackedMapPoints(nMinObs);
// Does Local Mapping accept keyframes?
bool bLocalMappingIdle = mpLocalMapper->AcceptKeyFrames();
// Check how many "close" points are being tracked and how many could be potentially created.
int nNonTrackedClose = 0;
int nTrackedClose = 0;
if (mSensor != System::MONOCULAR)
{
for (int i = 0; i < mCurrentFrame.N; i++)
{
if (mCurrentFrame.mvDepth[i] > 0 && mCurrentFrame.mvDepth[i] < mThDepth)
{
if (mCurrentFrame.mvpMapPoints[i] && !mCurrentFrame.mvbOutlier[i])
nTrackedClose++;
else
nNonTrackedClose++;
}
}
}
bool bNeedToInsertClose = (nTrackedClose < 100) && (nNonTrackedClose > 70);
// Thresholds
float thRefRatio = 0.75f;
if (nKFs < 2)
thRefRatio = 0.4f;
if (mSensor == System::MONOCULAR)
thRefRatio = 0.9f;
// Condition 1a: More than "MaxFrames" have passed from last keyframe insertion
const bool c1a = mCurrentFrame.mnId >= mnLastKeyFrameId + mMaxFrames;
// Condition 1b: More than "MinFrames" have passed and Local Mapping is idle
const bool c1b = (mCurrentFrame.mnId >= mnLastKeyFrameId + mMinFrames && bLocalMappingIdle);
// Condition 1c: tracking is weak
const bool c1c = mSensor != System::MONOCULAR && (mnMatchesInliers < nRefMatches * 0.25 || bNeedToInsertClose);
// Condition 2: Few tracked points compared to reference keyframe. Lots of visual odometry compared to map matches.
const bool c2 = ((mnMatchesInliers < nRefMatches * thRefRatio || bNeedToInsertClose) && mnMatchesInliers > 15);
if ((c1a || c1b || c1c) && c2)
{
// If the mapping accepts keyframes, insert keyframe.
// Otherwise send a signal to interrupt BA
if (bLocalMappingIdle)
{
return true;
}
else
{
mpLocalMapper->InterruptBA();
if (mSensor != System::MONOCULAR)
{
if (mpLocalMapper->KeyframesInQueue() < 3)
return true;
else
return false;
}
else
return false;
}
}
else
return false;
}
void Tracking::CreateNewKeyFrame()
{
if (!mpLocalMapper->SetNotStop(true))
return;
KeyFrame *pKF = new KeyFrame(mCurrentFrame, mpMap, mpKeyFrameDB);
mpReferenceKF = pKF;
mCurrentFrame.mpReferenceKF = pKF;
if (mSensor != System::MONOCULAR)
{
mCurrentFrame.UpdatePoseMatrices();
// We sort points by the measured depth by the stereo/RGBD sensor.
// We create all those MapPoints whose depth < mThDepth.
// If there are less than 100 close points we create the 100 closest.
vector<pair<float, int>> vDepthIdx;
vDepthIdx.reserve(mCurrentFrame.N);
for (int i = 0; i < mCurrentFrame.N; i++)
{
float z = mCurrentFrame.mvDepth[i];
if (z > 0)
{
vDepthIdx.push_back(make_pair(z, i));
}
}
if (!vDepthIdx.empty())
{
sort(vDepthIdx.begin(), vDepthIdx.end());
int nPoints = 0;
for (size_t j = 0; j < vDepthIdx.size(); j++)
{
int i = vDepthIdx[j].second;
bool bCreateNew = false;
MapPoint *pMP = mCurrentFrame.mvpMapPoints[i];
if (!pMP)
bCreateNew = true;
else if (pMP->Observations() < 1)
{
bCreateNew = true;
mCurrentFrame.mvpMapPoints[i] = static_cast<MapPoint *>(NULL);
}
if (bCreateNew)
{
cv::Mat x3D = mCurrentFrame.UnprojectStereo(i);
MapPoint *pNewMP = new MapPoint(x3D, pKF, mpMap);
pNewMP->AddObservation(pKF, i);
pKF->AddMapPoint(pNewMP, i);
pNewMP->ComputeDistinctiveDescriptors();
pNewMP->UpdateNormalAndDepth();
mpMap->AddMapPoint(pNewMP);
mCurrentFrame.mvpMapPoints[i] = pNewMP;
nPoints++;
}
else
{
nPoints++;
}
if (vDepthIdx[j].first > mThDepth && nPoints > 100)
break;
}
}
}
for (int i = 0; i < mCurrentFrame.mvvLabel.size(); i++)
{
if (mCurrentFrame.mvCarMatches[i] == -2)
{
Car *new_car = new Car(mCurrentFrame, mCurrentFrame.mvvLabel[i]);
mvpCar.push_back(new_car);
mpMap->AddMapCar(new_car);
}
}
mpLocalMapper->InsertKeyFrame(pKF);
mpLocalMapper->SetNotStop(false);
mnLastKeyFrameId = mCurrentFrame.mnId;
mpLastKeyFrame = pKF;
}
void Tracking::SearchLocalPoints()
{
// Do not search map points already matched
for (vector<MapPoint *>::iterator vit = mCurrentFrame.mvpMapPoints.begin(), vend = mCurrentFrame.mvpMapPoints.end(); vit != vend; vit++)
{
MapPoint *pMP = *vit;
if (pMP)
{
if (pMP->isBad())
{
*vit = static_cast<MapPoint *>(NULL);
}
else
{
pMP->IncreaseVisible();
pMP->mnLastFrameSeen = mCurrentFrame.mnId;
pMP->mbTrackInView = false;
}
}
}
int nToMatch = 0;
// Project points in frame and check its visibility
for (vector<MapPoint *>::iterator vit = mvpLocalMapPoints.begin(), vend = mvpLocalMapPoints.end(); vit != vend; vit++)
{
MapPoint *pMP = *vit;
if (pMP->mnLastFrameSeen == mCurrentFrame.mnId)
continue;
if (pMP->isBad())
continue;
// Project (this fills MapPoint variables for matching)
if (mCurrentFrame.isInFrustum(pMP, 0.5))
{
pMP->IncreaseVisible();
nToMatch++;
}
}
if (nToMatch > 0)
{
ORBmatcher matcher(0.8);
int th = 1;
if (mSensor == System::RGBD)
th = 3;
// If the camera has been relocalised recently, perform a coarser search
if (mCurrentFrame.mnId < mnLastRelocFrameId + 2)
th = 5;
matcher.SearchByProjection(mCurrentFrame, mvpLocalMapPoints, th);
}
}
void Tracking::UpdateLocalMap()
{
// This is for visualization
mpMap->SetReferenceMapPoints(mvpLocalMapPoints);
// Update
UpdateLocalKeyFrames();
UpdateLocalPoints();
}
void Tracking::UpdateLocalPoints()
{
mvpLocalMapPoints.clear();
for (vector<KeyFrame *>::const_iterator itKF = mvpLocalKeyFrames.begin(), itEndKF = mvpLocalKeyFrames.end(); itKF != itEndKF; itKF++)
{
KeyFrame *pKF = *itKF;
const vector<MapPoint *> vpMPs = pKF->GetMapPointMatches();
for (vector<MapPoint *>::const_iterator itMP = vpMPs.begin(), itEndMP = vpMPs.end(); itMP != itEndMP; itMP++)
{
MapPoint *pMP = *itMP;
if (!pMP)
continue;
if (pMP->mnTrackReferenceForFrame == mCurrentFrame.mnId)
continue;
if (!pMP->isBad())
{
mvpLocalMapPoints.push_back(pMP);
pMP->mnTrackReferenceForFrame = mCurrentFrame.mnId;
}
}
}
}
void Tracking::UpdateLocalKeyFrames()
{
// Each map point votes for the keyframes in which it has been observed
map<KeyFrame *, int> keyframeCounter;
for (int i = 0; i < mCurrentFrame.N; i++)
{
if (mCurrentFrame.mvpMapPoints[i])
{
MapPoint *pMP = mCurrentFrame.mvpMapPoints[i];
if (!pMP->isBad())
{
const map<KeyFrame *, size_t> observations = pMP->GetObservations();
for (map<KeyFrame *, size_t>::const_iterator it = observations.begin(), itend = observations.end(); it != itend; it++)
keyframeCounter[it->first]++;
}
else
{
mCurrentFrame.mvpMapPoints[i] = NULL;
}
}
}
if (keyframeCounter.empty())
return;
int max = 0;
KeyFrame *pKFmax = static_cast<KeyFrame *>(NULL);
mvpLocalKeyFrames.clear();
mvpLocalKeyFrames.reserve(3 * keyframeCounter.size());
// All keyframes that observe a map point are included in the local map. Also check which keyframe shares most points
for (map<KeyFrame *, int>::const_iterator it = keyframeCounter.begin(), itEnd = keyframeCounter.end(); it != itEnd; it++)
{
KeyFrame *pKF = it->first;
if (pKF->isBad())
continue;
if (it->second > max)
{
max = it->second;
pKFmax = pKF;
}
mvpLocalKeyFrames.push_back(it->first);
pKF->mnTrackReferenceForFrame = mCurrentFrame.mnId;
}
// Include also some not-already-included keyframes that are neighbors to already-included keyframes
for (vector<KeyFrame *>::const_iterator itKF = mvpLocalKeyFrames.begin(), itEndKF = mvpLocalKeyFrames.end(); itKF != itEndKF; itKF++)
{
// Limit the number of keyframes
if (mvpLocalKeyFrames.size() > 80)
break;
KeyFrame *pKF = *itKF;
const vector<KeyFrame *> vNeighs = pKF->GetBestCovisibilityKeyFrames(10);
for (vector<KeyFrame *>::const_iterator itNeighKF = vNeighs.begin(), itEndNeighKF = vNeighs.end(); itNeighKF != itEndNeighKF; itNeighKF++)
{
KeyFrame *pNeighKF = *itNeighKF;
if (!pNeighKF->isBad())
{
if (pNeighKF->mnTrackReferenceForFrame != mCurrentFrame.mnId)
{
mvpLocalKeyFrames.push_back(pNeighKF);
pNeighKF->mnTrackReferenceForFrame = mCurrentFrame.mnId;
break;
}
}
}
const set<KeyFrame *> spChilds = pKF->GetChilds();
for (set<KeyFrame *>::const_iterator sit = spChilds.begin(), send = spChilds.end(); sit != send; sit++)
{
KeyFrame *pChildKF = *sit;
if (!pChildKF->isBad())
{
if (pChildKF->mnTrackReferenceForFrame != mCurrentFrame.mnId)
{
mvpLocalKeyFrames.push_back(pChildKF);
pChildKF->mnTrackReferenceForFrame = mCurrentFrame.mnId;
break;
}
}
}
KeyFrame *pParent = pKF->GetParent();
if (pParent)
{
if (pParent->mnTrackReferenceForFrame != mCurrentFrame.mnId)
{
mvpLocalKeyFrames.push_back(pParent);
pParent->mnTrackReferenceForFrame = mCurrentFrame.mnId;
break;
}
}
}
if (pKFmax)
{
mpReferenceKF = pKFmax;
mCurrentFrame.mpReferenceKF = mpReferenceKF;
}
}
bool Tracking::Relocalization()
{
// Compute Bag of Words Vector
mCurrentFrame.ComputeBoW();
// Relocalization is performed when tracking is lost
// Track Lost: Query KeyFrame Database for keyframe candidates for relocalisation
vector<KeyFrame *> vpCandidateKFs = mpKeyFrameDB->DetectRelocalizationCandidates(&mCurrentFrame);
if (vpCandidateKFs.empty())
return false;
const int nKFs = vpCandidateKFs.size();
// We perform first an ORB matching with each candidate
// If enough matches are found we setup a PnP solver
ORBmatcher matcher(0.75, true);
vector<PnPsolver *> vpPnPsolvers;
vpPnPsolvers.resize(nKFs);
vector<vector<MapPoint *>> vvpMapPointMatches;
vvpMapPointMatches.resize(nKFs);
vector<bool> vbDiscarded;
vbDiscarded.resize(nKFs);
int nCandidates = 0;
for (int i = 0; i < nKFs; i++)
{
KeyFrame *pKF = vpCandidateKFs[i];
if (pKF->isBad())
vbDiscarded[i] = true;
else
{
int nmatches = matcher.SearchByBoW(pKF, mCurrentFrame, vvpMapPointMatches[i]);
if (nmatches < 15)
{
vbDiscarded[i] = true;
continue;
}
else
{
PnPsolver *pSolver = new PnPsolver(mCurrentFrame, vvpMapPointMatches[i]);
pSolver->SetRansacParameters(0.99, 10, 300, 4, 0.5, 5.991);
vpPnPsolvers[i] = pSolver;
nCandidates++;
}
}
}
// Alternatively perform some iterations of P4P RANSAC
// until we find a camera pose supported by enough inliers
bool bMatch = false;
ORBmatcher matcher2(0.9, true);
while (nCandidates > 0 && !bMatch)
{
for (int i = 0; i < nKFs; i++)
{
if (vbDiscarded[i])
continue;
// Perform 5 Ransac Iterations
vector<bool> vbInliers;
int nInliers;
bool bNoMore;
PnPsolver *pSolver = vpPnPsolvers[i];
cv::Mat Tcw = pSolver->iterate(5, bNoMore, vbInliers, nInliers);
// If Ransac reaches max. iterations discard keyframe
if (bNoMore)
{
vbDiscarded[i] = true;
nCandidates--;
}
// If a Camera Pose is computed, optimize
if (!Tcw.empty())
{
Tcw.copyTo(mCurrentFrame.mTcw);
set<MapPoint *> sFound;
const int np = vbInliers.size();
for (int j = 0; j < np; j++)
{
if (vbInliers[j])
{
mCurrentFrame.mvpMapPoints[j] = vvpMapPointMatches[i][j];
sFound.insert(vvpMapPointMatches[i][j]);
}
else
mCurrentFrame.mvpMapPoints[j] = NULL;
}
int nGood = Optimizer::PoseOptimization(&mCurrentFrame);
if (nGood < 10)
continue;
for (int io = 0; io < mCurrentFrame.N; io++)
if (mCurrentFrame.mvbOutlier[io])
mCurrentFrame.mvpMapPoints[io] = static_cast<MapPoint *>(NULL);
// If few inliers, search by projection in a coarse window and optimize again
if (nGood < 50)
{
int nadditional = matcher2.SearchByProjection(mCurrentFrame, vpCandidateKFs[i], sFound, 10, 100);
if (nadditional + nGood >= 50)
{
nGood = Optimizer::PoseOptimization(&mCurrentFrame);
// If many inliers but still not enough, search by projection again in a narrower window
// the camera has been already optimized with many points
if (nGood > 30 && nGood < 50)
{
sFound.clear();
for (int ip = 0; ip < mCurrentFrame.N; ip++)
if (mCurrentFrame.mvpMapPoints[ip])
sFound.insert(mCurrentFrame.mvpMapPoints[ip]);
nadditional = matcher2.SearchByProjection(mCurrentFrame, vpCandidateKFs[i], sFound, 3, 64);
// Final optimization
if (nGood + nadditional >= 50)
{
nGood = Optimizer::PoseOptimization(&mCurrentFrame);
for (int io = 0; io < mCurrentFrame.N; io++)
if (mCurrentFrame.mvbOutlier[io])
mCurrentFrame.mvpMapPoints[io] = NULL;
}
}
}
}
// If the pose is supported by enough inliers stop ransacs and continue
if (nGood >= 50)
{
bMatch = true;
break;
}
}
}
}
if (!bMatch)
{
return false;
}
else
{
mnLastRelocFrameId = mCurrentFrame.mnId;
return true;
}
}
bool Tracking::needCreate(vector<double> &bbox, int &index, int &n)
{
cout << "enter need Create" << endl;
vector<cv::Point> pts2;
cv::Point PointArray1[4], PointArray2[4];
Tracking::get_bbx(bbox, pts2, 1.5); // corners of the current detection box (inflated by a factor of 1.5)
if (bbox[1] < 338 && bbox[1] > 270 && bbox[2] > 558)
{
return false; // skip detections inside this fixed BEV region (empirical mask, presumably for the ego vehicle)
}
PointArray2[0] = pts2[0]; // corners of the detected box
PointArray2[1] = pts2[1];
PointArray2[2] = pts2[2];
PointArray2[3] = pts2[3];
float max_iou = 0.0;
for (int i = 0; i < n; i++)
{
Car *car = mvpCar[i];
if (!car->isBad())
{
if (car->isTracked())
{
continue;
}
vector<cv::Mat> vmbboxPose = car->mvmBboxWorldPosePredict; // predicted bounding-box position
vector<cv::Point> vp_bev;
Tracking::world2bev(vmbboxPose, vp_bev, mCurrentFrame.mTcw); // project the vehicle into the current BEV view
PointArray1[0] = vp_bev[0]; // corners of the predicted box
PointArray1[1] = vp_bev[1];
PointArray1[2] = vp_bev[2];
PointArray1[3] = vp_bev[3];
// cout << vp_bev << " " << pts2[0] << endl; // TODO: issue here
float IOU = get_IOU(PointArray1, PointArray2);
if (max_iou < IOU)
max_iou = IOU;
double th;
if (car->mnVisual > 1)
{
th = 0.8;
}
else
{
th = 0.2;
}
if (IOU > th) // matched: no need to create a new car; update its position and velocity here
{
// Step 1: update the most recent observation frame
car->updateObservations(mCurrentFrame.mnId);
// Step 2: update the bounding-box correspondence
pair<int, int> p1(mCurrentFrame.mnId, index);
car->mv_pair_connect.push_back(p1);
car->setTrackedState(true); // mark the car as tracked in the current frame
// Step 3: update world position and velocity
car->updateState(bbox, mCurrentFrame.mRwc, mCurrentFrame.mOw, mdelta_t);
break;
}
}
}
return (max_iou == 0); // create a new car only if the detection overlapped no predicted box
}
void Tracking::get_bbx(const vector<double> &label1, std::vector<cv::Point> &pts, double scale)
{
cv::Point p1, p2, p3, p4;
double x = label1[1];
double y = label1[2];
double w = label1[3];
double l = label1[4];
double yaw = label1[7];
double cos_yaw, sin_yaw;
w = w * scale;
l = l * scale;
cos_yaw = cos(yaw);
sin_yaw = sin(yaw);
// front left
p1.x = x - w / 2 * cos_yaw - l / 2 * sin_yaw;
p1.y = y - w / 2 * sin_yaw + l / 2 * cos_yaw;
// rear left
p2.x = x - w / 2 * cos_yaw + l / 2 * sin_yaw;
p2.y = y - w / 2 * sin_yaw - l / 2 * cos_yaw;
// rear right
p3.x = x + w / 2 * cos_yaw + l / 2 * sin_yaw;
p3.y = y + w / 2 * sin_yaw - l / 2 * cos_yaw;
// front right
p4.x = x + w / 2 * cos_yaw - l / 2 * sin_yaw;
p4.y = y + w / 2 * sin_yaw + l / 2 * cos_yaw;
pts.push_back(p1);
pts.push_back(p2);
pts.push_back(p3);
pts.push_back(p4);
}
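// Worked example (hypothetical numbers): for a box at (x, y) = (300, 400) with
// w = 20, l = 40, yaw = 0 and scale = 1, cos_yaw = 1 and sin_yaw = 0, so the corner
// formulas above reduce to the axis-aligned rectangle
//   p1 = (290, 420), p2 = (290, 380), p3 = (310, 380), p4 = (310, 420)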
float Tracking::get_IOU(cv::Point *PointArray1, cv::Point *PointArray2)
{
cv::Mat img = cv::Mat::zeros(608, 608, CV_8UC3);
cv::Mat tmp;
vector<cv::Point> idx;
cv::fillConvexPoly(img, PointArray1, 4, cv::Scalar(255, 0, 0));
cv::cvtColor(img, tmp, CV_RGB2GRAY);
cv::findNonZero(tmp, idx);
float area1 = idx.size();
cv::Mat img2 = cv::Mat::zeros(img.size(), CV_8UC3);
cv::fillConvexPoly(img2, PointArray2, 4, cv::Scalar(0, 255, 0));
cv::cvtColor(img2, tmp, CV_RGB2GRAY);
cv::findNonZero(tmp, idx);
float area2 = idx.size();
cv::fillConvexPoly(img2, PointArray1, 4, cv::Scalar(0, 0, 255));
cv::cvtColor(img2, tmp, CV_RGB2GRAY);
cv::findNonZero(tmp, idx);
float union_area = idx.size();
float inter_area = area1 + area2 - union_area;
float IOU = 0.0;
if (area1 != 0)
{
IOU = inter_area / area1;
}
return IOU;
}
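// Note that despite its name this function returns intersection / area1, i.e. the
// fraction of the predicted box covered by the detection, computed by rasterising both
// quadrilaterals. A minimal analytic alternative for the true IoU of rotated boxes
// (a sketch, assuming cv::RotatedRect inputs r1 and r2; not used by this project):
//
//   std::vector<cv::Point2f> inter;
//   float interArea = 0.f;
//   if (cv::rotatedRectangleIntersection(r1, r2, inter) != cv::INTERSECT_NONE)
//       interArea = (float)cv::contourArea(inter);
//   float iou = interArea / (r1.size.area() + r2.size.area() - interArea);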
void Tracking::PredictCarPos(Car *car)
{
if (!car->isBad())
{
car->mvmBboxWorldPosePredict.clear();
for (auto t : car->mvmBboxWorldPose)
{
cv::Mat tmp = car->mRcl * t + car->mtcl;
car->mvmBboxWorldPosePredict.push_back(tmp);
}
}
// car->mmPredWorldPos = car->mmVelocity * car->mWorldPos;
}
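// The prediction above appears to apply the car's last inter-frame motion (mRcl, mtcl)
// to each stored box corner:
//   p_pred = Rcl * p + tcl
// a constant-velocity assumption for the vehicle, mirroring the camera motion model.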
void Tracking::Reset()
{
cout << "System Reseting" << endl;
if (mpViewer)
{
mpViewer->RequestStop();
while (!mpViewer->isStopped())
usleep(3000);
}
// Reset Local Mapping
cout << "Reseting Local Mapper...";
mpLocalMapper->RequestReset();
cout << " done" << endl;
// Reset Loop Closing
cout << "Reseting Loop Closing...";
mpLoopClosing->RequestReset();
cout << " done" << endl;
// Clear BoW Database
cout << "Reseting Database...";
mpKeyFrameDB->clear();
cout << " done" << endl;
// Clear Map (this erases MapPoints and KeyFrames)
mpMap->clear();
KeyFrame::nNextId = 0;
Frame::nNextId = 0;
mState = NO_IMAGES_YET;
if (mpInitializer)
{
delete mpInitializer;
mpInitializer = static_cast<Initializer *>(NULL);
}
mlRelativeFramePoses.clear();
mlpReferences.clear();
mlFrameTimes.clear();
mlbLost.clear();
if (mpViewer)
mpViewer->Release();
}
void Tracking::ChangeCalibration(const string &strSettingPath)
{
cv::FileStorage fSettings(strSettingPath, cv::FileStorage::READ);
float fx = fSettings["Camera.fx"];
float fy = fSettings["Camera.fy"];
float cx = fSettings["Camera.cx"];
float cy = fSettings["Camera.cy"];
cv::Mat K = cv::Mat::eye(3, 3, CV_32F);
K.at<float>(0, 0) = fx;
K.at<float>(1, 1) = fy;
K.at<float>(0, 2) = cx;
K.at<float>(1, 2) = cy;
K.copyTo(mK);
cv::Mat DistCoef(4, 1, CV_32F);
DistCoef.at<float>(0) = fSettings["Camera.k1"];
DistCoef.at<float>(1) = fSettings["Camera.k2"];
DistCoef.at<float>(2) = fSettings["Camera.p1"];
DistCoef.at<float>(3) = fSettings["Camera.p2"];
const float k3 = fSettings["Camera.k3"];
if (k3 != 0)
{
DistCoef.resize(5);
DistCoef.at<float>(4) = k3;
}
DistCoef.copyTo(mDistCoef);
mbf = fSettings["Camera.bf"];
Frame::mbInitialComputations = true;
}
void Tracking::InformOnlyTracking(const bool &flag)
{
mbOnlyTracking = flag;
}
// Transform a pixel in the current BEV view to the BEV world (previous-frame) pixel coordinates
void Tracking::bev_current_bevworld(cv::Point &p_c, cv::Point &p_l, cv::Mat &T12)
{
cv::Mat R12, t12;
R12 = T12.rowRange(0, 3).colRange(0, 3);
t12 = T12.rowRange(0, 3).col(3);
cv::Mat p_uv_c_bev, p_cam_c_bev, p_cam_c_cam, p_cam_l_cam, p_cam_l_bev, p_uv_l_bev;
p_uv_c_bev = cv::Mat::zeros(3, 1, CV_32FC1);
p_cam_c_bev = cv::Mat::zeros(3, 1, CV_32FC1);
p_cam_c_cam = cv::Mat::zeros(3, 1, CV_32FC1);
p_cam_l_cam = cv::Mat::zeros(3, 1, CV_32FC1);
p_cam_l_bev = cv::Mat::zeros(3, 1, CV_32FC1);
p_uv_l_bev = cv::Mat::zeros(3, 1, CV_32FC1); // p_uv_c_bev: pixel (u, v) in the current BEV view
p_uv_c_bev.at<float>(0, 0) = p_c.x; // read the BEV pixel coordinates
p_uv_c_bev.at<float>(1, 0) = p_c.y;
p_uv_c_bev.at<float>(2, 0) = 1.0;
p_cam_c_bev = m_bev_K_invers * p_uv_c_bev; // BEV pixels -> BEV camera coordinates
cv::Mat R = (cv::Mat_<float>(3, 3) << 1, 0, 0, 0, 0, 1, 0, -1, 0);
p_cam_c_cam = R * p_cam_c_bev; // BEV camera -> perspective camera coordinates
p_cam_l_cam = R12 * p_cam_c_cam + t12; // current camera -> previous camera coordinates
// p_cam_l_cam.at<float>(1, 0) = 1.0;
p_cam_l_bev = R.t() * p_cam_l_cam; // previous camera -> previous BEV camera coordinates
p_uv_l_bev = m_bev_K * p_cam_l_bev; // previous BEV camera -> previous BEV pixel coordinates
p_l.x = p_uv_l_bev.at<float>(0, 0) / p_cam_l_bev.at<float>(2, 0);
p_l.y = p_uv_l_bev.at<float>(1, 0) / p_cam_l_bev.at<float>(2, 0);
}
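// The chain above composes to a single (sketched) expression, with R the fixed
// BEV<->camera axis swap defined in the function:
//   p_l ~ K_bev * R^T * ( R12 * R * K_bev^-1 * p_c + t12 )
// followed by the perspective division by the third coordinate.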
void Tracking::Get_Motion_Car_V(Car *car)
{
double vel = 3.6 * car->get_Vel(mdelta_t); // m/s -> km/h
float z = car->mCarWorldPose.at<float>(2, 0);
float x = car->mCarWorldPose.at<float>(0, 0);
float y = car->mCarWorldPose.at<float>(1, 0);
cv::Mat car_wo = (cv::Mat_<float>(3, 1) << x, y, z);
cv::Mat car_co = mCurrentFrame.mRcw * car_wo + mCurrentFrame.mtcw; // transform to camera coordinates
int u, v;
float fx, fy, cx, cy;
fx = mK.at<float>(0, 0);
fy = mK.at<float>(1, 1);
cx = mK.at<float>(0, 2);
cy = mK.at<float>(1, 2);
u = fx * (car_co.at<float>(0, 0) - 0.8) / (car_co.at<float>(2, 0) - 1) + cx;
v = fy * (car_co.at<float>(1, 0) - 0) / (car_co.at<float>(2, 0) - 1) + cy;
vector<int> tmp;
tmp.push_back(u);
tmp.push_back(v);
tmp.push_back(vel);
mvvCarV.push_back(tmp);
}
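// Two caveats worth noting: vel is in km/h but is truncated to int when pushed into
// the vector<int>, and the offsets (-0.8 on x, -1 on z, presumably a hand-tuned
// anchor for the on-screen label) are empirical constants.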
void Tracking::world2bev(vector<cv::Mat> &vm_3d_w, vector<cv::Point> &vp_bev, cv::Mat &Tcw) // world coordinates -> BEV pixels
{
for (auto p_3d_w : vm_3d_w)
{
cv::Mat Rcw, tcw, R_ij;
cv::Mat p_3d_w_mat = cv::Mat::zeros(3, 1, CV_32FC1);
cv::Point p_bev;
p_3d_w_mat.at<float>(0, 0) = p_3d_w.at<float>(0, 0); // copy the 3D world point
p_3d_w_mat.at<float>(1, 0) = p_3d_w.at<float>(1, 0);
p_3d_w_mat.at<float>(2, 0) = p_3d_w.at<float>(2, 0);
Rcw = Tcw.rowRange(0, 3).colRange(0, 3);
tcw = Tcw.rowRange(0, 3).col(3);
cv::Mat p_3d_c, p_3d_bev, p_uv_bev;
p_3d_c = Rcw * p_3d_w_mat + tcw; // world -> camera coordinates
cv::Mat Rbc = (cv::Mat_<float>(3, 3) << 1, 0, 0, 0, 0, -1, 0, 1, 0); // rotation from camera to BEV frame
p_3d_bev = Rbc * p_3d_c; // camera -> BEV coordinates
p_3d_bev.at<float>(2, 0) = 1.0; // force onto the z = 1 plane (height is discarded: ground-plane assumption)
p_uv_bev = m_bev_K * p_3d_bev; // BEV camera coordinates -> BEV pixel coordinates
p_bev.x = p_uv_bev.at<float>(0, 0) / p_3d_bev.at<float>(2, 0);
p_bev.y = p_uv_bev.at<float>(1, 0) / p_3d_bev.at<float>(2, 0);
vp_bev.push_back(p_bev);
}
}
} // namespace ORB_SLAM2