VDO-SLAM Source Code Reading Notes [1]: Dynamic Object Handling in Track()


1 GetSceneFlowObj

Computes the scene flow of the object point cloud, which is essentially the motion of each point between the previous and current frames.
① Obtain the 3D coordinates of the point in the two frames:

cv::Mat x3D_p = mLastFrame.UnprojectStereoObject(i,0);
cv::Mat x3D_c = mCurrentFrame.UnprojectStereoObject(i,0);

② Their difference is the scene flow of that point:

cv::Point3f flow3d;
flow3d.x = x3D_c.at<float>(0) - x3D_p.at<float>(0);
flow3d.y = x3D_c.at<float>(1) - x3D_p.at<float>(1);
flow3d.z = x3D_c.at<float>(2) - x3D_p.at<float>(2);
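
Putting the two pieces together, the whole function is essentially one loop over the object features; a minimal sketch (structure assumed from the snippets above and from the later code, not a verbatim quote):

    // Sketch of GetSceneFlowObj's core loop: for each object feature i,
    // back-project it in both frames and store the displacement.
    // vFlow_3d is the container consumed later by DynObjTracking.
    for (int i = 0; i < (int)mCurrentFrame.mvObjKeys.size(); ++i)
    {
        cv::Mat x3D_p = mLastFrame.UnprojectStereoObject(i, 0);
        cv::Mat x3D_c = mCurrentFrame.UnprojectStereoObject(i, 0);

        cv::Point3f flow3d;
        flow3d.x = x3D_c.at<float>(0) - x3D_p.at<float>(0);
        flow3d.y = x3D_c.at<float>(1) - x3D_p.at<float>(1);
        flow3d.z = x3D_c.at<float>(2) - x3D_p.at<float>(2);

        mCurrentFrame.vFlow_3d.push_back(flow3d);
    }

Note that the same index i addresses both frames, so the point correspondence must already be index-aligned; that is exactly the matching question raised below.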

Question: how the point correspondence between the two frames is established here is something to look into later.

2 DynObjTracking

This function matches objects across frames, in four steps: label processing + border check + motion/depth/size checks + label matching (by majority count).

The variables used at the successive stages of this function (each stage feeds the next):
UniLab: the deduplicated label vector (-1 = outlier, 0 = static, anything else = dynamic) →
Posi: for each label, the indices of the points carrying it →
sem_posi, ObjId: the labels that survive the border check, and the indices of their points →
SemPosNew, ObjIdNew (return values): the labels that survive the scene-flow check, and the indices of their points →
LabId: the global object IDs in the current frame

① Label processing: deduplicate the labels, count the points per label, and record the corresponding point indices.

auto UniLab = mCurrentFrame.vSemObjLabel;
std::sort(UniLab.begin(), UniLab.end());
UniLab.erase(std::unique( UniLab.begin(), UniLab.end() ), UniLab.end() );

// Collect the predicted labels and semantic labels in vector
std::vector<std::vector<int> > Posi(UniLab.size());
for (int i = 0; i < mCurrentFrame.vSemObjLabel.size(); ++i)
{
  // skip outliers
  if (mCurrentFrame.vObjLabel[i]==-1)
    continue;

  // save object label
  for (int j = 0; j < UniLab.size(); ++j)
  {
    if(mCurrentFrame.vSemObjLabel[i]==UniLab[j]){
      Posi[j].push_back(i);
      break;
    }
  }
}
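
As a side note, the sort/unique plus nested-loop grouping above costs O(N·L); the same grouping can be done in one pass with a std::map (my sketch, not the repo's code):

    #include <map>
    #include <vector>

    // Sketch: group point indices by semantic label in a single pass.
    // Outliers (objLabel == -1) are skipped; the map keeps the labels sorted,
    // matching what sort + unique achieves for UniLab.
    std::map<int, std::vector<int> > GroupBySemLabel(const std::vector<int> &semLabel,
                                                     const std::vector<int> &objLabel)
    {
        std::map<int, std::vector<int> > posi;
        for (size_t i = 0; i < semLabel.size(); ++i)
        {
            if (objLabel[i] == -1)
                continue; // skip outliers
            posi[semLabel[i]].push_back((int)i);
        }
        return posi;
    }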

② Border check: if most of an object lies on the image border, its points are labeled -1; otherwise its point indices and semantic label are kept.

    // point-index vectors of the objects away from the image border, and their semantic labels
    std::vector<std::vector<int> > ObjId;
    std::vector<int> sem_posi; // semantic label position for the objects
    // image border thresholds
    int shrin_thr_row=0, shrin_thr_col=0;
    if (mTestData==KITTI)
    {
        shrin_thr_row = 25;
        shrin_thr_col = 50;
    }
    // iterate over the labels, then over the feature points of each label
    for (int i = 0; i < Posi.size(); ++i)
    {
        // shrink the image to get rid of object parts on the boundary
        float count = 0, count_thres=0.5;
        for (int j = 0; j < Posi[i].size(); ++j)
        {
            const float u = mCurrentFrame.mvObjKeys[Posi[i][j]].pt.x;
            const float v = mCurrentFrame.mvObjKeys[Posi[i][j]].pt.y;
            // border test
            if ( v<shrin_thr_row || v>(mImGray.rows-shrin_thr_row) || u<shrin_thr_col || u>(mImGray.cols-shrin_thr_col) )
                count = count + 1;
        }
        // check the fraction of border points against the threshold
        if (count/Posi[i].size()>count_thres)
        {
            // cout << "Most part of this object is on the image boundary......" << endl;
            for (int k = 0; k < Posi[i].size(); ++k)
                mCurrentFrame.vObjLabel[Posi[i][k]] = -1;
            continue;
        }
        else
        {
            ObjId.push_back(Posi[i]);
            sem_posi.push_back(UniLab[i]);
        }
    }
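
The border test itself could be factored into a small helper; a sketch with the KITTI thresholds above baked in as defaults:

    // Sketch: true if pixel (u,v) lies within the shrink margins of the image.
    bool OnImageBorder(float u, float v, int rows, int cols,
                       int thr_row = 25, int thr_col = 50)
    {
        return v < thr_row || v > rows - thr_row ||
               u < thr_col || u > cols - thr_col;
    }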

③ For the objects that pass the border check, run a motion check (scene-flow magnitude), a depth check (mean point-cloud depth), and a size check (feature-point count).

	// point-index vectors of the objects that pass the motion + depth + size checks, and their semantic labels
	std::vector<std::vector<int> > ObjIdNew;
    std::vector<int> SemPosNew, obj_dis_tres(sem_posi.size(),0);
    // iterate over the off-border objects, then over the points of each
    for (int i = 0; i < ObjId.size(); ++i)
    {

        float obj_center_depth = 0,         // accumulated depth of the object's points (for the mean depth)
        sf_min=100,                         // minimum scene-flow magnitude
        sf_max=0,                           // maximum scene-flow magnitude
        sf_mean=0,                          // accumulated scene-flow magnitude (for the mean)
        sf_count=0;                         // number of points moving less than the threshold
        std::vector<int> sf_range(10,0);    // histogram of motion magnitudes over ten bins
        for (int j = 0; j < ObjId[i].size(); ++j)
        {
            obj_center_depth = obj_center_depth + mCurrentFrame.mvObjDepth[ObjId[i][j]];
            // const float sf_norm = cv::norm(mCurrentFrame.vFlow_3d[ObjId[i][j]]);
            // scene-flow 2-norm, i.e. the displacement; note only the x and z components enter it (vertical motion ignored)
            float sf_norm = std::sqrt(mCurrentFrame.vFlow_3d[ObjId[i][j]].x*mCurrentFrame.vFlow_3d[ObjId[i][j]].x + mCurrentFrame.vFlow_3d[ObjId[i][j]].z*mCurrentFrame.vFlow_3d[ObjId[i][j]].z);
            if (sf_norm<fSFMgThres)
                sf_count = sf_count+1;
            if(sf_norm<sf_min)
                sf_min = sf_norm;
            if(sf_norm>sf_max)
                sf_max = sf_norm;
            sf_mean = sf_mean + sf_norm;
            {
                if (0.0<=sf_norm && sf_norm<0.05)
                    sf_range[0] = sf_range[0] + 1;
                else if (0.05<=sf_norm && sf_norm<0.1)
                    sf_range[1] = sf_range[1] + 1;
                else if (0.1<=sf_norm && sf_norm<0.2)
                    sf_range[2] = sf_range[2] + 1;
                else if (0.2<=sf_norm && sf_norm<0.4)
                    sf_range[3] = sf_range[3] + 1;
                else if (0.4<=sf_norm && sf_norm<0.8)
                    sf_range[4] = sf_range[4] + 1;
                else if (0.8<=sf_norm && sf_norm<1.6)
                    sf_range[5] = sf_range[5] + 1;
                else if (1.6<=sf_norm && sf_norm<3.2)
                    sf_range[6] = sf_range[6] + 1;
                else if (3.2<=sf_norm && sf_norm<6.4)
                    sf_range[7] = sf_range[7] + 1;
                else if (6.4<=sf_norm && sf_norm<12.8)
                    sf_range[8] = sf_range[8] + 1;
                else if (12.8<=sf_norm && sf_norm<25.6)
                    sf_range[9] = sf_range[9] + 1;
            }
        }

        // ratio check: if the fraction of low-motion points is above the threshold, the object is marked static
        if (sf_count/ObjId[i].size()>fSFDsThres)
        {
            // label this object as static background
            for (int k = 0; k < ObjId[i].size(); ++k)
                mCurrentFrame.vObjLabel[ObjId[i][k]] = 0;
            continue;
        }
        // mean-depth check + point-count check (the size check)
        else if (obj_center_depth/ObjId[i].size()>mThDepthObj || ObjId[i].size()<150)
        {
            obj_dis_tres[i]=-1;
            // cout << "object " << sem_posi[i] <<" is too far away or too small! " << obj_center_depth/ObjId[i].size() << endl;
            // label this object as far away object
            for (int k = 0; k < ObjId[i].size(); ++k)
                mCurrentFrame.vObjLabel[ObjId[i][k]] = -1;
            continue;
        }
        else
        {
            // cout << "get new objects!" << endl;
            ObjIdNew.push_back(ObjId[i]);
            SemPosNew.push_back(sem_posi[i]);
        }
    }
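
Incidentally, the long if/else chain filling sf_range is a histogram whose bin edges are 0.05·2^k, so the bin index can be computed directly (my sketch, not the repo's code):

    #include <cmath>

    // Sketch: map a scene-flow magnitude to its sf_range bin.
    // Bins: [0,0.05), [0.05,0.1), [0.1,0.2), ..., [12.8,25.6).
    // Returns -1 for values >= 25.6, which the original chain simply ignores.
    int SfRangeBin(float sf_norm)
    {
        if (sf_norm < 0.05f)
            return 0;
        int bin = 1 + (int)std::floor(std::log2(sf_norm / 0.05f));
        return (bin <= 9) ? bin : -1;
    }

With this, the whole chain collapses to computing b = SfRangeBin(sf_norm) and incrementing sf_range[b] whenever b >= 0.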

④ Link each object to the best-matching object in the last frame; otherwise (no match found, or this is the first frame) create a new object.

    // initial case; max_id is the next global object ID to assign
    if (f_id==1)
        max_id = 1;

    // save current label id
    // global object IDs in the current frame
    std::vector<int> LabId(ObjIdNew.size());
    // iterate over the current frame's objects, then over their feature points
    for (int i = 0; i < ObjIdNew.size(); ++i)
    {
        // save the semantic labels these points carried in the last frame
        std::vector<int> Lb_last;
        for (int k = 0; k < ObjIdNew[i].size(); ++k)
            Lb_last.push_back(mLastFrame.vSemObjLabel[ObjIdNew[i][k]]);

        // find label that appears most in Lb_last()
        // count the last-frame labels and sort them by frequency
        // (1) count duplicates
        std::map<int, int> dups;
        for(int k : Lb_last)
            ++dups[k];
        // (2) and sort them by descending order
        std::vector<std::pair<int, int> > sorted;
        for (auto k : dups)
            sorted.push_back(std::make_pair(k.first,k.second));
        std::sort(sorted.begin(), sorted.end(), SortPairInt);

        // label the object in current frame
        int New_lab = sorted[0].first;
        // cout << " what is in the new label: " << New_lab << endl;
        // initial case: every element of ObjIdNew becomes a new object
        if (max_id==1)
        {
            LabId[i] = max_id;
            for (int k = 0; k < ObjIdNew[i].size(); ++k)
                mCurrentFrame.vObjLabel[ObjIdNew[i][k]] = max_id;
            max_id = max_id + 1;
        }
        else
        {
            bool exist = false;
            // link to the last-frame object whose label matches best
            // (note the inner loop below reuses k, shadowing the outer k; harmless only because of the break)
            for (int k = 0; k < mLastFrame.nSemPosition.size(); ++k)
            {
                if (mLastFrame.nSemPosition[k]==New_lab && mLastFrame.bObjStat[k])
                {
                    LabId[i] = mLastFrame.nModLabel[k];
                    for (int k = 0; k < ObjIdNew[i].size(); ++k)
                        mCurrentFrame.vObjLabel[ObjIdNew[i][k]] = LabId[i];
                    exist = true;
                    break;
                }
            }
            // no match found: create a new object
            if (exist==false)
            {
                LabId[i] = max_id;
                for (int k = 0; k < ObjIdNew[i].size(); ++k)
                    mCurrentFrame.vObjLabel[ObjIdNew[i][k]] = max_id;
                max_id = max_id + 1;
            }
        }

    }
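
SortPairInt itself is defined elsewhere in the repo; given how it is used here (most frequent label first), it is presumably a descending comparator on the count, along these lines:

    // Presumed shape of SortPairInt (check the repo for the actual definition):
    bool SortPairInt(const std::pair<int,int> &a, const std::pair<int,int> &b)
    {
        return a.second > b.second; // larger count first
    }
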
3 Object Motion Estimation (a block inside Track(), not a separate function)

This part computes each object's motion, in two main stages: an initial PnP/RANSAC solve, then an optimization over the two frames.
① PnP/RANSAC solve;

	// point indices of the current object
    std::vector<int> ObjIdTest = ObjIdNew[i];
    mCurrentFrame.vnObjID[i] = ObjIdTest;
    // stores the inliers of the pose about to be computed
    std::vector<int> ObjIdTest_in;
    mCurrentFrame.mInitModel = GetInitModelObj(ObjIdTest,ObjIdTest_in,i);

GetInitModelObj calls two OpenCV functions to do the work. One subtlety: the 3D points fed in are the space points recovered from the object in the last frame, while the 2D points are the object's pixel observations in the current frame. As I understand it, the pose recovered this way is therefore the camera motion as "seen through" the moving object, i.e. a virtual camera motion that absorbs the object's own motion.

	// recover the pose transform from the 3D-2D matches
	cv::solvePnPRansac(pre_3d, cur_2d, camera_mat, distCoeffs, Rvec, Tvec, false,
               iter_num, reprojectionError, confidence, inliers, cv::SOLVEPNP_AP3P); // AP3P EPNP P3P ITERATIVE DLS
	// recover the rotation matrix from the angle-axis vector
    cv::Rodrigues(Rvec, d);
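
For completeness, the Rvec/Tvec pair is then typically packed into a 4x4 homogeneous transform; a sketch of that step (assumed rather than quoted from the repo, which stores the rotation in d):

    // Sketch: assemble the PnP result into a 4x4 pose matrix.
    cv::Mat Pose = cv::Mat::eye(4, 4, CV_32F);
    cv::Mat R, t;
    cv::Rodrigues(Rvec, R);          // rotation vector -> 3x3 rotation matrix
    R.convertTo(R, CV_32F);          // solvePnPRansac outputs are double
    Tvec.convertTo(t, CV_32F);
    R.copyTo(Pose.rowRange(0, 3).colRange(0, 3));
    t.copyTo(Pose.rowRange(0, 3).col(3));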

② Optimize the pose. There are two variants depending on whether optical flow is used; for now, focus on the one without optical flow.

    std::vector<int> InlierID;
    // optimize jointly with optical flow
    if (bJoint)
    {
        cv::Mat Obj_X_tmp = Optimizer::PoseOptimizationFlow2(&mCurrentFrame,&mLastFrame,ObjIdTest_in,InlierID);
        mCurrentFrame.vObjMod[i] = Converter::toInvMatrix(mCurrentFrame.mTcw)*Obj_X_tmp;
    }
    // optimize without optical flow
    else
        mCurrentFrame.vObjMod[i] = Optimizer::PoseOptimizationObjMot(&mCurrentFrame,&mLastFrame,ObjIdTest_in,InlierID);
3.1 PoseOptimizationObjMot

① The first vertex of this optimization problem is the object's pose transform, which is also the quantity we ultimately want returned.

    g2o::VertexSE3Expmap * vSE3 = new g2o::VertexSE3Expmap();
    // cv::Mat Init = cv::Mat::eye(4,4,CV_32F); // initial with identity matrix
    // built from the current camera pose (estimated from static points) and the
    // object-induced "camera" pose (estimated from points on the object)
    cv::Mat Init = Converter::toInvMatrix(pCurFrame->mTcw)*pCurFrame->mInitModel; // (the repo's "initial with identity matrix" comment is stale)
    vSE3->setEstimate(Converter::toSE3Quat(Init));
    vSE3->setId(0);
    vSE3->setFixed(false);
    optimizer.addVertex(vSE3);

The idea behind Init: mTcw encodes the camera motion, i.e. the change of camera pose between the two frames; mInitModel encodes the camera pose change computed from points on the object (the virtual camera motion from the PnP step above). Composing the inverse of the one with the other cancels the shared camera motion, and what remains is exactly the object's own pose change; that is what the Init expression computes.
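
In symbols (my reading, using the paper's notation): let $X_k$ be the current camera pose in the world frame, so $X_k^{-1} = \texttt{mTcw}$, and let $G_k = \texttt{mInitModel}$ be the virtual camera motion returned by the object PnP, i.e. $p_k^i = \pi(G_k\, m_{k-1}^i)$. Matching this against the edge's error term below gives
$$G_k = X_k^{-1} H_k \quad\Longrightarrow\quad \mathrm{Init} = X_k\, G_k = \texttt{mTcw}^{-1} \cdot \texttt{mInitModel} = H_k,$$
the object motion expressed in the world frame.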

② The author's custom unary edge: its vertex is the pose being optimized, and its error is the reprojection error of a last-frame point transformed by that pose and projected into the current frame. The corresponding formula in the paper is
$$e_{obj}^i = p_k^i - \pi(X_k^{-1} H_k\, m_{k-1}^i) = p_k^i - \pi(G_k\, m_{k-1}^i)$$
In the code, Xw corresponds to $m_{k-1}$, the point's position in the last frame; PP corresponds to the projection $\pi(X_k^{-1}\,\cdot\,)$; and the vertex corresponds to $H_k$, the pose being optimized.

	for(int i=0; i<N; i++)
    {
        if(mono)
        {
            nInitialCorrespondences++;
            vIsOutlier[i] = false;

            // the measured observation: the point's pixel in the current frame
            Eigen::Matrix<double,2,1> obs;
            const cv::KeyPoint &kpUn = pCurFrame->mvObjKeys[ObjId[i]];
            obs << kpUn.pt.x, kpUn.pt.y;

            // the author's custom edge type
            g2o::EdgeSE3ProjectXYZOnlyObjMotion* e = new g2o::EdgeSE3ProjectXYZOnlyObjMotion();

            e->setVertex(0, dynamic_cast<g2o::OptimizableGraph::Vertex*>(optimizer.vertex(0)));
            e->setMeasurement(obs);
            // const float invSigma2 = 1.0;
            Eigen::Matrix2d invSigma2;
            // invSigma2 << 1.0/flo_co.x, 0, 0, 1.0/flo_co.y;
            invSigma2 << 1.0, 0, 0, 1.0;
            e->setInformation(Eigen::Matrix2d::Identity()*invSigma2);

            // add projection matrix
            // set the projection matrix and the point's world coordinate Xw
            e->P = PP;

            cv::Mat Xw = pLastFrame->UnprojectStereoObject(ObjId[i],0);
            // transfer to preconditioning coordinate
            // Xw = R_wp_inv*Xw+t_wp_inv;

            e->Xw[0] = Xw.at<float>(0);
            e->Xw[1] = Xw.at<float>(1);
            e->Xw[2] = Xw.at<float>(2);

            // add the edge to the graph
            optimizer.addEdge(e);

            vpEdgesMono.push_back(e);
            vnIndexEdgeMono.push_back(i);
        }
    }
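
EdgeSE3ProjectXYZOnlyObjMotion is defined in the repo's modified g2o. Conceptually its error computation should look roughly like the sketch below (my reconstruction from the formula above, not the repo's exact code): P is the 3x4 projection matrix encoding $\pi(X_k^{-1}\,\cdot\,)$ and Xw the stored last-frame point.

    #include <Eigen/Core>
    #include <g2o/core/base_unary_edge.h>
    #include <g2o/types/sba/types_six_dof_expmap.h>

    // Sketch of a unary "object motion only" edge: reproject a last-frame point
    // moved by the estimated motion H_k and compare against the observed pixel.
    class EdgeObjMotionSketch
        : public g2o::BaseUnaryEdge<2, Eigen::Vector2d, g2o::VertexSE3Expmap>
    {
    public:
        Eigen::Matrix<double,3,4> P; // projection, encodes pi(X_k^{-1} * .)
        Eigen::Vector3d Xw;          // m_{k-1}: the point from the last frame

        void computeError()
        {
            const g2o::VertexSE3Expmap *v =
                static_cast<const g2o::VertexSE3Expmap *>(_vertices[0]);
            Eigen::Vector3d m = v->estimate().map(Xw);   // H_k * m_{k-1}
            Eigen::Vector3d uvw = P * m.homogeneous();   // project into the image
            Eigen::Vector2d proj(uvw(0) / uvw(2), uvw(1) / uvw(2));
            _error = _measurement - proj;                // e = p_k - pi(G_k m_{k-1})
        }
        bool read(std::istream &) { return false; }
        bool write(std::ostream &) const { return false; }
    };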

③ Run the optimization.

    // the first threshold, the custom parameter rp_thres, is set very small, presumably to select the edges strictly in the first round
    const float chi2Mono[4]={rp_thres,5.991,5.991,5.991}; // {5.991,5.991,5.991,5.991} {4,4,4,4}
    const int its[4]={200,100,100,100};

    int nBad=0;
    cout << endl;
    // this looks like a typo: the bound should presumably be 4, matching chi2Mono[] and its[]
    for(size_t it=0; it<1; it++)
    {

        vSE3->setEstimate(Converter::toSE3Quat(Init));
        optimizer.initializeOptimization(0);
        optimizer.optimize(its[it]);

        nBad=0;

        // monocular
        // cout << endl << "chi2: " << endl;
        // after each round, walk over all edges to decide which ones join the next round
        for(size_t i=0, iend=vpEdgesMono.size(); i<iend; i++)
        {
            g2o::EdgeSE3ProjectXYZOnlyObjMotion* e = vpEdgesMono[i];

            const size_t idx = vnIndexEdgeMono[i];

            if(vIsOutlier[idx])
            {
                e->computeError();
            }

            // use the chi2 error to decide whether the edge stays in the optimization
            const float chi2 = e->chi2();

            if(chi2>chi2Mono[it])
            {
                vIsOutlier[idx]=true;
                e->setLevel(1);
                nBad++;
            }
            else
            {
                // ++++ new added for calculating re-projection error +++
                if (it==0)
                {
                    repro_e = repro_e + std::sqrt(chi2);
                }
                vIsOutlier[idx]=false;
                e->setLevel(0);
            }

            // after two rounds (it==2), remove the robust kernel
            if(it==2)
                e->setRobustKernel(0);
        }

        if(optimizer.edges().size()<5)
            break;
    }

④ Error computation. The speed computed in the code corresponds to Eq. (24) in the paper:
$$v \approx m_k^i - m_{k-1}^i = (H_k - I_4)\, m_{k-1}^i = t_k - (I - R_k)\, m_{k-1}^i$$
and the change to body-fixed coordinates corresponds to Eq. (23):
$$^{L_{k-1}}H_k = L_{k-1}^{-1}\, H_k\, L_{k-1}$$

	// corresponds to Eq. (24) in the paper
    cv::Mat sp_gt_v, sp_gt_v2;
    sp_gt_v = H_p_c.rowRange(0,3).col(3) - (cv::Mat::eye(3,3,CV_32F)-H_p_c.rowRange(0,3).colRange(0,3))*ObjCentre3D_pre; // L_w_p.rowRange(0,3).col(3) or ObjCentre3D_pre
    float sp_gt_norm = std::sqrt( sp_gt_v.at<float>(0)*sp_gt_v.at<float>(0) + sp_gt_v.at<float>(1)*sp_gt_v.at<float>(1) + sp_gt_v.at<float>(2)*sp_gt_v.at<float>(2) )*36;
    mCurrentFrame.vObjSpeed_gt[i] = sp_gt_norm;

    // // ***** calculate the estimated object speed *****
    // corresponds to Eq. (24) in the paper
    cv::Mat sp_est_v;
    sp_est_v = mCurrentFrame.vObjMod[i].rowRange(0,3).col(3) - (cv::Mat::eye(3,3,CV_32F)-mCurrentFrame.vObjMod[i].rowRange(0,3).colRange(0,3))*ObjCentre3D_pre;
    float sp_est_norm = std::sqrt( sp_est_v.at<float>(0)*sp_est_v.at<float>(0) + sp_est_v.at<float>(1)*sp_est_v.at<float>(1) + sp_est_v.at<float>(2)*sp_est_v.at<float>(2) )*36;

    cout << "estimated and ground truth object speed: " << sp_est_norm << "km/h " << sp_gt_norm << "km/h " << endl;

	// unclear why *36 again; possibly a km/h conversion (3.6 m/s-to-km/h x 10 Hz frame rate on KITTI), but the norms above were already scaled by 36
    mCurrentFrame.vSpeed[i].x = sp_est_norm*36;
    mCurrentFrame.vSpeed[i].y = sp_gt_norm*36;


    // // ************** calculate the relative pose error *****************
    // corresponds to Eq. (23) in the paper
    cv::Mat H_p_c_body_est = L_w_p_inv*mCurrentFrame.vObjMod[i]*L_w_p;
    // compare against the ground-truth motion
    cv::Mat RePoEr = Converter::toInvMatrix(H_p_c_body_est)*H_p_c_body;
    
    // translational and rotational error
    float t_rpe = std::sqrt( RePoEr.at<float>(0,3)*RePoEr.at<float>(0,3) + RePoEr.at<float>(1,3)*RePoEr.at<float>(1,3) + RePoEr.at<float>(2,3)*RePoEr.at<float>(2,3) );
    float trace_rpe = 0;
    for (int i = 0; i < 3; ++i)
    {
        if (RePoEr.at<float>(i,i)>1.0)
                trace_rpe = trace_rpe + 1.0-(RePoEr.at<float>(i,i)-1.0);
        else
            trace_rpe = trace_rpe + RePoEr.at<float>(i,i);
    }
    float r_rpe = acos( ( trace_rpe -1.0 )/2.0 )*180.0/3.1415926;
    cout << "the relative pose error of the object, " << "t: " << t_rpe <<  " R: " << r_rpe << endl;
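
The trace loop above feeds the standard axis-angle relation between a rotation matrix and its rotation angle:
$$\theta = \arccos\!\left(\frac{\operatorname{tr}(R) - 1}{2}\right)$$
If numerical noise pushes a diagonal entry of RePoEr above 1, the loop folds the excess back (accumulating $1-(R_{ii}-1)$ instead of $R_{ii}$), keeping the arccos argument inside $[-1,1]$.
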
4 local/global optimization

Too long for this post; covered in the next one.

Summary

This post walked through the parts of VDO-SLAM's Track() that deal with dynamic objects; a fully annotated copy of the source is available on my GitHub.
