diff --git a/ReadMe.md b/ReadMe.md
index 76cad4a86..229bb915e 100644
--- a/ReadMe.md
+++ b/ReadMe.md
@@ -19,6 +19,7 @@ Please take a look at the feature list below for full details on what the system
## News / Events
+* **May 18, 2020** - Released secondary pose graph example repository [ov_secondary](https://github.com/rpng/ov_secondary) based on [VINS-Fusion](https://github.com/HKUST-Aerial-Robotics/VINS-Fusion). OpenVINS now publishes marginalized feature track, feature 3d position, and first camera intrinsics and extrinsics. See [PR#66](https://github.com/rpng/open_vins/pull/66) for details and discussion.
* **April 3, 2020** - Released [v2.0](https://github.com/rpng/open_vins/releases/tag/v2.0) update to the codebase with some key refactoring, ros-free building, improved dataset support, and single inverse depth feature representation. Please check out the [release page](https://github.com/rpng/open_vins/releases/tag/v2.0) for details.
* **January 21, 2020** - Our paper has been accepted for presentation in [ICRA 2020](https://www.icra2020.org/). We look forward to seeing everybody there! We have also added links to a few videos of the system running on different datasets.
* **October 23, 2019** - OpenVINS placed first in the [IROS 2019 FPV Drone Racing VIO Competition
@@ -63,9 +64,27 @@ Please take a look at the feature list below for full details on what the system
* Out of the box evaluation on EurocMav and TUM-VI datasets
* Extensive evaluation suite (ATE, RPE, NEES, RMSE, etc.)
-## Demo Videos
+## Codebase Extensions
+
+* **[ov_secondary](https://github.com/rpng/ov_secondary)** -
+This is an example secondary thread which provides loop closure in a loosely coupled manner for [OpenVINS](https://github.com/rpng/open_vins).
+This is a modification of the code originally developed by the HKUST aerial robotics group and can be found in their [VINS-Fusion](https://github.com/HKUST-Aerial-Robotics/VINS-Fusion) repository.
+Here we stress that this is a loosely coupled method, so no information is returned to the estimator to improve the underlying OpenVINS odometry.
+This codebase has been modified in a few key areas: exposing more loop closure parameters, subscribing to the camera intrinsics, simplifying the configuration so that only topics need to be supplied, and tweaking the loop closure detection to improve frequency.
+A minimal sketch of subscribing to the keyframe topics that OpenVINS now publishes is shown after this list.
+
+* **[ov_maplab](https://github.com/rpng/ov_maplab)** -
+This codebase contains the interface wrapper for exporting visual-inertial runs from [OpenVINS](https://github.com/rpng/open_vins) into the ViMap structure taken by [maplab](https://github.com/ethz-asl/maplab).
+The state estimates and raw images are appended to the ViMap as OpenVINS runs through a dataset.
+After completion of the dataset, features are re-extracted and triangulated with maplab's feature system.
+This can be used to merge multi-session maps, or to perform a batch optimization after first running the data through OpenVINS.
+Some examples have been provided, along with a helper script to export trajectories into the standard groundtruth format.
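+
+The loop closure extension consumes the keyframe information that OpenVINS publishes from its marginalized frames (pose, feature tracks, and camera calibration; see the news entry above).
+As a rough illustration of that interface, and not code taken from either repository, a minimal ROS node subscribing to these topics might look like the following sketch; the node and callback names are placeholders.
+
+```cpp
+// Illustrative sketch only: listen to the keyframe topics published by ov_msckf.
+#include <ros/ros.h>
+#include <nav_msgs/Odometry.h>
+#include <sensor_msgs/PointCloud.h>
+
+void pose_cb(const nav_msgs::Odometry::ConstPtr &msg) {
+  // Pose of the marginalized clone in the global frame
+  ROS_INFO("keyframe pose at t=%.3f", msg->header.stamp.toSec());
+}
+
+void feats_cb(const sensor_msgs::PointCloud::ConstPtr &msg) {
+  // Each 3d point has a matching channel of [u_norm, v_norm, u_raw, v_raw, feature_id]
+  ROS_INFO("keyframe has %d features", (int)msg->points.size());
+}
+
+int main(int argc, char **argv) {
+  ros::init(argc, argv, "keyframe_listener");
+  ros::NodeHandle nh;
+  ros::Subscriber sub_pose = nh.subscribe("/ov_msckf/keyframe_pose", 1000, pose_cb);
+  ros::Subscriber sub_feat = nh.subscribe("/ov_msckf/keyframe_feats", 1000, feats_cb);
+  ros::spin();
+  return 0;
+}
+```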
+
+
+
+## Demo Videos
+
[![](docs/youtube/KCX51GvYGss.jpg)](http://www.youtube.com/watch?v=KCX51GvYGss "OpenVINS - EuRoC MAV Vicon Rooms Flyby")
[![](docs/youtube/Lc7VQHngSuQ.jpg)](http://www.youtube.com/watch?v=Lc7VQHngSuQ "OpenVINS - TUM VI Datasets Flyby")
[![](docs/youtube/vaia7iPaRW8.jpg)](http://www.youtube.com/watch?v=vaia7iPaRW8 "OpenVINS - UZH-FPV Drone Racing Dataset Flyby")
@@ -76,6 +95,7 @@ Please take a look at the feature list below for full details on what the system
[![](docs/youtube/ExPIGwORm4E.jpg)](http://www.youtube.com/watch?v=ExPIGwORm4E "OpenVINS - UZH-FPV Drone Racing Dataset Demonstration")
+
## Credit / Licensing
This code was written by the [Robot Perception and Navigation Group (RPNG)](https://sites.udel.edu/robot/) at the University of Delaware.
diff --git a/ov_msckf/launch/pgeneva_ros_tum.launch b/ov_msckf/launch/pgeneva_ros_tum.launch
index 9aed4f438..dc9b96f6e 100644
--- a/ov_msckf/launch/pgeneva_ros_tum.launch
+++ b/ov_msckf/launch/pgeneva_ros_tum.launch
@@ -4,11 +4,11 @@
-
+
-
+
diff --git a/ov_msckf/launch/pgeneva_serial_tum.launch b/ov_msckf/launch/pgeneva_serial_tum.launch
index 824f59558..7bf1410f1 100644
--- a/ov_msckf/launch/pgeneva_serial_tum.launch
+++ b/ov_msckf/launch/pgeneva_serial_tum.launch
@@ -32,7 +32,7 @@
-
+
<rosparam param="gravity">[0.0,0.0,9.80766]</rosparam>
diff --git a/ov_msckf/src/core/RosVisualizer.cpp b/ov_msckf/src/core/RosVisualizer.cpp
index ee29bc3b3..8cbd178f7 100644
--- a/ov_msckf/src/core/RosVisualizer.cpp
+++ b/ov_msckf/src/core/RosVisualizer.cpp
@@ -59,6 +59,12 @@ RosVisualizer::RosVisualizer(ros::NodeHandle &nh, VioManager* app, Simulator *si
pub_pathgt = nh.advertise<nav_msgs::Path>("/ov_msckf/pathgt", 2);
ROS_INFO("Publishing: %s", pub_pathgt.getTopic().c_str());
+ // Keyframe publishers
+ pub_keyframe_pose = nh.advertise<nav_msgs::Odometry>("/ov_msckf/keyframe_pose", 1000);
+ pub_keyframe_point = nh.advertise<sensor_msgs::PointCloud>("/ov_msckf/keyframe_feats", 1000);
+ pub_keyframe_extrinsic = nh.advertise<nav_msgs::Odometry>("/ov_msckf/keyframe_extrinsic", 1000);
+ pub_keyframe_intrinsics = nh.advertise<sensor_msgs::CameraInfo>("/ov_msckf/keyframe_intrinsics", 1000);
+
// option to enable publishing of global to IMU transformation
nh.param("publish_global_to_imu_tf", publish_global2imu_tf, true);
nh.param("publish_calibration_tf", publish_calibration_tf, true);
@@ -133,6 +139,9 @@ void RosVisualizer::visualize() {
// Publish gt if we have it
publish_groundtruth();
+ // Publish keyframe information
+ publish_keyframe_information();
+
// save total state
if(save_total_state)
sim_save_total_state_to_file();
@@ -670,6 +679,123 @@ void RosVisualizer::publish_groundtruth() {
+void RosVisualizer::publish_keyframe_information() {
+
+
+ // Check if we have subscribers
+ if(pub_keyframe_pose.getNumSubscribers()==0 && pub_keyframe_point.getNumSubscribers()==0 &&
+ pub_keyframe_extrinsic.getNumSubscribers()==0 && pub_keyframe_intrinsics.getNumSubscribers()==0)
+ return;
+
+
+ // Skip if we don't have a marginalized frame yet
+ double hist_last_marginalized_time;
+ Eigen::Matrix<double,7,1> stateinG;
+ if(!_app->hist_last_marg_state(hist_last_marginalized_time, stateinG))
+ return;
+
+ // Default header
+ std_msgs::Header header;
+ header.stamp = ros::Time(hist_last_marginalized_time);
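+ // The same header is reused for all four keyframe messages below, so a downstream consumer can associate them by timestamp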
+
+ //======================================================
+ // PUBLISH IMU TO CAMERA0 EXTRINSIC
+ // need to flip the transform to the IMU frame
+ Eigen::Vector4d q_ItoC = _app->get_state()->_calib_IMUtoCAM.at(0)->quat();
+ Eigen::Vector3d p_CinI = -_app->get_state()->_calib_IMUtoCAM.at(0)->Rot().transpose()*_app->get_state()->_calib_IMUtoCAM.at(0)->pos();
+ nav_msgs::Odometry odometry_calib;
+ odometry_calib.header = header;
+ odometry_calib.header.frame_id = "imu";
+ odometry_calib.pose.pose.position.x = p_CinI(0);
+ odometry_calib.pose.pose.position.y = p_CinI(1);
+ odometry_calib.pose.pose.position.z = p_CinI(2);
+ odometry_calib.pose.pose.orientation.x = q_ItoC(0);
+ odometry_calib.pose.pose.orientation.y = q_ItoC(1);
+ odometry_calib.pose.pose.orientation.z = q_ItoC(2);
+ odometry_calib.pose.pose.orientation.w = q_ItoC(3);
+ pub_keyframe_extrinsic.publish(odometry_calib);
+
+
+ //======================================================
+ // PUBLISH CAMERA0 INTRINSICS
+ sensor_msgs::CameraInfo cameraparams;
+ cameraparams.header = header;
+ cameraparams.header.frame_id = "imu";
+ cameraparams.distortion_model = (_app->get_state()->_cam_intrinsics_model.at(0))? "equidistant" : "plumb_bob";
+ Eigen::VectorXd cparams = _app->get_state()->_cam_intrinsics.at(0)->value();
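+ // cparams is ordered [fx, fy, cx, cy, d1, d2, d3, d4]: the first four fill K and the last four fill D below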
+ cameraparams.D = {cparams(4), cparams(5), cparams(6), cparams(7)};
+ cameraparams.K = {cparams(0), 0, cparams(2), 0, cparams(1), cparams(3), 0, 0, 1};
+ pub_keyframe_intrinsics.publish(cameraparams);
+
+
+ //======================================================
+ // PUBLISH HISTORICAL POSE ESTIMATE
+ nav_msgs::Odometry odometry_pose;
+ odometry_pose.header = header;
+ odometry_pose.header.frame_id = "global";
+ odometry_pose.pose.pose.position.x = stateinG(4);
+ odometry_pose.pose.pose.position.y = stateinG(5);
+ odometry_pose.pose.pose.position.z = stateinG(6);
+ odometry_pose.pose.pose.orientation.x = stateinG(0);
+ odometry_pose.pose.pose.orientation.y = stateinG(1);
+ odometry_pose.pose.pose.orientation.z = stateinG(2);
+ odometry_pose.pose.pose.orientation.w = stateinG(3);
+ pub_keyframe_pose.publish(odometry_pose);
+
+
+ //======================================================
+ // PUBLISH FEATURE TRACKS IN THE GLOBAL FRAME OF REFERENCE
+
+ // Get historical feature information
+ std::unordered_map<size_t, Eigen::Vector3d> hist_feat_posinG;
+ std::unordered_map<size_t, std::unordered_map<size_t, std::vector<Eigen::VectorXf>>> hist_feat_uvs;
+ std::unordered_map<size_t, std::unordered_map<size_t, std::vector<Eigen::VectorXf>>> hist_feat_uvs_norm;
+ std::unordered_map<size_t, std::unordered_map<size_t, std::vector<double>>> hist_feat_timestamps;
+ _app->hist_get_features(hist_feat_posinG, hist_feat_uvs, hist_feat_uvs_norm, hist_feat_timestamps);
+
+ // Construct the message
+ sensor_msgs::PointCloud point_cloud;
+ point_cloud.header = header;
+ point_cloud.header.frame_id = "global";
+ for(const auto &feattimes : hist_feat_timestamps) {
+
+ // Skip if this feature has no extraction in the "zero" camera
+ if(feattimes.second.find(0)==feattimes.second.end())
+ continue;
+
+ // Skip if this feature does not have a measurement at this time
+ auto iter = std::find(feattimes.second.at(0).begin(), feattimes.second.at(0).end(), hist_last_marginalized_time);
+ if(iter==feattimes.second.at(0).end())
+ continue;
+
+ // Get this feature information
+ size_t featid = feattimes.first;
+ size_t index = (size_t)std::distance(feattimes.second.at(0).begin(), iter);
+ Eigen::VectorXf uv = hist_feat_uvs.at(featid).at(0).at(index);
+ Eigen::VectorXf uv_n = hist_feat_uvs_norm.at(featid).at(0).at(index);
+ Eigen::Vector3d pFinG = hist_feat_posinG.at(featid);
+
+ // Push back 3d point
+ geometry_msgs::Point32 p;
+ p.x = pFinG(0);
+ p.y = pFinG(1);
+ p.z = pFinG(2);
+ point_cloud.points.push_back(p);
+
+ // Push back the norm, raw, and feature id
+ sensor_msgs::ChannelFloat32 p_2d;
+ p_2d.values.push_back(uv_n(0));
+ p_2d.values.push_back(uv_n(1));
+ p_2d.values.push_back(uv(0));
+ p_2d.values.push_back(uv(1));
+ p_2d.values.push_back(featid);
+ point_cloud.channels.push_back(p_2d);
+
+ }
+ pub_keyframe_point.publish(point_cloud);
+
+
+}
void RosVisualizer::sim_save_total_state_to_file() {
diff --git a/ov_msckf/src/core/RosVisualizer.h b/ov_msckf/src/core/RosVisualizer.h
index 8baa17a8e..353288a2e 100644
--- a/ov_msckf/src/core/RosVisualizer.h
+++ b/ov_msckf/src/core/RosVisualizer.h
@@ -27,8 +27,10 @@
#include
#include
#include
+#include <sensor_msgs/PointCloud.h>
#include
#include
+#include <sensor_msgs/CameraInfo.h>
#include
#include
#include
@@ -102,6 +104,9 @@ namespace ov_msckf {
/// Publish groundtruth (if we have it)
void publish_groundtruth();
+ /// Publish keyframe information of the marginalized pose and tracks
+ void publish_keyframe_information();
+
/// Save current estimate state and groundtruth including calibration
void sim_save_total_state_to_file();
@@ -115,14 +120,10 @@ namespace ov_msckf {
Simulator* _sim;
// Our publishers
- ros::Publisher pub_poseimu;
- ros::Publisher pub_odomimu;
- ros::Publisher pub_pathimu;
- ros::Publisher pub_points_msckf;
- ros::Publisher pub_points_slam;
- ros::Publisher pub_points_aruco;
- ros::Publisher pub_points_sim;
+ ros::Publisher pub_poseimu, pub_odomimu, pub_pathimu;
+ ros::Publisher pub_points_msckf, pub_points_slam, pub_points_aruco, pub_points_sim;
ros::Publisher pub_tracks;
+ ros::Publisher pub_keyframe_pose, pub_keyframe_point, pub_keyframe_extrinsic, pub_keyframe_intrinsics;
tf::TransformBroadcaster *mTfBr;
// For path viz
@@ -130,8 +131,7 @@ namespace ov_msckf {
vector<geometry_msgs::PoseStamped> poses_imu;
// Groundtruth information
- ros::Publisher pub_pathgt;
- ros::Publisher pub_posegt;
+ ros::Publisher pub_pathgt, pub_posegt;
double summed_rmse_ori = 0.0;
double summed_rmse_pos = 0.0;
double summed_nees_ori = 0.0;
diff --git a/ov_msckf/src/core/VioManager.cpp b/ov_msckf/src/core/VioManager.cpp
index 999f9f11e..a1761f093 100644
--- a/ov_msckf/src/core/VioManager.cpp
+++ b/ov_msckf/src/core/VioManager.cpp
@@ -494,6 +494,13 @@ void VioManager::do_feature_propagate_update(double timestamp) {
//===================================================================================
+ // Collect all slam features into single vector
+ std::vector<Feature*> features_used_in_update = featsup_MSCKF;
+ features_used_in_update.insert(features_used_in_update.end(), feats_slam_UPDATE.begin(), feats_slam_UPDATE.end());
+ features_used_in_update.insert(features_used_in_update.end(), feats_slam_DELAYED.begin(), feats_slam_DELAYED.end());
+ update_keyframe_historical_information(features_used_in_update);
+
+
// Save all the MSCKF features used in the update
good_features_MSCKF.clear();
for(Feature* feat : featsup_MSCKF) {
@@ -516,17 +523,15 @@ void VioManager::do_feature_propagate_update(double timestamp) {
// First do anchor change if we are about to lose an anchor pose
updaterSLAM->change_anchors(state);
- // Marginalize the oldest clone of the state if we are at max length
- if((int)state->_clones_IMU.size() > state->_options.max_clone_size) {
- // Cleanup any features older then the marginalization time
- trackFEATS->get_feature_database()->cleanup_measurements(state->margtimestep());
- if(trackARUCO != nullptr) {
- trackARUCO->get_feature_database()->cleanup_measurements(state->margtimestep());
- }
- // Finally marginalize that clone
- StateHelper::marginalize_old_clone(state);
+ // Clean up any features older than the marginalization time
+ trackFEATS->get_feature_database()->cleanup_measurements(state->margtimestep());
+ if(trackARUCO != nullptr) {
+ trackARUCO->get_feature_database()->cleanup_measurements(state->margtimestep());
}
+ // Finally marginalize the oldest clone if needed
+ StateHelper::marginalize_old_clone(state);
+
// Finally if we are optimizing our intrinsics, update our trackers
if(state->_options.do_calib_camera_intrinsics) {
// Get vectors arrays
@@ -635,10 +640,91 @@ void VioManager::do_feature_propagate_update(double timestamp) {
}
+void VioManager::update_keyframe_historical_information(const std::vector<Feature*> &features) {
+
+
+ // Loop through all features that have been used in the last update
+ // We want to record their historical measurements and estimates for later use
+ for(const auto &feat : features) {
+
+ // Get position of feature in the global frame of reference
+ Eigen::Vector3d p_FinG = feat->p_FinG;
+
+ // If it is a slam feature, then get its best guess from the state
+ if(state->_features_SLAM.find(feat->featid)!=state->_features_SLAM.end()) {
+ p_FinG = state->_features_SLAM.at(feat->featid)->get_xyz(false);
+ }
+
+ // Push back any new measurements if we have them
+ // If the feature has already been added, just append the new measurements
+ if(hist_feat_posinG.find(feat->featid)!=hist_feat_posinG.end()) {
+ hist_feat_posinG.at(feat->featid) = p_FinG;
+ for(const auto &cam2uv : feat->uvs) {
+ if(hist_feat_uvs.at(feat->featid).find(cam2uv.first)!=hist_feat_uvs.at(feat->featid).end()) {
+ hist_feat_uvs.at(feat->featid).at(cam2uv.first).insert(hist_feat_uvs.at(feat->featid).at(cam2uv.first).end(), cam2uv.second.begin(), cam2uv.second.end());
+ hist_feat_uvs_norm.at(feat->featid).at(cam2uv.first).insert(hist_feat_uvs_norm.at(feat->featid).at(cam2uv.first).end(), feat->uvs_norm.at(cam2uv.first).begin(), feat->uvs_norm.at(cam2uv.first).end());
+ hist_feat_timestamps.at(feat->featid).at(cam2uv.first).insert(hist_feat_timestamps.at(feat->featid).at(cam2uv.first).end(), feat->timestamps.at(cam2uv.first).begin(), feat->timestamps.at(cam2uv.first).end());
+ } else {
+ hist_feat_uvs.at(feat->featid).insert(cam2uv);
+ hist_feat_uvs_norm.at(feat->featid).insert({cam2uv.first,feat->uvs_norm.at(cam2uv.first)});
+ hist_feat_timestamps.at(feat->featid).insert({cam2uv.first,feat->timestamps.at(cam2uv.first)});
+ }
+ }
+ } else {
+ hist_feat_posinG.insert({feat->featid,p_FinG});
+ hist_feat_uvs.insert({feat->featid,feat->uvs});
+ hist_feat_uvs_norm.insert({feat->featid,feat->uvs_norm});
+ hist_feat_timestamps.insert({feat->featid,feat->timestamps});
+ }
+ }
+
+ // Go through all our old historical vectors and find if any features should be removed
+ // We have no use for features whose measurements are all older than the last marginalization time
+ std::vector<size_t> ids_to_remove;
+ for(const auto &id2feat : hist_feat_timestamps) {
+ bool all_older = true;
+ for(const auto &cam2time : id2feat.second) {
+ for(const auto &time : cam2time.second) {
+ if(time >= hist_last_marginalized_time) {
+ all_older = false;
+ break;
+ }
+ }
+ if(!all_older) break;
+ }
+ if(all_older) {
+ ids_to_remove.push_back(id2feat.first);
+ }
+ }
+ // Remove those features!
+ for(const auto &id : ids_to_remove) {
+ hist_feat_posinG.erase(id);
+ hist_feat_uvs.erase(id);
+ hist_feat_uvs_norm.erase(id);
+ hist_feat_timestamps.erase(id);
+ }
+ // Remove any historical states older than the marg time
+ auto it0 = hist_stateinG.begin();
+ while(it0 != hist_stateinG.end()) {
+ if(it0->first < hist_last_marginalized_time) it0 = hist_stateinG.erase(it0);
+ else it0++;
+ }
+
+ // If we have reached our max window size record the oldest clone
+ // This clone is expected to be marginalized from the state
+ if ((int) state->_clones_IMU.size() > state->_options.max_clone_size) {
+ hist_last_marginalized_time = state->margtimestep();
+ assert(hist_last_marginalized_time != INFINITY);
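+ // Record the to-be-marginalized clone as a 7x1 vector: orientation quaternion first, then position in the global frame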
+ Eigen::Matrix<double,7,1> imustate_inG = Eigen::Matrix<double,7,1>::Zero();
+ imustate_inG.block(0,0,4,1) = state->_clones_IMU.at(hist_last_marginalized_time)->quat();
+ imustate_inG.block(4,0,3,1) = state->_clones_IMU.at(hist_last_marginalized_time)->pos();
+ hist_stateinG.insert({hist_last_marginalized_time, imustate_inG});
+ }
+}
diff --git a/ov_msckf/src/core/VioManager.h b/ov_msckf/src/core/VioManager.h
index d4b1a0497..5be14ab19 100644
--- a/ov_msckf/src/core/VioManager.h
+++ b/ov_msckf/src/core/VioManager.h
@@ -215,6 +215,29 @@ namespace ov_msckf {
return aruco_feats;
}
+ /// Returns the last timestamp we have marginalized (true if we have a state)
+ bool hist_last_marg_state(double &timestamp, Eigen::Matrix<double,7,1> &stateinG) {
+ if(hist_last_marginalized_time != -1) {
+ timestamp = hist_last_marginalized_time;
+ stateinG = hist_stateinG.at(hist_last_marginalized_time);
+ return true;
+ } else {
+ timestamp = -1;
+ stateinG.setZero();
+ return false;
+ }
+ }
+
+ /// Returns historical feature positions, and measurements times and uvs used to get its estimate.
+ void hist_get_features(std::unordered_map<size_t, Eigen::Vector3d> &feat_posinG,
+                        std::unordered_map<size_t, std::unordered_map<size_t, std::vector<Eigen::VectorXf>>> &feat_uvs,
+                        std::unordered_map<size_t, std::unordered_map<size_t, std::vector<Eigen::VectorXf>>> &feat_uvs_norm,
+                        std::unordered_map<size_t, std::unordered_map<size_t, std::vector<double>>> &feat_timestamps) {
+ feat_posinG = hist_feat_posinG;
+ feat_uvs = hist_feat_uvs;
+ feat_uvs_norm = hist_feat_uvs_norm;
+ feat_timestamps = hist_feat_timestamps;
+ }
protected:
@@ -237,6 +260,17 @@ namespace ov_msckf {
*/
void do_feature_propagate_update(double timestamp);
+
+ /**
+ * @brief This function will update our historical tracking information.
+ * This historical information includes the best estimate of a feature in the global frame.
+ * For all features it also has the normalized and raw pixel coordinates at each timestep.
+ * The state is also recorded after it is marginalized out of the state.
+ * @param features Features used in the last update phase
+ */
+ void update_keyframe_historical_information(const std::vector &features);
+
+
/// Manager parameters
VioManagerOptions params;
@@ -278,6 +312,14 @@ namespace ov_msckf {
// Startup time of the filter
double startup_time = -1;
+ // Historical information of the filter (last marg time, historical states, features seen from all frames)
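+ // Feature maps are keyed by feature id and then camera id; the uv, uv_norm, and timestamp vectors are index-aligned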
+ double hist_last_marginalized_time = -1;
+ std::map<double, Eigen::Matrix<double,7,1>> hist_stateinG;
+ std::unordered_map<size_t, Eigen::Vector3d> hist_feat_posinG;
+ std::unordered_map<size_t, std::unordered_map<size_t, std::vector<Eigen::VectorXf>>> hist_feat_uvs;
+ std::unordered_map<size_t, std::unordered_map<size_t, std::vector<Eigen::VectorXf>>> hist_feat_uvs_norm;
+ std::unordered_map<size_t, std::unordered_map<size_t, std::vector<double>>> hist_feat_timestamps;
+
};
diff --git a/ov_msckf/src/state/StateHelper.h b/ov_msckf/src/state/StateHelper.h
index 0c84ab129..dc7c8629a 100644
--- a/ov_msckf/src/state/StateHelper.h
+++ b/ov_msckf/src/state/StateHelper.h
@@ -215,6 +215,7 @@ namespace ov_msckf {
static void marginalize_old_clone(State *state) {
if ((int) state->_clones_IMU.size() > state->_options.max_clone_size) {
double marginal_time = state->margtimestep();
+ assert(marginal_time != INFINITY);
StateHelper::marginalize(state, state->_clones_IMU.at(marginal_time));
// Note that the marginalizer should have already deleted the clone
// Thus we just need to remove the pointer to it from our state