Parsing a bag File and Producing the BundleFusion Input Source Format


In the previous post, 《使用Intel RealSense D435i自制离线数据集跑通BundleFusion》, we described how to build an offline dataset and run BundleFusion on it. In this post, the bag-file parsing, timestamp alignment, and source-format generation steps involved in that conversion are merged into a single script, bag2org.py. It takes a bag file as input and outputs the input source format required by BundleFusion, so there is no need to switch back and forth between separate programs.

First, create three folders named depth, rgb, and result, which will hold the depth images, the color images, and the source-format data, respectively.
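The folders can also be created from Python; here is a minimal sketch, assuming the same base path that bag2org.py uses below:

import os

base = '/home/magus/jizongxing-workspace/slam/rosImage/'  # assumed base path, matching the paths in bag2org.py
for sub in ('depth', 'rgb', 'result'):
    os.makedirs(os.path.join(base, sub), exist_ok=True)  # create each folder if it does not already exist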

At the same time, create and initialize an info.txt file. The entries to adjust are the depth and color image resolutions, the camera intrinsic calibration, and the total frame count. The intrinsics can be read from Intel RealSense Viewer, and the frame count is updated automatically by the script according to the actual size of the dataset, so any placeholder value will do.

m_versionNumber = 4
m_sensorName = StructureSensor
m_colorWidth = 640 # modify to match your data
m_colorHeight = 480 # modify to match your data
m_depthWidth = 640 # modify to match your data
m_depthHeight = 480 # modify to match your data
m_depthShift = 1000
m_calibrationColorIntrinsic = 582.871 0 320 0 0 582.871 240 0 0 0 1 0 0 0 0 1 # modify to match your camera
m_calibrationColorExtrinsic = 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1
m_calibrationDepthIntrinsic = 583 0 320 0 0 583 240 0 0 0 1 0 0 0 0 1 # modify to match your camera
m_calibrationDepthExtrinsic = 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1
m_frames.size = 2016 # updated automatically by the script; any value will do
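Each m_calibration*Intrinsic entry is a 4x4 intrinsic matrix written out row by row (fx, 0, cx in the first row, 0, fy, cy in the second, and so on, as in the file above). If you have read fx, fy, cx, cy from Intel RealSense Viewer, a minimal sketch like the following (using the example values from the file above) produces the line in that layout:

fx, fy, cx, cy = 582.871, 582.871, 320.0, 240.0   # example values; replace with your own calibration
K = [fx, 0, cx, 0,
     0, fy, cy, 0,
     0, 0,  1,  0,
     0, 0,  0,  1]
print("m_calibrationColorIntrinsic = " + " ".join(str(v) for v in K))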

The bag2org.py script (just adjust the paths for your own setup):

import os
import shutil
import cv2
import rosbag
from cv_bridge import CvBridge

# 1 Parse the bag file and extract depth/rgb images together with their time stamps.
rgb = '/home/magus/jizongxing-workspace/slam/rosImage/rgb/'      # rgb path
depth = '/home/magus/jizongxing-workspace/slam/rosImage/depth/'  # depth path
bridge = CvBridge()
file_handle1 = open('/home/magus/jizongxing-workspace/slam/rosImage/depth-stamp.txt', 'w')  # time stamps for depth
file_handle2 = open('/home/magus/jizongxing-workspace/slam/rosImage/rgb-stamp.txt', 'w')    # time stamps for rgb
with rosbag.Bag('/home/magus/jizongxing-workspace/bedroom.bag', 'r') as bag:
    for topic, msg, t in bag.read_messages():
        if topic == "/device_0/sensor_0/Depth_0/image/data":   # depth image topic
            cv_image = bridge.imgmsg_to_cv2(msg)
            timestr = "%.6f" % msg.header.stamp.to_sec()
            image_name = timestr + ".png"      # depth image (format: timestamp.png)
            path = "depth/" + image_name       # relative path of the depth image
            file_handle1.write(timestr + " " + path + '\n')
            cv2.imwrite(depth + image_name, cv_image)
        if topic == "/device_0/sensor_1/Color_0/image/data":   # rgb image topic
            cv_image = bridge.imgmsg_to_cv2(msg, "bgr8")
            timestr = "%.6f" % msg.header.stamp.to_sec()
            image_name = timestr + ".jpg"      # rgb image (format: timestamp.jpg)
            path = "rgb/" + image_name         # relative path of the rgb image
            file_handle2.write(timestr + " " + path + '\n')
            cv2.imwrite(rgb + image_name, cv_image)
file_handle1.close()
file_handle2.close()

# 2 Match depth images to rgb images by time stamp (the closest pair within max_difference is matched).
def read_file_list(filename):
    file = open(filename)
    data = file.read()
    file.close()
    lines = data.replace(",", " ").replace("\t", " ").split("\n")
    list = [[v.strip() for v in line.split(" ") if v.strip() != ""]
            for line in lines if len(line) > 0 and line[0] != "#"]
    list = [(float(l[0]), l[1:]) for l in list if len(l) > 1]
    return dict(list)

def associate(first_list, second_list, offset, max_difference):
    first_keys = list(first_list.keys())    # materialize as lists so remove() also works under Python 3
    second_keys = list(second_list.keys())
    potential_matches = [(abs(a - (b + offset)), a, b)
                         for a in first_keys
                         for b in second_keys
                         if abs(a - (b + offset)) < max_difference]
    potential_matches.sort()
    matches = []
    for diff, a, b in potential_matches:
        if a in first_keys and b in second_keys:
            first_keys.remove(a)
            second_keys.remove(b)
            matches.append((a, b))
    matches.sort()
    return matches

first_file = "/home/magus/jizongxing-workspace/slam/rosImage/depth-stamp.txt"
second_file = "/home/magus/jizongxing-workspace/slam/rosImage/rgb-stamp.txt"
first_list = read_file_list(first_file)
second_list = read_file_list(second_file)
matches = associate(first_list, second_list, float(0.0), float(0.02))
file_handle3 = open('/home/magus/jizongxing-workspace/slam/rosImage/associate.txt', 'w')
for a, b in matches:
    pair = str(a) + " " + " ".join(first_list[a]) + " " + str(b - float(0.0)) + " " + " ".join(second_list[b])
    file_handle3.write(pair + '\n')
file_handle3.close()

# 3 Rename each matched pair and write an identity pose file for every frame.
bp = '/home/magus/jizongxing-workspace/slam/rosImage/'         # base path
rp = '/home/magus/jizongxing-workspace/slam/rosImage/result/'  # result path
file_handle4 = open('/home/magus/jizongxing-workspace/slam/rosImage/associate.txt', 'r')
count = 0
for line in file_handle4.readlines():
    line = line.strip()
    # relative paths of the depth and color images
    path_d = line.split()[1]
    path_c = line.split()[3]
    if os.path.exists(bp + path_d) and os.path.exists(bp + path_c):
        rName = "frame-" + str(count).zfill(6) + ".color.jpg"  # renamed rgb image
        dName = "frame-" + str(count).zfill(6) + ".depth.png"  # renamed depth image
        pName = "frame-" + str(count).zfill(6) + ".pose.txt"   # pose file
        # create the pose file (identity matrix)
        file_handle5 = open(rp + pName, 'w')
        file_handle5.write("1 0 0 0\n0 1 0 0\n0 0 1 0\n0 0 0 1")
        file_handle5.close()
        shutil.copy(bp + path_d, rp + dName)
        shutil.copy(bp + path_c, rp + rName)
        count = count + 1
file_handle4.close()

# 4 Copy info.txt into result/ with the actual frame count, completing the BundleFusion input format.
file_handle6 = open('/home/magus/jizongxing-workspace/slam/rosImage/info.txt', 'r+')
info_lines = file_handle6.readlines()
info_lines[11] = "m_frames.size = " + str(count)
file_handle7 = open('/home/magus/jizongxing-workspace/slam/rosImage/result/info.txt', 'w+')
file_handle7.writelines(info_lines)
file_handle6.close()
file_handle7.close()

# 5 success message
print("Successfully transformed the bag file into the required input format of BundleFusion")
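Once the paths at the top of the script have been edited, running python bag2org.py fills the result folder. As a quick sanity check (a minimal sketch, assuming the same result path as in the script), the three per-frame file counts should all equal the m_frames.size written into result/info.txt:

import glob, os

rp = '/home/magus/jizongxing-workspace/slam/rosImage/result/'   # same result path as in bag2org.py
n_color = len(glob.glob(os.path.join(rp, '*.color.jpg')))
n_depth = len(glob.glob(os.path.join(rp, '*.depth.png')))
n_pose = len(glob.glob(os.path.join(rp, '*.pose.txt')))
print(n_color, n_depth, n_pose)  # all three counts should match m_frames.size in result/info.txt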

After the script finishes, package the source format into the sens format and offline 3D reconstruction can proceed.
