我实现了 3d 重建,但这个过程会出问题.. 如何解决?
我要执行这份工作
1.从每个图像中提取 SIFT 并找到推定的匹配项
2.应用RANSAC应该拒绝异常值
3.找到最佳图像对(简单地说,具有最大匹配或考虑基线)
4.Estimate motion(Randt) and Reconstruct 3D points for the selected image pair。一个相机的相机坐标用于世界坐标。
5。搜索具有足够点的图像,看到重建的 3D 点
6.Computepose(Randt) 用于这些图像并重建从两个以上图像中看到的更多 3D 点
重复生长步骤,直到包含每个相机。
File "C:\Users\usrs\Documents\CV\CV.py", line 39, in ransac
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
cv2.error: OpenCV(4.5.1) ..\modules\calib3d\src\fundam.cpp:385: error: (-28:Unknown error code -28) The input arrays should have at least 4 corresponding point sets to calculate Homography in function 'cv::findHomography'
import cv2
import numpy as np
import scipy
import open3d as o3d
import os
import glob
# Correspondence search step
def correspondence_search(images):
sift = cv2.xfeatures2d.SIFT_create()
kp_list = []
des_list = []
matches_list = []
for i in range(len(images)):
img = cv2.imread(images[i])
kp, des = sift.detectAndCompute(img, None)
if len(kp) > 0 and len(des) > 0:
kp_list.append(kp)
des_list.append(des)
else:
print(f"No keypoints or descriptors found in image {i}")
for j in range(i + 1, len(images)):
matcher = cv2.BFMatcher()
matches = matcher.match(des_list[0][i], des_list[0][j])
#matches = matcher.match(des_list[i], des_list[j])
matches_list.append((i, j, matches))
return kp_list, des_list, matches_list
# RANSAC to reject outliers
def ransac(kp1, kp2, matches):
if isinstance(matches, int):
matches = [matches]
filtered_matches = [m for m in matches if hasattr(m, 'queryIdx') and hasattr(m, 'trainIdx')]
src_pts = np.float32([kp1[m.queryIdx].pt for m in filtered_matches]).reshape(-1, 1, 2)
dst_pts = np.float32([kp2[m.trainIdx].pt for m in filtered_matches]).reshape(-1, 1, 2)
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
matches_mask = mask.ravel().tolist()
return matches_mask
# Initialization step
def initialization_step(kp_list, des_list, matches_list):
max_matches = 0
best_pair = None
for match in matches_list:
i, j, matches = match
matches_mask = ransac(kp_list[i], kp_list[j], matches)
if matches_mask.count(1) > max_matches:
max_matches = matches_mask.count(1)
best_pair = (i, j, matches_mask)
Randt = estimate_motion(kp_list[best_pair[0]], kp_list[best_pair[1]], np.array(matches)[np.array(matches_mask).nonzero()[0]])
points_3d = reconstruct_3d_points(kp_list[best_pair[0]], kp_list[best_pair[1]], Randt, np.array(matches)[np.where(matches_mask == 1)[0]])
return best_pair, Randt, points_3d
# Estimate motion and reconstruct 3D points
def estimate_motion(kp1, kp2, matches):
src_pts = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
E, mask = cv2.findEssentialMat(src_pts, dst_pts, np.eye(3), method=cv2.RANSAC, prob=0.99, threshold=0.5)
_, R, t, mask = cv2.recoverPose(E, src_pts, dst_pts)
Randt = np.hstack((R, t))
return Randt
def reconstruct_3d_points(kp1, kp2, Randt, matches):
K = np.array([[1000, 0, 500], [0, 1000, 500], [0, 0, 1]])
src_pts = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
if K.shape != (3, 4):
K = np.hstack((K, np.zeros((3, 1))))
if Randt.shape != (3, 4):
Randt = np.hstack((Randt, np.zeros((3, 1))))
points_4d_hom = cv2.triangulatePoints(K, Randt, src_pts.T, dst_pts.T)
points_3d_hom = points_4d_hom / points_4d_hom[3]
points_3d = points_3d_hom[:3].T
return points_3d
def growing_step(best_pair, Randt, kp_list, des_list, matches_list, points_3d):
reconstructed_indices = list(best_pair[:2])
while len(reconstructed_indices) < len(kp_list):
max_points = 0
best_index = None
for i in range(len(kp_list)):
if i not in reconstructed_indices:
matches_mask = ransac(kp_list[reconstructed_indices[-1]], kp_list[i],
matches_list[reconstructed_indices[-1]][i])
if matches_mask.count(1) > 20:
Rnewt = estimate_motion(kp_list[reconstructed_indices[-1]], kp_list[i],
np.array(matches_list[reconstructed_indices[-1]][i])[
np.where(matches_mask == 1)[0]])
Pnew = reconstruct_3d_points(kp_list[reconstructed_indices[-1]], kp_list[i],
np.dot(Rnewt, Randt[:, -1]), np.array(matches_list[reconstructed_indices[-1]][i])[
np.where(matches_mask == 1)[0]])
num_points = np.sum([p[2] > 0 for p in Pnew])
if num_points > max_points:
max_points = num_points
best_index = i
if best_index is None:
break
matches_mask = ransac(kp_list[reconstructed_indices[-1]], kp_list[best_index],
matches_list[reconstructed_indices[-1]][best_index])
Randt_new = estimate_motion(kp_list[reconstructed_indices[-1]], kp_list[best_index],
np.array(matches_list[reconstructed_indices[-1]][best_index])[np.where(matches_mask == 1)[0]])
P_new = reconstruct_3d_points(kp_list[reconstructed_indices[-1]], kp_list[best_index], np.dot(Randt_new, Randt[:, -1]), np.array(matches_list[reconstructed_indices[-1]][best_index])[np.where(matches_mask == 1)[0]])
points_3d = np.vstack((points_3d, P_new))
reconstructed_indices.append(best_index)
return points_3d
def bundle_optimization(points_3d, Randt, K, kp_list, matches_list):
points_3d_hom = np.concatenate((points_3d, np.ones((points_3d.shape[0], 1))), axis=1)
num_matches = sum(len(matches) for matches in matches_list)
num_points = points_3d.shape[0]
A = np.zeros((2 * num_matches, 3 * num_points))
b = np.zeros((2 * num_matches,))
index_map = {}
index = 0
for i, matches in enumerate(matches_list):
proj_mat = np.dot(np.dot(K, Randt[i*3:i*3+3]), np.linalg.inv(np.vstack((points_3d_hom.T, np.zeros((1, points_3d_hom.shape[0]))))))
for j, match in enumerate(matches):
if match is not None:
if match not in index_map:
index_map[match] = index
index += 1
proj_point_hom = np.dot(proj_mat, points_3d_hom[match])
proj_point = proj_point_hom[:2] / proj_point_hom[2]
A[2*j:2*j+2, 3*index_map[match]:3*index_map[match]+3] = np.array([[1, 0, -proj_point[0]], [0, 1, -proj_point[1]]])
b[2*j:2*j+2] = kp_list[i][j][:2] - proj_point
x, _ = scipy.optimize.nnls(A, b)
for i in range(num_points):
points_3d[i] += x[3*i:3*i+3]
return points_3d, Randt
def visualization_step(points_3d):
# Create an Open3D point cloud from the reconstructed 3D points
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(points_3d)
#pcd.points = o3d.utility.Vector3dVector(points_3d[:, :3])
o3d.visualization.draw_geometries([pcd])
# Export the point cloud as a PLY file
o3d.io.write_point_cloud("reconstructed_scene.ply", pcd)
def pipeline(images):
kp_list, des_list, matches_list = correspondence_search(images)
best_pair, Randt, points_3d = initialization_step(kp_list, des_list, matches_list)
points_3d = growing_step(best_pair, Randt, kp_list, des_list, matches_list, points_3d)
visualization_step(points_3d)
image_folder = 'data/'
images = glob.glob(os.path.join(image_folder,"*.jpg"))
#images = ["image1.png", "image2.png", "image3.png", "image4.png"]
pipeline(images)
我试过转换点。但我无法处理它..