我使用两台 Point Grey Chameleon3 单色(monochrome)相机,设置为 Master/Slave 并同步,因此可以作为立体相机。然后按照OpenCV教程校准相机。我有70个样本用于校准并获得校准结果。我有所有相机参数,可以访问它们并从校准中获得内参和外参
所以我拍摄了左右图像,使用相机矩阵和畸变系数对它们进行了去畸变(undistort),并使用平移和旋转矩阵将它们校正(rectify)为极线对齐的形式。然后使用calib3d模块的StereoSGBM类来创建视差图。这是我的代码
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <vector>
#include <string>
#include <iostream>
#include <iterator>
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <math.h>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d.hpp>
using namespace cv;
using namespace std;
int main( int argc, char** argv )
{
int i, j, lr;
String imageName_left( "/home/OpenCVCode/left_7m.pgm" ); // by default
if( argc > 1)
{
imageName_left = argv[1];
}
Mat image_left;
image_left = imread( imageName_left, IMREAD_COLOR ); // Read the file
if( image_left.empty() ) // Check for invalid input
{
cout << "Could not open or find the image" << std::endl ;
return -1;
}
String imageName_right( "/home/OpenCVCode/right_7m.pgm" ); // by default
if( argc > 1)
{
imageName_right = argv[1];
}
Mat image_right;
image_right = imread( imageName_right, IMREAD_COLOR ); // Read the file
if( image_right.empty() ) // Check for invalid input
{
cout << "Could not open or find the image" << std::endl ;
return -1;
}
cv::Size imageSize;
imageSize = image_left.size();
//Mat outputImage = image.clone();
vector<cv::Point3f> lines[2];
//double err = 0;
Mat R1=Mat1d(3, 3);
Mat R2=Mat1d(3, 3);
Mat P1=Mat1d(3, 4);
Mat P2=Mat1d(3, 4);
Mat Q=Mat1d(4, 4);
Rect validRoi[2];
double R_data[] = {0.9997286422545486, 0.000835337139108146, -0.023279692174526096,
0.0008925647184101439, 0.9998880281815514, -0.00244797956076857,
0.02327756826002481, 0.0024680939144444804, 0.9997259941245548};
double T_data[] = {-0.13808630092854335, -0.0016795993989732654, -0.005964101318274714};
cv::Mat R(3, 3, CV_64F, R_data);
cv::Mat T(3, 1, CV_64F, T_data);
Mat Camera_Matrix_Left = (Mat1d(3, 3) << 446.441726, 0, 266.768096, 0, 448.805499, 186.652251, 0, 0, 1);
Mat Distortion_Coefficients_Left = (Mat1d(1, 5) << -0.174502, 0.079787, 0.001010, 0.000899, 0);
Mat Camera_Matrix_Right = (Mat1d(3, 3) << 440.825465, 0, 277.733688, 0, 442.324307, 198.863384, 0, 0, 1);
Mat Distortion_Coefficients_Right = (Mat1d(1, 5) << -0.226564, 0.290791, 0.000979, 0.003149, 0);
Mat Matrix_R = (Mat1d(3, 3) << 0.9997286422545486, 0.000835337139108146, -0.023279692174526096,
0.0008925647184101439, 0.9998880281815514, -0.00244797956076857,
0.02327756826002481, 0.0024680939144444804, 0.9997259941245548);
Mat Matrix_T = (Mat1d(3, 1) << -0.13808630092854335, -0.0016795993989732654, -0.005964101318274714);
//undistort(image, outputImage, Camera_Matrix, Distortion_Coefficients);
stereoRectify(Camera_Matrix_Left, Distortion_Coefficients_Left, Camera_Matrix_Right, Distortion_Coefficients_Right, image_left.size(), R, T, R1, R2, P1, P2, Q, 0, 1, imageSize, &validRoi[0]);
FileStorage fs1("4m2.yml", FileStorage::WRITE);
fs1 << "R1" << R1;
fs1 << "R2" << R2;
fs1 << "P1" << P1;
fs1 << "P2" << P2;
fs1 << "Q" << Q;
fs1.release();
// Maps for AP View
//cv::stereoRectify(camera_matrix_ap, distortion_ap, camera_matrix_lat, distortion_lat, rectimg_ap.size(), R, T, R1, R2, P1, P2, Q, CALIB_ZERO_DISPARITY, 1, rectimg_ap.size(), &validRoi[0], &validRoi[1] );
Mat map1x(image_left.size(), CV_32FC1, 255.0);
Mat map2x(image_left.size(), CV_32FC1, 255.0);
// Maps for LAT View
Mat map1y(image_right.size(), CV_32FC1, 255.0);
Mat map2y(image_right.size(), CV_32FC1, 255.0);
cv::initUndistortRectifyMap(Camera_Matrix_Left, Distortion_Coefficients_Left, R1, P1, image_left.size(), CV_32FC1, map1x, map1y);
cv::initUndistortRectifyMap(Camera_Matrix_Right, Distortion_Coefficients_Right, R2, P2, image_right.size(), CV_32FC1, map2x, map2y);
cv::Mat tmp1, tmp2;
cv::remap(image_left, tmp1, map1x, map1y, INTER_LINEAR);
cv::remap(image_right, tmp2, map2x, map2y, INTER_LINEAR);
namedWindow( "Display window1", WINDOW_AUTOSIZE ); // Create a window for display.
imshow( "Display window1", tmp2); // Show our image inside it.
namedWindow( "Display window2", WINDOW_AUTOSIZE ); // Create a window for display.
imshow( "Display window2", tmp2 );
//Dispaity Map
cv::Mat pair;
pair.create(imageSize.height, imageSize.width * 2, CV_8UC3);
cv::Ptr<cv::StereoSGBM> stereo = cv::StereoSGBM::create(
-64, 128, 11, 100, 1000, 32, 0, 15, 1000, 16, cv::StereoSGBM::MODE_HH);
cv::Mat img1 = cv::imread(imageName_left, 0);
cv::Mat img2 = cv::imread(imageName_right, 0);
cv::Mat img1r, img2r, disp, vdisp;
cv::remap(img1, img1r, map1x, map1y, cv::INTER_LINEAR);
cv::remap(img2, img2r, map2x, map2y, cv::INTER_LINEAR);
stereo->compute(img1r, img2r, disp);
cv::normalize(disp, vdisp, 0, 256, cv::NORM_MINMAX, CV_8U);
cv::imshow("Vertical disparity", vdisp);
cv::Mat part = pair.colRange(0, imageSize.width);
cvtColor(img1r, part, cv::COLOR_GRAY2BGR);
part = pair.colRange(imageSize.width, imageSize.width * 2);
cvtColor(img2r, part, cv::COLOR_GRAY2BGR);
for (j = 0; j < imageSize.height; j += 16)
cv::line(pair, cv::Point(0, j), cv::Point(imageSize.width * 2, j),cv::Scalar(0, 255, 0));
cv::imshow("Vertical rectified", pair);
waitKey(); // Wait for a keystroke in the window
//return 0;
}
这是左右原始图像。 Left Image Right Image
这里链接到左left raw image和右图像right raw image
校正后的图像(Rectified Images)和视差图(Disparity Map)
我有经过校正的图像和视差图。我想计算从相机到棋盘格的实际距离。怎么办?我知道从相机到该物体的真实距离(ground truth),但我想知道我的相机有多准确。有帮助吗?谢谢
既然你做了所有艰苦的工作并得到了你的视差图,那么从这里计算深度就很容易了。要计算每个像素的深度,您需要三件事: 1-像素的视差值 2-相机之间的距离 3-焦距(如果由于某种原因你的相机有不同的焦距,你可以使用平均值)
Depth = (focal length * distance between cameras) / (disparity value)
将此等式应用于视差图的每个像素。如果焦距和视差值的单位都是像素,那么计算出的深度将与相机间距离(基线)使用相同的单位(cm、mm 等)
// assuming dmap is the single channel disparity map Mat (CV_8U)
// f: focal length in pixels, b: baseline (distance between cameras);
// the resulting depth is in the same unit as b.
depthImage = cv::Mat(dmap.size(), CV_32FC1);
for (int row = 0; row < dmap.rows; ++row)
{
    for (int col = 0; col < dmap.cols; ++col)
    {
        int disparityValue = (int) dmap.at<uchar>(row, col);
        // FIX: disparity 0 means "no match" — the original divided by
        // zero here. Mark such pixels as invalid (depth 0).
        depthImage.at<float>(row, col) =
            (disparityValue > 0) ? (f * b) / disparityValue : 0.0f;
    }
}
或者你可以只使用available opencv function,可能更好。