I want to detect the differences between two images, img1 and img2, and get output similar to the desired output.
I have already tried using the SIFT algorithm to detect features in both images and then extract the good matches.
import cv2 as cv
import numpy as np

MIN_MATCH_COUNT = 10

sift = cv.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)

# FLANN matcher with a KD-tree index (index type 1 is the KD-tree)
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=2)
flann = cv.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)

# Lowe's ratio test to keep only the good matches
good = []
for m, n in matches:
    if m.distance < 0.8 * n.distance:
        good.append(m)

if len(good) > MIN_MATCH_COUNT:
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)
    matchesMask = mask.ravel().tolist()
    h, w = img1.shape
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    dst = cv.perspectiveTransform(pts, M)
    img2 = cv.polylines(img2, [np.int32(dst)], True, 255, 3, cv.LINE_AA)
I got this result.
Any ideas on how to get the actual differences out of this algorithm, rather than just the good matches?
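For reference, a minimal sketch (not from the original post) of how the homography M estimated above could already be turned into a raw difference image, assuming img1 and img2 are single-channel grayscale arrays: warp img1 into img2's frame and take the per-pixel absolute difference. The answer below builds on the same idea with more cleanup.

# Sketch only: assumes the grayscale img1/img2 and the M computed above,
# where M maps img1 coordinates into img2 coordinates.
h2, w2 = img2.shape
img1_warped = cv.warpPerspective(img1, M, (w2, h2))  # bring img1 into img2's frame
diff = cv.absdiff(img1_warped, img2)                 # raw per-pixel difference
_, diff_mask = cv.threshold(diff, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)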
You could do a perspective warp to align one image to the other. Then convert both to HSV and take the saturation channel. Threshold each one and get the absolute difference between the two thresholded images. Then get the contours and, finally, the bounding boxes of those contours, which are drawn on the first image. Here is how to do that in Python/OpenCV. Note that I do not have SIFT installed, so I will use ORB in its place.

Input 1 (reference image):

Input 2 (image to be warped/aligned):
import cv2
import numpy as np
MAX_FEATURES = 500
GOOD_MATCH_PERCENT = 0.15
def alignImages(im1, im2):
    # im2 is the reference and im1 is to be warped to match im2
    # note: the numbering is swapped inside this function

    # Convert images to grayscale
    im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
    im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)

    # Detect ORB features and compute descriptors.
    orb = cv2.ORB_create(MAX_FEATURES)
    keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)
    keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)

    # Match features.
    matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = matcher.match(descriptors1, descriptors2, None)

    # Sort matches by score (sorted() also handles the tuple returned by newer OpenCV versions)
    matches = sorted(matches, key=lambda x: x.distance)

    # Remove the not-so-good matches
    numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
    matches = matches[:numGoodMatches]

    # Draw top matches
    imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None)
    cv2.imwrite("pipes_matches.png", imMatches)

    # Extract location of good matches
    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)
    for i, match in enumerate(matches):
        points1[i, :] = keypoints1[match.queryIdx].pt
        points2[i, :] = keypoints2[match.trainIdx].pt

    # Find homography
    h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)

    # Use homography to warp im1 onto im2's frame
    height, width, channels = im2.shape
    im1Reg = cv2.warpPerspective(im1, h, (width, height))

    return im1Reg, h
if __name__ == '__main__':
    # Read reference image
    refFilename = "pipes1.jpg"
    print("Reading reference image : ", refFilename)
    imReference = cv2.imread(refFilename, cv2.IMREAD_COLOR)

    # Read image to be aligned
    imFilename = "pipes2.jpg"
    print("Reading image to align : ", imFilename)
    im = cv2.imread(imFilename, cv2.IMREAD_COLOR)

    # Aligned image will be stored in imReg.
    # The estimated homography will be stored in h.
    imReg, h = alignImages(im, imReference)

    # Print estimated homography
    print("Estimated homography : \n", h)

    # Convert images to HSV and get saturation channel
    refSat = cv2.cvtColor(imReference, cv2.COLOR_BGR2HSV)[:, :, 1]
    imSat = cv2.cvtColor(imReg, cv2.COLOR_BGR2HSV)[:, :, 1]

    # Otsu threshold
    refThresh = cv2.threshold(refSat, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    imThresh = cv2.threshold(imSat, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]

    # Apply morphology open and close
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
    refThresh = cv2.morphologyEx(refThresh, cv2.MORPH_OPEN, kernel, iterations=1)
    refThresh = cv2.morphologyEx(refThresh, cv2.MORPH_CLOSE, kernel, iterations=1).astype(np.float64)
    imThresh = cv2.morphologyEx(imThresh, cv2.MORPH_OPEN, kernel, iterations=1).astype(np.float64)
    imThresh = cv2.morphologyEx(imThresh, cv2.MORPH_CLOSE, kernel, iterations=1)

    # Get the absolute difference between the two thresholded images
    diff = np.abs(cv2.add(imThresh, -refThresh))

    # Apply morphology open to remove thin lines caused by slight misalignment of the two images
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (13, 13))
    diff_cleaned = cv2.morphologyEx(diff, cv2.MORPH_OPEN, kernel, iterations=1).astype(np.uint8)
    # Get the external contours of the difference image and draw their bounding boxes
    # on the reference image (see the optional area/border filtering sketch after the script)
    cnts = cv2.findContours(diff_cleaned, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]  # findContours returns 2 or 3 values depending on the OpenCV version
    result = imReference.copy()
    for c in cnts:
        x, y, w, h = cv2.boundingRect(c)
        cv2.rectangle(result, (x, y), (x + w, y + h), (0, 0, 255), 2)
    # Show images
    cv2.imshow('reference', imReference)
    cv2.imshow('image', im)
    cv2.imshow('image_aligned', imReg)
    cv2.imshow('refThresh', refThresh)
    cv2.imshow('imThresh', imThresh)
    cv2.imshow('diff', diff)
    cv2.imshow('diff_cleaned', diff_cleaned)
    cv2.imshow('result', result)
    cv2.waitKey()

    # Save images
    cv2.imwrite('pipes2_aligned.jpg', imReg)
    cv2.imwrite('pipes_diff.png', diff_cleaned)
    cv2.imwrite('pipes_result.png', result)
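As an optional refinement (not in the original script), the drawing loop above could filter out tiny regions and skip boxes that touch the image border, which helps when slight misalignment leaves residue near the edges. A minimal sketch, where min_area is an illustrative value to tune; it assumes cnts, diff_cleaned, and imReference from the script:

# Optional replacement for the drawing loop above.
# min_area is an illustrative threshold, not from the original answer.
min_area = 500
img_h, img_w = diff_cleaned.shape[:2]
result = imReference.copy()
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    touches_border = x <= 0 or y <= 0 or x + w >= img_w or y + h >= img_h
    if cv2.contourArea(c) >= min_area and not touches_border:
        cv2.rectangle(result, (x, y), (x + w, y + h), (0, 0, 255), 2)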
Matches image:

Image 2 aligned to image 1:

Absolute difference image:

Result: