Most of my code comes from the following two sources:
http://www.learnopencv.com/homography-examples-using-opencv-python-c/ and http://docs.opencv.org/3.1.0/d1/de0/tutorial_py_feature_homography.html
import numpy as np
import cv2
from matplotlib import pyplot as plt

# FIXME: doesn't work
def deskew():
    im_out = cv2.warpPerspective(img1, M, (img2.shape[1], img2.shape[0]))
    plt.imshow(im_out, 'gray')
    plt.show()

# resizing images to improve speed
factor = 0.4
img1 = cv2.resize(cv2.imread("image.png", 0), None, fx=factor, fy=factor,
                  interpolation=cv2.INTER_CUBIC)
img2 = cv2.resize(cv2.imread("imageSkewed.png", 0), None, fx=factor, fy=factor,
                  interpolation=cv2.INTER_CUBIC)

surf = cv2.xfeatures2d.SURF_create()
kp1, des1 = surf.detectAndCompute(img1, None)
kp2, des2 = surf.detectAndCompute(img2, None)

FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)

# store all the good matches as per Lowe's ratio test
good = []
for m, n in matches:
    if m.distance < 0.7 * n.distance:
        good.append(m)

MIN_MATCH_COUNT = 10
if len(good) > MIN_MATCH_COUNT:
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    matchesMask = mask.ravel().tolist()

    # project the corners of img1 into img2 using the homography
    h, w = img1.shape
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(pts, M)

    deskew()

    img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
else:
    print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
    matchesMask = None

# show matching keypoints
draw_params = dict(matchColor=(0, 255, 0),  # draw matches in green color
                   singlePointColor=None,
                   matchesMask=matchesMask,  # draw only inliers
                   flags=2)
img3 = cv2.drawMatches(img1, kp1, img2, kp2, good, **draw_params)
plt.imshow(img3, 'gray')
plt.show()

Solution

It turns out I was very close to solving my own problem.
Here is the working version of my code:
import numpy as np
import cv2
from matplotlib import pyplot as plt
import math

def deskew():
    # warp the skewed image back onto the original using the inverse homography
    im_out = cv2.warpPerspective(skewed_image, np.linalg.inv(M),
                                 (orig_image.shape[1], orig_image.shape[0]))
    plt.imshow(im_out, 'gray')
    plt.show()

orig_image = cv2.imread(r'image.png', 0)
skewed_image = cv2.imread(r'imageSkewed.png', 0)

surf = cv2.xfeatures2d.SURF_create(400)
kp1, des1 = surf.detectAndCompute(orig_image, None)
kp2, des2 = surf.detectAndCompute(skewed_image, None)

FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)

# store all the good matches as per Lowe's ratio test
good = []
for m, n in matches:
    if m.distance < 0.7 * n.distance:
        good.append(m)

MIN_MATCH_COUNT = 10
if len(good) > MIN_MATCH_COUNT:
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

    # see https://ch.mathworks.com/help/images/examples/find-image-rotation-and-scale-using-automated-feature-matching.html for details
    ss = M[0, 1]
    sc = M[0, 0]
    scaleRecovered = math.sqrt(ss * ss + sc * sc)
    thetaRecovered = math.atan2(ss, sc) * 180 / math.pi
    print("Calculated scale difference: %.2f\nCalculated rotation difference: %.2f" %
          (scaleRecovered, thetaRecovered))

    deskew()
else:
    print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
    matchesMask = None
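One note on the design choice: the fix is that the homography M maps points of the original image onto the skewed image, so the skewed image has to be warped with the inverse of M (and into the original image's size) to undo the distortion, rather than warping the original with M as in the first attempt.

SURF is patented and only ships in the non-free xfeatures2d contrib module, so it is missing from many stock OpenCV builds. Below is a minimal sketch of the same deskewing pipeline using the freely available ORB detector and a Hamming-distance brute-force matcher instead; it is not the code from the answer above, and the file names, feature count, and output name are placeholders, not values from the original post.

import numpy as np
import cv2

orig_image = cv2.imread('image.png', 0)
skewed_image = cv2.imread('imageSkewed.png', 0)

# ORB is free to use and available in every standard OpenCV build
orb = cv2.ORB_create(nfeatures=2000)
kp1, des1 = orb.detectAndCompute(orig_image, None)
kp2, des2 = orb.detectAndCompute(skewed_image, None)

# ORB descriptors are binary, so match them with Hamming distance
bf = cv2.BFMatcher(cv2.NORM_HAMMING)
matches = bf.knnMatch(des1, des2, k=2)

# Lowe's ratio test, same as in the SURF version
good = [m for m, n in matches if m.distance < 0.7 * n.distance]

MIN_MATCH_COUNT = 10
if len(good) > MIN_MATCH_COUNT:
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

    # M maps the original onto the skewed image; invert it to undo the skew
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    im_out = cv2.warpPerspective(skewed_image, np.linalg.inv(M),
                                 (orig_image.shape[1], orig_image.shape[0]))
    cv2.imwrite('imageDeskewed.png', im_out)
else:
    print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))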