全景拼接
1. 将读取进行灰度转化,并且输出图像,关键点和计算描述
import cv2
import numpy as np

# Read the two input images (cv2.imread returns them in BGR channel order).
image_left = cv2.imread("C:\\Users\\HONOR\\Desktop\\image\\pinjie_1.png")
image_right = cv2.imread("C:\\Users\\HONOR\\Desktop\\image\\pinjie_2.png")

# Resize the left image so its width matches the right image's width.
# (Fixes the original's misspelled `imge_letf`, which was never used again.)
image_left_resized = cv2.resize(image_left, (image_right.shape[1], image_left.shape[0]))
print(image_left_resized.shape)

# Convert to grayscale for feature detection.  cv2.imread yields BGR, so
# COLOR_BGR2GRAY is the correct code (the original used COLOR_RGB2GRAY,
# which swaps the R/B channel weights; the final script below uses BGR2GRAY).
image_left_gray = cv2.cvtColor(image_left, cv2.COLOR_BGR2GRAY)
image_right_gray = cv2.cvtColor(image_right, cv2.COLOR_BGR2GRAY)

# Create a SIFT detector and compute keypoints + descriptors for both images.
sift = cv2.SIFT_create()
kp_left, des_left = sift.detectAndCompute(image_left_gray, None)
kp_right, des_right = sift.detectAndCompute(image_right_gray, None)

# Draw the detected keypoints on the grayscale images for inspection.
draw_left = cv2.drawKeypoints(
    image_left_gray, kp_left, None, flags=cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS
)
draw_right = cv2.drawKeypoints(
    image_right_gray, kp_right, None, flags=cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS
)

# Show each original image next to its annotated version.
cv2.imshow('draw_left', np.hstack((image_left, draw_left)))
cv2.imshow('draw_right', np.hstack((image_right, draw_right)))
cv2.waitKey(0)

# Save the side-by-side previews.
cv2.imwrite('draw_left.png', np.hstack((image_left, draw_left)))
cv2.imwrite('draw_right.png', np.hstack((image_right, draw_right)))
运行后保存图片:
2.利用匹配器 匹配两个描述符的相近程度,并将特征点进行连线
# Create a BFMatcher object with cv2.BFMatcher().
bf = cv2.BFMatcher()
# knnMatch returns the k best candidates per query descriptor
# (plain match() would return only the single best one).
# Query descriptors = right image (first arg), train = left image (second arg).
matches = bf.knnMatch(des_right, des_left, k=2)
# Sort match pairs by Lowe ratio (best / second-best distance).
matches = sorted(matches, key=lambda x: x[0].distance / x[1].distance)
# Lowe's ratio test: keep a match only when the best candidate is clearly
# better than the runner-up.
good = []
for m, n in matches:
    if m.distance < 0.6 * n.distance:
        good.append(m)
# Draw the surviving matches between the two images.
# (Fixes the original snippet's undefined names: `cv`, `img_right`,
# `key_right`, `good_match`, `bfs.img_show`, and the `all_goog_image` typo.)
all_good_image = cv2.drawMatches(
    image_right, kp_right, image_left, kp_left, good, None, None, None, None, flags=2
)
cv2.imshow("关键点之间进行连线", all_good_image)
3.对图像进行拼接
# Estimating a homography needs at least 4 point correspondences.
# (Indentation of this block was lost in the paste; restored here.)
if len(good) > 4:
    # queryIdx indexes the right image (first knnMatch argument),
    # trainIdx indexes the left image (second argument).
    ptsR = np.float32(
        [kp_right[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    ptsL = np.float32(
        [kp_left[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    # RANSAC re-projection threshold, in pixels.
    ransacReprojThreshold = 4
    # Homography maps right-image coordinates onto the left image's plane.
    Homography, status = cv2.findHomography(
        ptsR, ptsL, cv2.RANSAC, ransacReprojThreshold)
    # Warp the right image onto a canvas wide enough to hold both images.
    Panorama = cv2.warpPerspective(
        image_right, Homography,
        (image_right.shape[1] + image_left.shape[1], image_right.shape[0]))
    cv2.imshow("扭曲变换后的右图", Panorama)
    cv2.imwrite('扭曲变换后的右图.png', Panorama)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # Paste the untouched left image over the left part of the canvas.
    Panorama[0:image_left.shape[0], 0:image_left.shape[1]] = image_left
    cv2.namedWindow("全景图", cv2.WINDOW_AUTOSIZE)
    cv2.imshow("全景图", Panorama)
    cv2.imwrite("END.png", Panorama)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Panorama[0:image_left.shape[0], 0:image_left.shape[1]] = image_left
ValueError: could not broadcast input array from shape (376,498,3) into shape (374,498,3)
以上报错的原因:参与赋值的两个数组 shape 不一致(例如 (376,498,3) 无法广播到 (374,498,3)),即左图与全景画布对应区域的尺寸没有对齐。
解决方法:此时检查一下下面这两行代码有没有生效:
image_right = cv2.resize(image_right, (400, 800))
image_left = cv2.resize(image_left, (400, 800))
bug解决:
如果我们的扭曲变换后的图
和最后的图
出现这样的情况的话我们需要关注一下以下代码
因为这段代码是将左图的描述符 `des_left` 作为模板图(train),右图的描述符 `des_right` 作为去匹配的查询图(query)
matches = bf.knnMatch(des_left, des_right, k=2)
但是这两段代码所反映的情况是将右图作为模板图进行拼接(也就是计算机蒙了)
Panorama = cv2.warpPerspective(
image_right, Homography, (image_right.shape[1] + image_left.shape[1], image_right.shape[0])
)
Panorama[0:image_left.shape[0], 0:image_left.shape[1]] = image_left
源码:
# -*- coding: UTF-8 -*-
# @Project :opencv
# @File :全景拼接笔记.py
# @Author :阿龙的代码在报错
# @IDE :PyCharm
# @Date :2024/4/13 16:23
"""Panorama stitching demo: SIFT features + BFMatcher + RANSAC homography."""
import cv2
import numpy as np

# Read the two input images (cv2.imread returns BGR, or None on failure).
image_left = cv2.imread("C:\\Users\\HONOR\\Desktop\\image\\pinjie_1.png")
image_right = cv2.imread("C:\\Users\\HONOR\\Desktop\\image\\pinjie_2.png")
if image_left is None or image_right is None:
    # cv2.imread fails silently (returns None); fail fast with a clear error
    # instead of an AttributeError on .shape further down.
    raise FileNotFoundError("failed to read one of the input images")

# Force both images to the same size: the hstack previews and the final
# paste into the panorama canvas both require matching shapes.
image_right = cv2.resize(image_right, (400, 800))
image_left = cv2.resize(image_left, (400, 800))

# Grayscale copies for feature detection and keypoint visualization.
gray_left = cv2.cvtColor(image_left, cv2.COLOR_BGR2GRAY)
gray_right = cv2.cvtColor(image_right, cv2.COLOR_BGR2GRAY)

# Detect SIFT keypoints and compute descriptors on the grayscale images.
# (The original passed the color images even though the grays were computed;
# detecting on the grays makes the intent consistent.)
sift = cv2.SIFT_create()
kp_left, des_left = sift.detectAndCompute(gray_left, None)
kp_right, des_right = sift.detectAndCompute(gray_right, None)

# Draw the detected keypoints for inspection.
draw_left = cv2.drawKeypoints(
    gray_left, kp_left, None, flags=cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS)
draw_right = cv2.drawKeypoints(
    gray_right, kp_right, None, flags=cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS)

# Show each original image next to its annotated version.
cv2.imshow('draw_left', np.hstack((image_left, draw_left)))
cv2.imshow('draw_right', np.hstack((image_right, draw_right)))
cv2.waitKey(0)

# Save the side-by-side previews.
cv2.imwrite('draw_left.png', np.hstack((image_left, draw_left)))
cv2.imwrite('draw_right.png', np.hstack((image_right, draw_right)))

# Brute-force matcher; knnMatch returns the k best candidates per query
# descriptor (plain match() returns only the single best one).
bf = cv2.BFMatcher()
# Query descriptors = right image (first argument),
# train descriptors = left image (second argument).
# queryIdx/trainIdx below follow this order.
matches = bf.knnMatch(des_right, des_left, k=2)
# Sort match pairs by Lowe ratio (best / second-best distance).
matches = sorted(matches, key=lambda x: x[0].distance / x[1].distance)
# Lowe's ratio test: keep a match only when the best candidate is clearly
# better than the runner-up.
good = []
for m, n in matches:
    if m.distance < 0.6 * n.distance:
        good.append(m)
# Visualize the surviving matches between the two images.
all_goog_image = cv2.drawMatches(
    image_right, kp_right, image_left, kp_left, good, None, None, None, None, flags=2
)
cv2.imshow("关键点之间进行连线", all_goog_image)
cv2.imwrite('all_good.png', all_goog_image)

# Estimating a homography needs at least 4 point correspondences.
if len(good) > 4:
    # queryIdx indexes the right image, trainIdx the left (see knnMatch order).
    ptsR = np.float32(
        [kp_right[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    ptsL = np.float32(
        [kp_left[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    # RANSAC re-projection threshold, in pixels.
    ransacReprojThreshold = 4
    # Homography maps right-image coordinates onto the left image's plane.
    Homography, status = cv2.findHomography(
        ptsR, ptsL, cv2.RANSAC, ransacReprojThreshold)
    # Warp the right image onto a canvas wide enough to hold both images.
    Panorama = cv2.warpPerspective(
        image_right, Homography,
        (image_right.shape[1] + image_left.shape[1], image_right.shape[0]))
    cv2.imshow("扭曲变换后的右图", Panorama)
    # NOTE(review): cv2.imwrite with a non-ASCII filename can fail silently on
    # Windows; verify the file appears (cv2.imencode + .tofile is the usual
    # workaround) — TODO confirm on the target machine.
    cv2.imwrite('扭曲变换后的右图.png', Panorama)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # Paste the untouched left image over the left part of the canvas; this is
    # the line that raises the broadcast ValueError if the sizes don't match.
    Panorama[0:image_left.shape[0], 0:image_left.shape[1]] = image_left
    cv2.namedWindow("全景图", cv2.WINDOW_AUTOSIZE)
    cv2.imshow("全景图", Panorama)
    cv2.imwrite("END.png", Panorama)
    cv2.waitKey(0)
    cv2.destroyAllWindows()