YOLOv5 Classification Model: Differences Between OpenCV and PyTorch Preprocessing
flyfish
PyTorch (torchvision) wraps the PIL library for image loading and transforms.
A quick comparison of how the two libraries read an image:
import cv2
from PIL import Image
import numpy as np

full_path_file_name = "/media/a//ILSVRC2012_val_00001244.JPEG"

# OpenCV reads images in BGR channel order by default
cv_image = cv2.imread(full_path_file_name)  # BGR
print(cv_image.shape)  # (400, 500, 3) HWC
cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
#print("cv_image:", cv_image)

# PIL reads images in RGB channel order by default
pil_image = Image.open(full_path_file_name)
print("pil_image:", pil_image)
numpy_image = np.array(pil_image)
print(numpy_image.shape)  # (400, 500, 3) HWC RGB
#print("numpy_image:", numpy_image)
After the BGR-to-RGB conversion, OpenCV and PIL return the same data.
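A quick sanity check, reusing the variables from the snippet above:

# Both arrays should now be identical RGB HWC uint8 data.
# Expected True; note that differently built JPEG decoders can,
# in rare cases, produce slightly different pixel values.
print(np.array_equal(cv_image, numpy_image))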
When a single integer size is given, torchvision matches the shorter edge to it. If height > width, the resized image dimensions are
\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)
that is, the shorter edge (width) is matched to size and the height is scaled proportionally.
Source: https://github.com/pytorch/vision/ (torchvision/transforms/functional.py)
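A small sketch of this behavior. The 400(W) x 500(H) image here is hypothetical, chosen so that height > width as in the formula:

from torchvision import transforms

# Height (500) > width (400): Resize(224) matches the shorter edge
# (width) to 224 and scales the height to 224 * 500 / 400 = 280.
demo = Image.new("RGB", (400, 500))   # PIL size is (W, H)
out = transforms.Resize(224)(demo)    # bilinear by default
print(out.size)                       # (224, 280) -> (W, H)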
The resulting problem
PyTorch's transforms.Resize uses bilinear interpolation with antialiasing, which cv2.resize does not apply, so the two pipelines produce different pixels and hence different inference results.
def resize(img: Tensor, size: List[int], interpolation: InterpolationMode = InterpolationMode.BILINEAR,
           max_size: Optional[int] = None) -> Tensor:

The docstring notes:
"The output image might be different depending on its type: when downsampling, the interpolation of PIL images and tensors is slightly different, because PIL applies antialiasing. This may lead to significant differences in the performance of a network. Therefore, it is preferable to train and serve a model with the same input types."
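A sketch of that PIL-vs-tensor difference. It assumes a torchvision version (>= 0.10) in which functional.resize accepts an antialias flag; pil_image comes from the first snippet:

import torch
import torchvision.transforms.functional as F
from torchvision.transforms import InterpolationMode

# PIL path: antialiasing is always applied for PIL inputs
pil_small = F.resize(pil_image, [224], interpolation=InterpolationMode.BILINEAR)

# Tensor path: antialiasing explicitly off
tensor_img = torch.from_numpy(np.array(pil_image)).permute(2, 0, 1).float()
tensor_small = F.resize(tensor_img, [224],
                        interpolation=InterpolationMode.BILINEAR, antialias=False)

# Mean absolute difference is non-zero when downsampling
diff = (torch.from_numpy(np.array(pil_small)).permute(2, 0, 1).float()
        - tensor_small).abs().mean()
print(diff)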
Comparing the differences:
from skimage.metrics import structural_similarity as ssim
from skimage.metrics import peak_signal_noise_ratio as psnr
from skimage.metrics import mean_squared_error as mse

target_size = 224
img_w = pil_image.width
img_h = pil_image.height

# Match the shorter edge to target_size, as torchvision's Resize does
if img_h >= img_w:
    image_width, image_height = target_size, int(target_size * img_h / img_w)
else:
    image_width, image_height = int(target_size * img_w / img_h), target_size
print(image_width, image_height)

pil_resize_img = pil_image.resize((image_width, image_height), Image.BILINEAR)
pil_resize_img = np.array(pil_resize_img)

# cv_image was already converted to RGB above, so only the resize differs
cv_resize_img0 = cv2.resize(cv_image, (image_width, image_height), interpolation=cv2.INTER_CUBIC)
cv_resize_img1 = cv2.resize(cv_image, (image_width, image_height), interpolation=cv2.INTER_NEAREST)
cv_resize_img2 = cv2.resize(cv_image, (image_width, image_height), interpolation=cv2.INTER_LINEAR)
cv_resize_img3 = cv2.resize(cv_image, (image_width, image_height), interpolation=cv2.INTER_AREA)
cv_resize_img4 = cv2.resize(cv_image, (image_width, image_height), interpolation=cv2.INTER_LANCZOS4)
cv_resize_img5 = cv2.resize(cv_image, (image_width, image_height), interpolation=cv2.INTER_LINEAR_EXACT)
cv_resize_img6 = cv2.resize(cv_image, (image_width, image_height), interpolation=cv2.INTER_NEAREST_EXACT)

print(mse(pil_resize_img, pil_resize_img))  # sanity check, expect 0.0
print(mse(pil_resize_img, cv_resize_img0))
print(mse(pil_resize_img, cv_resize_img1))
print(mse(pil_resize_img, cv_resize_img2))
print(mse(pil_resize_img, cv_resize_img3))
print(mse(pil_resize_img, cv_resize_img4))
print(mse(pil_resize_img, cv_resize_img5))
print(mse(pil_resize_img, cv_resize_img6))
structural_similarity, peak_signal_noise_ratio, or mean_squared_error can all be used for the comparison; mean_squared_error is used here. The output:
0.0                 (PIL vs. itself, sanity check)
30.721508290816328  (INTER_CUBIC)
103.37267219387755  (INTER_NEAREST)
13.030575042517007  (INTER_LINEAR)
2.272438350340136   (INTER_AREA)
36.33767538265306   (INTER_LANCZOS4)
13.034412202380953  (INTER_LINEAR_EXACT)
51.2258237670068    (INTER_NEAREST_EXACT)
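For reference, the other two metrics take the same image pairs; on recent scikit-image (>= 0.19), structural_similarity needs channel_axis for color images:

print(ssim(pil_resize_img, cv_resize_img3, channel_axis=2))
print(psnr(pil_resize_img, cv_resize_img3))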
INTER_AREA comes closest to PIL's antialiased bilinear downsampling, but none of the OpenCV modes reproduces it exactly. Hence PyTorch's recommendation: "Therefore, it is preferable to train and serve a model with the same input types."
In other words, use the same input pipeline for training and deployment.
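One minimal way to follow that advice in an OpenCV-based deployment, sketched with the variables above: route the decoded image through PIL for resizing so it matches the training transform exactly.

# cv_image is already RGB here; wrap it in PIL and resize the same way
pil_from_cv = Image.fromarray(cv_image)
matched = np.array(pil_from_cv.resize((image_width, image_height), Image.BILINEAR))
print(mse(pil_resize_img, matched))  # expected: 0.0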