
[Change Detection] Building Change Detection on LEVIR-CD with SuperPoint + LightGlue + TinyCD, with ONNX Inference

I'll flesh out the write-up in detail later; for now, here's the code!

1 Creating the Files and Input Folders

Note: the img directory contains the epoch-A and epoch-B folders, and filenames in the two folders must match one-to-one (a quick pairing check is sketched below).
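Since every A-epoch image needs a same-named counterpart in B, it can save time to verify this before running the pipeline. The following is a minimal sketch, not part of the original code; the folder paths follow the argparse defaults used in main.py below:

import os

def check_pairs(a_dir='img/A', b_dir='img/B'):
    # Gather image filenames from both epoch folders
    exts = ('.png', '.jpg')
    a_files = {f for f in os.listdir(a_dir) if os.path.splitext(f)[-1] in exts}
    b_files = {f for f in os.listdir(b_dir) if os.path.splitext(f)[-1] in exts}
    # Report any filename that lacks a counterpart in the other epoch
    for name in sorted(a_files - b_files):
        print(f'missing in B: {name}')
    for name in sorted(b_files - a_files):
        print(f'missing in A: {name}')
    return a_files == b_files

print('Folders aligned:', check_pairs())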

1.1 Main Script

Create a new file main.py with the following content:

import os
import cv2
import time
import argparse
import numpy as np
from tinycd import Tinycd
from SuperPointLightGlue import SuperPointLightGlue


if __name__ == '__main__':
    # Create an argument parser to handle command-line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--lightglue_model', type=str, default='weights/superpoint_1024_lightglue_end2end.onnx', help='Path to the SuperPoint+LightGlue feature-matching model')
    parser.add_argument('--tinycd_model', type=str, default='weights/tinycd_dy.onnx', help='Path to the change-detection model')
    parser.add_argument('--source_A', type=str, default='img/A', help='Folder of epoch-A images')
    parser.add_argument('--source_B', type=str, default='img/B', help='Folder of epoch-B images')
    parser.add_argument('--results', type=str, default='img/results', help='Output folder for change-detection masks')
    parser.add_argument('--sum_results', type=str, default='img/sum_results', help='Output folder for A + B + mask strips')
    parser.add_argument('--in_shape', type=int, default=1024, help='Input image size for the TinyCD model')
    parser.add_argument('--detect_model', type=str, default='ADD_radio', help='Fusion mode')
    args = parser.parse_args()

    # Instantiate the models
    lightglue = SuperPointLightGlue(args.lightglue_model, save_tmp=True)
    tiny_cd = Tinycd(args.tinycd_model, args.in_shape)
    obj = lightglue.glue(lightglue, args.source_A, args.source_B, args.detect_model)

    os.makedirs(args.results, exist_ok=True)
    os.makedirs(args.sum_results, exist_ok=True)

    t1 = time.time()
    for i, (a_image, c_image, image_filename) in enumerate(obj):
        # Change-detection inference + output
        mask_out = tiny_cd.infer(a_image, c_image)  # (in_shape, in_shape) uint8 mask
        print('Alignment + fusion + change detection, image pair {} => total time: {}'.format(image_filename, time.time() - t1))
        cv2.imwrite(os.path.join(args.results, image_filename), mask_out)
        # Save the concatenated A + fused-B + mask strip
        if a_image.shape[0] != mask_out.shape[0]:
            final_concatenated = cv2.hconcat([
                a_image,
                c_image.astype(np.uint8),
                cv2.resize(cv2.cvtColor(mask_out, cv2.COLOR_GRAY2BGR), (a_image.shape[1], a_image.shape[0]), interpolation=cv2.INTER_LINEAR),
            ])
        else:
            final_concatenated = cv2.hconcat([a_image, c_image.astype(np.uint8), cv2.cvtColor(mask_out, cv2.COLOR_GRAY2BGR)])
        cv2.imwrite(os.path.join(args.sum_results, image_filename), final_concatenated)
        t1 = time.time()
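With the two helper modules below saved as tinycd.py and SuperPointLightGlue.py next to main.py, and the ONNX weights placed under weights/, the whole pipeline runs with the argparse defaults:

python main.py

Each change mask is written to img/results, and the A | fused-B | mask strip to img/sum_results.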

1.2 TinyCD Change-Detection Code

Save the following as tinycd.py (main.py imports it as `from tinycd import Tinycd`):

import os
import cv2
import time
import argparse
import numpy as np
import onnxruntime as ort  # ONNX Runtime inference; install with: pip install onnxruntime-gpu==1.12.0 -i https://pypi.tuna.tsinghua.edu.cn/simple


class Tinycd(object):
    def __init__(self, onnx_model, in_shape=1024):
        self.in_shape = in_shape  # model input size
        self.mean = [0.485, 0.456, 0.406]  # mean and std for normalization (defined on the 0-1 range,
        self.std = [0.229, 0.224, 0.225]   # so they must match the range of the image data)
        # Build the onnxruntime inference session
        self.ort_session = ort.InferenceSession(
            onnx_model,
            providers=['CUDAExecutionProvider', 'CPUExecutionProvider']
            if ort.get_device() == 'GPU' else ['CPUExecutionProvider'])

    # Normalization
    def normalize(self, image, mean, std):
        # mean/std are computed on 0-1 data, so scale the 0-255 image to 0-1 first
        image = image / 255.0
        image = image.astype(np.float32)
        image_normalized = np.zeros_like(image)
        for i in range(3):  # each of the RGB channels
            image_normalized[:, :, i] = (image[:, :, i] - mean[i]) / std[i]
        return image_normalized

    def preprocess(self, img_a, img_b):
        # Resize to in_shape x in_shape (resize if either dimension differs)
        if img_a.shape[0] != self.in_shape or img_a.shape[1] != self.in_shape:
            img_a = cv2.resize(img_a, (self.in_shape, self.in_shape), interpolation=cv2.INTER_LINEAR)
        if img_b.shape[0] != self.in_shape or img_b.shape[1] != self.in_shape:
            img_b = cv2.resize(img_b, (self.in_shape, self.in_shape), interpolation=cv2.INTER_LINEAR)
        # Apply normalization
        img_a = self.normalize(img_a, self.mean, self.std)
        img_b = self.normalize(img_b, self.mean, self.std)
        img_a = np.ascontiguousarray(np.einsum('HWC->CHW', img_a)[::-1], dtype=np.single)  # (1024, 1024, 3) --> (3, 1024, 1024), BGR --> RGB
        img_b = np.ascontiguousarray(np.einsum('HWC->CHW', img_b)[::-1], dtype=np.single)  # np.single is equivalent to np.float32
        img_a = img_a[None] if len(img_a.shape) == 3 else img_a  # (1, 3, 1024, 1024)
        img_b = img_b[None] if len(img_b.shape) == 3 else img_b
        concat_img = np.concatenate((img_a, img_b), axis=1)
        return concat_img

    # Inference
    def infer(self, img_a, img_b):
        concat_img = self.preprocess(img_a, img_b)  # (1024, 1024, 3) + (1024, 1024, 3) --> (1, 6, 1024, 1024)
        preds = self.ort_session.run(None, {self.ort_session.get_inputs()[0].name: concat_img})[0]  # (1, n, 1024, 1024)
        if preds.shape[1] == 1:
            out_img = (np.clip(preds[0][0], 0, 1) * 255).astype("uint8")
        else:
            out_img = (np.argmax(preds, axis=1)[0] * 255).astype("uint8")
        return out_img


if __name__ == '__main__':
    # Create an argument parser to handle command-line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, default='weights/tinycd_dy.onnx', help='Path to ONNX model')
    parser.add_argument('--source_A', type=str, default='img/A/test_7.png', help='Epoch-A image')
    parser.add_argument('--source_B', type=str, default='img/B/test_7.png', help='Epoch-B image')
    parser.add_argument('--in_shape', type=int, default=1024, help='Input image size')
    args = parser.parse_args()

    # Instantiate the change-detection model
    cd = Tinycd(args.model, args.in_shape)

    # Read images with OpenCV
    img_a = cv2.imread(args.source_A)
    img_b = cv2.imread(args.source_B)

    # Inference + output
    t1 = time.time()
    out = cd.infer(img_a, img_b)
    print('Inference time: {}'.format(time.time() - t1))

    # Save the result
    cv2.imwrite('img/test_7_res.png', out)
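Before wiring the model into main.py, it can be worth confirming that the exported ONNX graph really expects the single 6-channel input built in preprocess (the A and B tensors concatenated along the channel axis), and checking the shape of its output head. A minimal inspection sketch with onnxruntime; the model path is the default from above:

import onnxruntime as ort

sess = ort.InferenceSession('weights/tinycd_dy.onnx', providers=['CPUExecutionProvider'])
for inp in sess.get_inputs():
    print('input :', inp.name, inp.shape, inp.type)   # expected: a (1, 6, H, W)-like shape
for out in sess.get_outputs():
    print('output:', out.name, out.shape, out.type)   # (1, 1, H, W) score map or (1, n, H, W) logits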

1.3 SuperPoint + LightGlue Feature Extraction and Matching Code

Save the following as SuperPointLightGlue.py:

import os
import cv2
import matplotlib.cm as cm
import time
import numpy as np
import onnxruntime as ort  # ONNX Runtime inference; install with: pip install onnxruntime-gpu==1.12.0 -i https://pypi.tuna.tsinghua.edu.cn/simple


class SuperPointLightGlue(object):
    def __init__(self, onnx_model, save_tmp=False) -> None:
        # Build the onnxruntime inference session
        self.ort_session = ort.InferenceSession(
            onnx_model,
            providers=['CUDAExecutionProvider', 'CPUExecutionProvider']
            if ort.get_device() == 'GPU' else ['CPUExecutionProvider'])
        self.input_h, self.input_w = 1024, 1024
        self.save_tmp = save_tmp  # whether to save the intermediate fused image and the keypoint-match visualization

    def preprocess(self, image):
        '''Preprocessing: convert to grayscale --> resize --> pad --> normalize'''
        image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        scale = max(image.shape[0] / self.input_h, image.shape[1] / self.input_w)
        image_resize = cv2.resize(image_gray, (int(image_gray.shape[1] / scale), int(image_gray.shape[0] / scale)))
        hpad = self.input_h - image_resize.shape[0]
        wpad = self.input_w - image_resize.shape[1]
        image_pad = np.ones((image_resize.shape[0] + hpad, image_resize.shape[1] + wpad)) * 0.0
        image_pad[:image_resize.shape[0], :image_resize.shape[1]] = image_resize
        input_array = image_pad / 255.
        input_array = input_array[None][None].astype(np.float32)
        return input_array, scale

    def infer(self, imageData0, imageData1):
        # (1, 1, 1024, 1024)
        input_array0, scale0 = self.preprocess(imageData0)
        input_array1, scale1 = self.preprocess(imageData1)
        # ['kpts0', 'kpts1', 'matches0', 'matches1', 'mscores0', 'mscores1']
        output_dict = {}
        output_name = [i.name for i in self.ort_session.get_outputs()]
        preds = self.ort_session.run(None, {self.ort_session.get_inputs()[0].name: input_array0,
                                            self.ort_session.get_inputs()[1].name: input_array1})
        for i, name in enumerate(output_name):
            output_dict[name] = preds[i]
        kpts0 = output_dict['kpts0'] * scale0
        kpts1 = output_dict['kpts1'] * scale1
        matches0 = output_dict['matches0']
        matches1 = output_dict['matches1']
        mscores0 = output_dict['mscores0']
        mscores1 = output_dict['mscores1']
        return kpts0, kpts1, matches0, matches1, mscores0, mscores1

    def make_matching_plot_fast(self, image0, image1, kpts0, kpts1, mkpts0, mkpts1, color, show_keypoints=False, margin=10):
        H0, W0, C0 = image0.shape
        H1, W1, C1 = image1.shape
        H, W = max(H0, H1), W0 + W1 + margin
        out = 255 * np.ones((H, W, 3), np.uint8)
        out[:H0, :W0, :C0] = image0
        out[:H1, W0 + margin:, :C1] = image1
        if show_keypoints:
            kpts0, kpts1 = np.round(kpts0).astype(int), np.round(kpts1).astype(int)
            white = (255, 255, 255)
            black = (0, 0, 0)
            for x, y in kpts0:
                cv2.circle(out, (x, y), 2, black, -1, lineType=cv2.LINE_AA)
                cv2.circle(out, (x, y), 1, white, -1, lineType=cv2.LINE_AA)
            for x, y in kpts1:
                cv2.circle(out, (x + margin + W0, y), 2, black, -1, lineType=cv2.LINE_AA)
                cv2.circle(out, (x + margin + W0, y), 1, white, -1, lineType=cv2.LINE_AA)
        mkpts0, mkpts1 = np.round(mkpts0).astype(int), np.round(mkpts1).astype(int)
        color = (np.array(color[:, :3]) * 255).astype(int)[:, ::-1]
        for (x0, y0), (x1, y1), c in zip(mkpts0, mkpts1, color):
            c = c.tolist()
            cv2.line(out, (x0, y0), (x1 + margin + W0, y1), color=c, thickness=1, lineType=cv2.LINE_AA)
            # display line end-points as circles
            cv2.circle(out, (x0, y0), 2, c, -1, lineType=cv2.LINE_AA)
            cv2.circle(out, (x1 + margin + W0, y1), 2, c, -1, lineType=cv2.LINE_AA)
        return out

    def glue(self, glue_model, a_image_dir, b_image_dir, detect_model):
        image_align_obj = self.image_align(glue_model, a_image_dir, b_image_dir, detect_model)
        for index, (a_image, c_image, image_filename) in enumerate(image_align_obj):
            yield a_image, c_image, image_filename

    def image_align(self, glue_model, a_image_dir, b_image_dir, detect_model):
        # Folders for the aligned/fused images and the keypoint-match result images
        temp_dir = os.path.join(os.path.dirname(a_image_dir), "temp_res")
        match_dir = os.path.join(os.path.dirname(a_image_dir), "match_res")
        os.makedirs(temp_dir, exist_ok=True)
        os.makedirs(match_dir, exist_ok=True)
        all_images = [i for i in os.listdir(a_image_dir) if os.path.splitext(i)[-1] in ['.png', '.jpg']]
        for image_filename in all_images:
            start_time = time.time()
            a_image_path = os.path.join(a_image_dir, image_filename)
            b_image_path = os.path.join(b_image_dir, image_filename)
            if not os.path.exists(b_image_path):
                continue
            a_image = cv2.imread(a_image_path, cv2.IMREAD_COLOR)
            b_image = cv2.imread(b_image_path, cv2.IMREAD_COLOR)
            kpts0, kpts1, matches0, matches1, mscores0, mscores1 = self.infer(a_image, b_image)
            if matches0.size > 0:
                valid = np.bitwise_and(mscores0 > 0.4, matches0 > -1)
                mkpts0 = kpts0[valid]
                mkpts1 = kpts1[0, matches0[valid]]
                color = cm.jet(mscores0[valid])
                # Image alignment with RANSAC + homography; if there are fewer than 20 matched points, skip alignment and use the raw B image
                if len(mkpts0) >= 20 and len(mkpts1) >= 20:
                    H, status = cv2.findHomography(np.expand_dims(mkpts0, 1), np.expand_dims(mkpts1, 1), cv2.RANSAC, ransacReprojThreshold=2.0)
                    if H is not None:
                        b_image_align = cv2.warpPerspective(b_image, H, (a_image.shape[1], a_image.shape[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)
                    else:
                        b_image_align = b_image
                else:
                    b_image_align = b_image
                c_image = self.image_fusion(a_image, b_image_align, detect_model)  # HWC
                end_time = time.time()
                print(f"Image pair {image_filename} alignment time => {end_time - start_time}s")
                # Optionally save intermediate results
                if self.save_tmp:
                    result_image = self.make_matching_plot_fast(a_image, b_image, mkpts0, mkpts1, mkpts0, mkpts1, color, show_keypoints=False)
                    cv2.imwrite(os.path.join(temp_dir, image_filename), c_image)
                    cv2.imwrite(os.path.join(match_dir, image_filename), result_image)
                yield a_image, c_image, image_filename

    def image_fusion(self, a_image, b_image_align, detect_model):
        # Fuse the two images
        a_image = a_image.astype(np.float32)
        b_image_align = b_image_align.astype(np.float32)
        if detect_model == "ADD":
            c_image = a_image / 2 + b_image_align / 2
        elif detect_model == "ADD_radio":
            c_image = a_image * 0.2 + b_image_align * 0.8
        return np.ascontiguousarray(c_image)


if __name__ == '__main__':
    onnx_model = 'weights/superpoint_1024_lightglue_end2end.onnx'
    a_image_dir = 'img/A'
    b_image_dir = 'img/B'
    detect_model = 'ADD_radio'
    lightglue = SuperPointLightGlue(onnx_model, save_tmp=True)
    obj = lightglue.glue(lightglue, a_image_dir, b_image_dir, detect_model)
    for i, (a_image, c_image, image_filename) in enumerate(obj):
        # print(image_filename)
        continue
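One detail of image_align worth spelling out: cv2.findHomography is called with the A-frame keypoints as source and the B-frame keypoints as destination, so the estimated H maps A coordinates to B coordinates. cv2.warpPerspective normally expects an input-to-output mapping and inverts it internally; passing cv2.WARP_INVERSE_MAP tells it to use H directly as the output-to-input lookup, so each A-frame pixel samples its color from B at H(x), resampling B into A's frame. A minimal standalone sketch of just this step (the match arrays are assumed to come from infer plus the score/validity filtering shown above):

import cv2
import numpy as np

def align_b_to_a(a_image, b_image, mkpts0, mkpts1):
    # H maps A-frame points (mkpts0) onto B-frame points (mkpts1)
    H, status = cv2.findHomography(np.expand_dims(mkpts0, 1), np.expand_dims(mkpts1, 1),
                                   cv2.RANSAC, ransacReprojThreshold=2.0)
    if H is None:
        return b_image  # fall back to the raw B image, as image_align does
    # With WARP_INVERSE_MAP, H is used directly as the A-frame -> B-frame pixel lookup
    return cv2.warpPerspective(b_image, H, (a_image.shape[1], a_image.shape[0]),
                               flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)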

2 Visualizing the Results

The keypoint-match visualizations are written to img/match_res, the fused images to img/temp_res, and the change masks and A | fused-B | mask strips to img/results and img/sum_results.


