
CLIP image-text multimodal model inference with ONNX Runtime and TensorRT

First, download the GitHub project: https://github.com/Lednik7/CLIP-ONNX.
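For example, a typical setup might look like the following (the editable pip install is an assumption; follow the repository README if it specifies a different install step):

git clone https://github.com/Lednik7/CLIP-ONNX
cd CLIP-ONNX
pip install -e .  # assumed install step; see the repo README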
Then change opset_version=12 to opset_version=15 on line 61 of clip_onnx/utils.py and run the test script:

import clip
from PIL import Image
import numpy as np

# ONNX does not support CUDA here, so load the model on CPU
model, preprocess = clip.load("ViT-B/32", device="cpu", jit=False)

# batch first
image = preprocess(Image.open("CLIP.png")).unsqueeze(0).cpu()  # [1, 3, 224, 224]
image_onnx = image.detach().cpu().numpy().astype(np.float32)

# batch first
text = clip.tokenize(["a diagram", "a dog", "a cat"]).cpu()  # [3, 77]
text_onnx = text.detach().cpu().numpy().astype(np.int32)

from clip_onnx import clip_onnx

# Export both towers to ONNX and run them
onnx_model = clip_onnx(model, visual_path="clip_visual.onnx", textual_path="clip_textual.onnx")
onnx_model.convert2onnx(image, text, verbose=True)
onnx_model.start_sessions(providers=["CPUExecutionProvider"])  # CPU mode

image_features = onnx_model.encode_image(image_onnx)
text_features = onnx_model.encode_text(text_onnx)
logits_per_image, logits_per_text = onnx_model(image_onnx, text_onnx)
probs = logits_per_image.softmax(dim=-1).detach().cpu().numpy()
print("Label probs:", probs)

# Reload the exported ONNX files without the original PyTorch model
onnx_model = clip_onnx(None)
onnx_model.load_onnx(visual_path="clip_visual.onnx", textual_path="clip_textual.onnx", logit_scale=100.0000)  # model.logit_scale.exp()
onnx_model.start_sessions(providers=["CPUExecutionProvider"])
image_features = onnx_model.encode_image(image_onnx)
text_features = onnx_model.encode_text(text_onnx)
logits_per_image, logits_per_text = onnx_model(image_onnx, text_onnx)
probs = logits_per_image.softmax(dim=-1).detach().cpu().numpy()
print("Label probs:", probs)

ONNX Runtime inference

import clip
from PIL import Image
import numpy as np
import onnxruntime

model, preprocess = clip.load("ViT-B/32", device="cpu", jit=False)
image = preprocess(Image.open("CLIP.png")).unsqueeze(0).cpu()  # [1, 3, 224, 224]
image_input = image.detach().cpu().numpy().astype(np.float32)
text = clip.tokenize(["a diagram", "a dog", "a cat"]).cpu()  # [3, 77]
text_input = text.detach().cpu().numpy().astype(np.int32)

class clip_onnx:
    def __init__(self, model=None, visual_path: str = "clip_visual.onnx",
                 textual_path: str = "clip_textual.onnx", logit_scale=None):
        self.model = model
        self.visual_path = visual_path
        self.textual_path = textual_path
        self.logit_scale = logit_scale

    def start_sessions(self, providers=['TensorrtExecutionProvider', 'CUDAExecutionProvider', 'CPUExecutionProvider']):
        self.visual_session = onnxruntime.InferenceSession(self.visual_path, providers=providers)
        self.textual_session = onnxruntime.InferenceSession(self.textual_path, providers=providers)

    def __call__(self, image, text):
        # Run each tower, then reproduce CLIP's similarity computation in numpy
        onnx_input_image = {self.visual_session.get_inputs()[0].name: image}
        image_features, = self.visual_session.run(None, onnx_input_image)
        onnx_input_text = {self.textual_session.get_inputs()[0].name: text}
        text_features, = self.textual_session.run(None, onnx_input_text)
        # L2-normalize the embeddings and scale the cosine similarities
        image_features = image_features / np.linalg.norm(image_features, axis=-1, keepdims=True)
        text_features = text_features / np.linalg.norm(text_features, axis=-1, keepdims=True)
        logits_per_image = self.logit_scale * image_features @ text_features.T
        logits_per_text = logits_per_image.T
        return logits_per_image, logits_per_text

onnx_model = clip_onnx(visual_path="clip_visual.onnx", textual_path="clip_textual.onnx", logit_scale=100.0000)  # model.logit_scale.exp()
onnx_model.start_sessions(providers=["CPUExecutionProvider"])
logits_per_image, logits_per_text = onnx_model(image_input, text_input)

def softmax(x, axis=-1):
    e_x = np.exp(x - np.max(x, axis=axis, keepdims=True))
    return e_x / np.sum(e_x, axis=axis, keepdims=True)

probs = softmax(logits_per_image)
print("Label probs:", probs)

TensorRT inference

For TensorRT inference to work, you also need to comment out the dynamic_axes argument on line 63 of clip_onnx/utils.py (so the exported graphs have fixed input shapes) and regenerate the ONNX models.
Then convert the ONNX models into engine files with trtexec:

/docker_share/TensorRT-8.6.1.6/bin/trtexec --onnx=./clip_textual.onnx --saveEngine=./clip_textual.engine 
/docker_share/TensorRT-8.6.1.6/bin/trtexec --onnx=./clip_visual.onnx --saveEngine=./clip_visual.engine
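trtexec can also build a reduced-precision engine via its --fp16 flag; accuracy should then be re-checked against the FP32 results. For example (the output filename here is illustrative):

/docker_share/TensorRT-8.6.1.6/bin/trtexec --onnx=./clip_visual.onnx --saveEngine=./clip_visual_fp16.engine --fp16

The script below then deserializes the FP32 engines with the TensorRT Python API and pycuda: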
import clip
from PIL import Image
import numpy as np
import tensorrt as trt
import pycuda.autoinit
import pycuda.driver as cuda

model, preprocess = clip.load("ViT-B/32", device="cpu", jit=False)
image = preprocess(Image.open("CLIP.png")).unsqueeze(0).cpu()  # [1, 3, 224, 224]
image_input = image.detach().cpu().numpy().astype(np.float32)
text = clip.tokenize(["a diagram", "a dog", "a cat"]).cpu()  # [3, 77]
text_input = text.detach().cpu().numpy().astype(np.int32)

class clip_tensorrt:
    def __init__(self, model=None, visual_path: str = "clip_visual.engine",
                 textual_path: str = "clip_textual.engine", logit_scale=None):
        self.model = model
        self.visual_path = visual_path
        self.textual_path = textual_path
        self.logit_scale = logit_scale

    def start_engines(self):
        logger = trt.Logger(trt.Logger.WARNING)
        # Deserialize the visual engine and allocate pinned host / device buffers
        with open(self.visual_path, "rb") as f, trt.Runtime(logger) as runtime:
            self.visual_engine = runtime.deserialize_cuda_engine(f.read())
        self.visual_context = self.visual_engine.create_execution_context()
        self.visual_inputs_host = cuda.pagelocked_empty(trt.volume(self.visual_context.get_binding_shape(0)), dtype=np.float32)
        self.visual_outputs_host = cuda.pagelocked_empty(trt.volume(self.visual_context.get_binding_shape(1)), dtype=np.float32)
        self.visual_inputs_device = cuda.mem_alloc(self.visual_inputs_host.nbytes)
        self.visual_outputs_device = cuda.mem_alloc(self.visual_outputs_host.nbytes)
        self.visual_stream = cuda.Stream()
        # Same for the textual engine (note the int32 input dtype)
        with open(self.textual_path, "rb") as f, trt.Runtime(logger) as runtime:
            self.textual_engine = runtime.deserialize_cuda_engine(f.read())
        self.textual_context = self.textual_engine.create_execution_context()
        self.textual_inputs_host = cuda.pagelocked_empty(trt.volume(self.textual_context.get_binding_shape(0)), dtype=np.int32)
        self.textual_outputs_host = cuda.pagelocked_empty(trt.volume(self.textual_context.get_binding_shape(1)), dtype=np.float32)
        self.textual_inputs_device = cuda.mem_alloc(self.textual_inputs_host.nbytes)
        self.textual_outputs_device = cuda.mem_alloc(self.textual_outputs_host.nbytes)
        self.textual_stream = cuda.Stream()

    def __call__(self, image, text):
        # Visual tower: copy host -> device, execute, copy device -> host,
        # reusing the execution context created in start_engines
        np.copyto(self.visual_inputs_host, image.ravel())
        cuda.memcpy_htod_async(self.visual_inputs_device, self.visual_inputs_host, self.visual_stream)
        self.visual_context.execute_async_v2(bindings=[int(self.visual_inputs_device), int(self.visual_outputs_device)],
                                             stream_handle=self.visual_stream.handle)
        cuda.memcpy_dtoh_async(self.visual_outputs_host, self.visual_outputs_device, self.visual_stream)
        self.visual_stream.synchronize()
        # Textual tower, same pattern
        np.copyto(self.textual_inputs_host, text.ravel())
        cuda.memcpy_htod_async(self.textual_inputs_device, self.textual_inputs_host, self.textual_stream)
        self.textual_context.execute_async_v2(bindings=[int(self.textual_inputs_device), int(self.textual_outputs_device)],
                                              stream_handle=self.textual_stream.handle)
        cuda.memcpy_dtoh_async(self.textual_outputs_host, self.textual_outputs_device, self.textual_stream)
        self.textual_stream.synchronize()
        image_features = self.visual_outputs_host.reshape(1, 512)
        text_features = self.textual_outputs_host.reshape(3, 512)
        # L2-normalize and scale, exactly as in the ONNX Runtime version
        image_features = image_features / np.linalg.norm(image_features, axis=-1, keepdims=True)
        text_features = text_features / np.linalg.norm(text_features, axis=-1, keepdims=True)
        logits_per_image = self.logit_scale * image_features @ text_features.T
        logits_per_text = logits_per_image.T
        return logits_per_image, logits_per_text

trt_model = clip_tensorrt(visual_path="clip_visual.engine", textual_path="clip_textual.engine", logit_scale=100.0000)
trt_model.start_engines()
logits_per_image, logits_per_text = trt_model(image_input, text_input)

def softmax(x, axis=-1):
    e_x = np.exp(x - np.max(x, axis=axis, keepdims=True))
    return e_x / np.sum(e_x, axis=axis, keepdims=True)

probs = softmax(logits_per_image)
print("Label probs:", probs)

[Figure: CLIP.png, the test image]

Each version, when run normally, produces the result:

Label probs: [[0.9927926  0.00421789 0.00298956]]

