Preface

Learning materials:
TensorRT source code samples
Official docs: Working With TensorRT Using The Python API
Official docs: TensorRT Python
Official docs: CUDA Python
Bilibili video tutorial
Companion code for the videos: cookbook

Example: Parsing an ONNX Model

Reference source: cookbook → 04-BuildEngineByONNXParser → pyTorch-ONNX-TensorRT

Source Code

  The cookbook defines a custom network, trains it on MNIST, saves it as ONNX, and then has TensorRT read the ONNX file, build an engine, and run inference. The code here is simplified: it takes the pretrained ResNet-18 that PyTorch provides, exports it to ONNX, and then runs inference with TensorRT.
  The test data ships with TensorRT itself: ./TensorRT-8.6.1.6/data/resnet50 contains 4 images plus the label file class_labels.txt. Copy the images to ./data/images and the label file to ./data.
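
  For reference, the copy step can be scripted as below (a sketch separate from the main script that follows; the TensorRT-8.6.1.6 path and the image extensions are assumptions, adjust them to your installation):

import shutil
from pathlib import Path

src = Path('./TensorRT-8.6.1.6/data/resnet50')      # assumed location of the TensorRT sample data
dstImages = Path('./data/images')
dstImages.mkdir(parents=True, exist_ok=True)

# Copy the sample images and the label file into the layout the main script expects
for f in src.iterdir():
    if f.suffix.lower() in {'.jpg', '.jpeg', '.png', '.ppm'}:
        shutil.copy(f, dstImages / f.name)
shutil.copy(src / 'class_labels.txt', Path('./data/class_labels.txt'))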

import os
import numpy as np
from PIL import Image

import torch
import torchvision.models as models
import torchvision.transforms as transforms

import tensorrt as trt
from cuda import cudart

bUseFP16Mode = False
bUseINT8Mode = True

h, w = 224, 224
dataPath = 'data/images'
imgFiles = [os.path.join(dataPath, f) for f in os.listdir(dataPath)]
labelFile = 'data/class_labels.txt'
onnxFile = 'resnet18.onnx'
if bUseFP16Mode:
    trtFile = 'fp16.plan'
elif bUseINT8Mode:
    trtFile = 'int8.plan'
else:
    trtFile = 'fp32.plan'

batch_size = len(imgFiles)
with open(labelFile, 'r') as f:
    label = np.array([line.strip() for line in f])  # strip trailing newlines from the label names

# Prepare the input data
transform = transforms.Compose([
    transforms.Resize((h, w)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])


def load_images(image_paths):
    images = [Image.open(image_path) for image_path in image_paths]
    tensors = [transform(image).unsqueeze(0) for image in images]
    res = torch.cat(tensors, dim=0)
    return res


input_tensor = load_images(imgFiles).cuda()

weights = models.ResNet18_Weights.DEFAULT
model = models.resnet18(weights=weights, progress=False).eval().cuda()

result = model(input_tensor)

print(f'PyTorch results: {label[result.argmax(dim=1).cpu()]}')

# Export to ONNX
torch.onnx.export(
    model,
    torch.randn(1, 3, h, w, device='cuda'),
    onnxFile,
    input_names=['x'],
    output_names=['y'],
    do_constant_folding=True,
    verbose=True,
    keep_initializers_as_inputs=True,
    opset_version=12,
    dynamic_axes={'x': {0: 'nBatchSize'}, 'y': {0: 'nBatchSize'}}
)


# Calibrator for INT8 mode
class MyCalibrator(trt.IInt8EntropyCalibrator2):

    def __init__(self, data_path, n_calibration, input_shape, cache_file):
        trt.IInt8EntropyCalibrator2.__init__(self)
        self.imageList = [os.path.join(data_path, f) for f in os.listdir(data_path)]
        self.nCalibration = n_calibration
        self.shape = input_shape
        self.bufferSize = trt.volume(input_shape) * trt.float32.itemsize
        self.cacheFile = cache_file
        _, self.dIn = cudart.cudaMalloc(self.bufferSize)
        self.oneBatch = self.batch_generator()

    def __del__(self):
        cudart.cudaFree(self.dIn)

    def batch_generator(self):
        for i in range(self.nCalibration):
            print("> calibration %d" % i)
            sub_images = np.random.choice(self.imageList, self.shape[0], replace=False)
            yield np.ascontiguousarray(load_images(sub_images).numpy())

    def get_batch_size(self):  # necessary API
        return self.shape[0]

    def get_batch(self, nameList=None, inputNodeName=None):  # necessary API
        try:
            data = next(self.oneBatch)
            cudart.cudaMemcpy(self.dIn, data.ctypes.data, self.bufferSize, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
            return [int(self.dIn)]  # list of device pointers, one per input tensor
        except StopIteration:
            return None

    def read_calibration_cache(self):  # necessary API
        if os.path.exists(self.cacheFile):
            print("Succeed finding cache file: %s" % self.cacheFile)
            with open(self.cacheFile, "rb") as f:
                cache = f.read()
                return cache
        else:
            print("Failed finding int8 cache!")
            return

    def write_calibration_cache(self, cache):  # necessary API
        with open(self.cacheFile, "wb") as f:
            f.write(cache)
        print("Succeed saving int8 cache!")
        return


# Build phase
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, 1 << 30)
if bUseFP16Mode:
    config.set_flag(trt.BuilderFlag.FP16)
if bUseINT8Mode:
    config.set_flag(trt.BuilderFlag.INT8)
    config.int8_calibrator = MyCalibrator(dataPath, 1, (4, 3, h, w), 'int8.cache')

# Parse the ONNX model
parser = trt.OnnxParser(network, logger)
with open(onnxFile, "rb") as model:
    if not parser.parse(model.read()):
        print("Failed parsing .onnx file!")
        for error in range(parser.num_errors):
            print(parser.get_error(error))
        exit()
    print("Succeeded parsing .onnx file!")

inputTensor = network.get_input(0)
profile.set_shape(inputTensor.name, [1, 3, h, w], [4, 3, h, w], [8, 3, h, w])
config.add_optimization_profile(profile)
# Build the serialized network
engineString = builder.build_serialized_network(network, config)
with open(trtFile, "wb") as f:
    f.write(engineString)

# Runtime phase
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], [batch_size, 3, h, w])

inputHost = np.ascontiguousarray(input_tensor.cpu().numpy())
outputHost = np.empty(
    context.get_tensor_shape(lTensorName[1]),
    dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[1]))
)

_, inputDevice = cudart.cudaMalloc(inputHost.nbytes)
_, outputDevice = cudart.cudaMalloc(outputHost.nbytes)
context.set_tensor_address(lTensorName[0], inputDevice)
context.set_tensor_address(lTensorName[1], outputDevice)

cudart.cudaMemcpy(inputDevice, inputHost.ctypes.data, inputHost.nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
context.execute_async_v3(0)  # 0: run on the default CUDA stream
cudart.cudaMemcpy(outputHost.ctypes.data, outputDevice, outputHost.nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

print(f'TensorRT results: {label[outputHost.argmax(axis=1)]}')

cudart.cudaFree(inputDevice)
cudart.cudaFree(outputDevice)

Code Walkthrough

  The overall flow: load the pretrained ResNet-18 that PyTorch provides and run inference on a few ImageNet images, save the model as ONNX, then load the ONNX file with TensorRT's parser and run inference there.
  The PyTorch weights are cached under /home/xxx/.cache/torch/hub/checkpoints.
  The less familiar parts are the torch.onnx.export() API used to export the ONNX model, and the extra calibrator class required for INT8 mode.
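
  Also note that the script above always rebuilds the engine and only writes trtFile as a by-product. To reuse a saved plan on later runs, a minimal sketch (assuming the .plan file from a previous run exists and was produced by the same TensorRT version and GPU):

import os
import tensorrt as trt

logger = trt.Logger(trt.Logger.ERROR)
if os.path.exists(trtFile):
    # Deserialize the engine written by a previous run instead of rebuilding
    with open(trtFile, 'rb') as f:
        engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())
else:
    # Fall back to the build phase shown above
    ...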

(1) Exporting the ONNX model: official documentation
Explanation of selected parameters (a quick way to verify the exported file follows this list):
do_constant_folding=True: whether to perform constant folding
verbose=True: whether to print detailed export information
keep_initializers_as_inputs=True: whether to expose the model parameters as graph inputs. My understanding is that with True the parameters are not fixed but come in as part of the input, so the same graph can be loaded with different parameters; with False the parameters are baked into the graph, which is more favorable for optimization and speedup.
dynamic_axes: declares which axes are dynamic
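
  As a quick sanity check before handing the file to TensorRT (a sketch to run right after the export above, assuming the onnx and onnxruntime packages are installed; neither is used elsewhere in this post), verify that the exported graph is well formed and that ONNX Runtime reproduces the PyTorch output:

import onnx
import onnxruntime as ort

onnx.checker.check_model(onnx.load(onnxFile))   # structural validation of the exported graph

session = ort.InferenceSession(onnxFile, providers=['CPUExecutionProvider'])
ortOutput = session.run(['y'], {'x': input_tensor.cpu().numpy()})[0]
print('max abs diff vs PyTorch:', np.abs(ortOutput - result.detach().cpu().numpy()).max())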

(2) The calibrator for INT8 mode
config.int8_calibrator = MyCalibrator(dataPath, 1, (4, 3, h, w), 'int8.cache')

  INT8 mode needs to determine a quantization range for each tensor in the network so that quantization preserves the model's accuracy. This example cheats a little: the same images used for inference are used as calibration data, and calibration also feeds all 4 images as a single batch, so the results come out identical. If you instead set config.int8_calibrator = MyCalibrator(dataPath, 5, (1, 3, h, w), 'int8.cache'), you will find that the results differ.

  input_shape specifies the input shape and, through its first dimension, the calibration batch size. The calibrator simply draws batch-size samples at random from the data set and repeats this for n_calibration rounds; a note on the calibration cache follows below.
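
  One detail to keep in mind: because read_calibration_cache() returns the existing int8.cache, TensorRT reuses the cached ranges on later builds and skips calibration entirely, so changing n_calibration or input_shape has no effect until the cache is removed. A minimal sketch (file name as used above):

import os

cacheFile = 'int8.cache'
if os.path.exists(cacheFile):
    os.remove(cacheFile)   # force a fresh calibration pass instead of reusing the old ranges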

Pitfalls

  At first I did the data preprocessing with NumPy only:

image = Image.open(imgFile).resize((224, 224))  # imgFile: path of a single test image
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = (np.array(image, dtype=np.float32) / 255 - mean) / std  # mean/std are float64, so the result is silently promoted to float64
image = np.expand_dims(image.transpose((2, 0, 1)), axis=0)
input_tensor = torch.from_numpy(image).cuda()

  This raises: RuntimeError: Input type (torch.cuda.DoubleTensor) and weight type (torch.cuda.FloatTensor) should be the same
  The cause is that the normalization line automatically converts the data to float64 (mean and std are float64 arrays). Using torch.FloatTensor(image) makes the error go away, but for TensorRT inference inputHost = np.ascontiguousarray(image) would then feed float64 data, which produces different results without raising any error; the input really has to be the same float32 data. Here it is safe because inference was run in PyTorch first and inputHost is taken from input_tensor.cpu().numpy(), but in real use you would skip the PyTorch step and run directly on TensorRT, so be careful with the data type.
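
  A fixed NumPy-only version that stays in float32 end to end (a sketch; the interpolation details of PIL's resize differ slightly from torchvision's Resize):

image = Image.open(imgFile).resize((224, 224))
mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
std = np.array([0.229, 0.224, 0.225], dtype=np.float32)
image = (np.asarray(image, dtype=np.float32) / 255 - mean) / std   # all operands are float32, so no promotion
image = np.expand_dims(image.transpose((2, 0, 1)), axis=0)
inputHost = np.ascontiguousarray(image)                            # float32, safe to feed to TensorRT directly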

Example: The TensorRT Interface Inside the PyTorch Framework

Reference source: cookbook → 06-UseFrameworkTRT → Torch-TensorRT
PyTorch official example

Source Code

  The data-loading part is omitted here; it is the same as in the previous example. In my tests, enabling TorchScript tracing made inference noticeably faster (a rough timing sketch follows the code below).

import torch_tensorrt


TorchScript = True
if TorchScript:
    model = torch.jit.trace(model, torch.randn(batch_size, 3, h, w, device="cuda"))
    
optimized_model = torch_tensorrt.compile(
    model,
    inputs=[torch.randn((batch_size, 3, h, w)).float().cuda()],
    enabled_precisions={torch.float},  # precisions are passed as a set
    debug=True,
)

optimized_result = optimized_model(input_tensor)
print(f'Torch TensorRT results: {label[optimized_result.argmax(dim=1).cpu()]}')
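
  To back up the speed claim, a rough timing sketch using CUDA events (numbers will vary with the GPU and the Torch-TensorRT version):

def benchmark(m, x, n_warmup=10, n_iter=50):
    # Average GPU time per forward pass in milliseconds, measured with CUDA events
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    with torch.no_grad():
        for _ in range(n_warmup):
            m(x)
        torch.cuda.synchronize()
        start.record()
        for _ in range(n_iter):
            m(x)
        end.record()
        torch.cuda.synchronize()
    return start.elapsed_time(end) / n_iter

print(f'PyTorch   : {benchmark(model, input_tensor):.2f} ms')
print(f'Torch-TRT : {benchmark(optimized_model, input_tensor):.2f} ms')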