TensorRT Python Inference

Introduction

Building a TensorRT engine and profiling its performance are straightforward with the TensorRT command-line interface (CLI) trtexec for most use cases. However, verifying the correctness of the TensorRT engine via trtexec can be awkward. Instead of using the TensorRT C++ API to run inference for verification, we would like to use the TensorRT Python API, because Python has rich libraries for data manipulation and visualization, and it does not require compiling an application before running it.

In this blog post, we will discuss how to use the TensorRT Python API to run inference with a pre-built TensorRT engine and a custom plugin in a few lines of code, using utilities built on top of the CUDA Python APIs.

TensorRT Python Inference

The TensorRT Python inference utilities and example can be found in the TensorRT Python Inference GitHub repository. It assumes that the TensorRT engine and the custom plugin have been built following the instructions.

TensorRT Python Inference Utilities

To run inference with TensorRT, the user needs to manage the memory buffers for the input and output tensors, as well as some other CUDA resources, using the CUDA Python APIs from NVIDIA.

The following Python utilities are sufficient for running inference with pre-built TensorRT engines and custom plugins in Python for most use cases.
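
Each CUDA Python call returns a tuple whose first element is an error code and whose remaining elements are the actual results, so every call has to be checked explicitly. As a minimal sketch of this calling convention (assuming the cuda-python package is installed and a CUDA device is available), allocating and freeing a device buffer looks roughly like the following; the cuda_call helper in common.py below simply wraps this pattern.

from cuda import cudart

# Each cuda-python API returns (error_code, *results).
err, device_ptr = cudart.cudaMalloc(1024)  # Allocate 1 KiB on the device.
if err != cudart.cudaError_t.cudaSuccess:
    raise RuntimeError(f"cudaMalloc failed: {err}")

# cudaFree returns only an error code (a 1-tuple).
err, = cudart.cudaFree(device_ptr)
if err != cudart.cudaError_t.cudaSuccess:
    raise RuntimeError(f"cudaFree failed: {err}")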

common.py
# Slightly modified from
# https://github.com/NVIDIA/TensorRT/blob/c0c633cc629cc0705f0f69359f531a192e524c0f/samples/python/common.py

#
# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import ctypes
from typing import Optional, List

import numpy as np
import tensorrt as trt
from cuda import cuda, cudart

try:
    # Sometimes python does not understand FileNotFoundError
    FileNotFoundError
except NameError:
    FileNotFoundError = IOError

EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)


def check_cuda_err(err):
    if isinstance(err, cuda.CUresult):
        if err != cuda.CUresult.CUDA_SUCCESS:
            raise RuntimeError("Cuda Error: {}".format(err))
    elif isinstance(err, cudart.cudaError_t):
        if err != cudart.cudaError_t.cudaSuccess:
            raise RuntimeError("Cuda Runtime Error: {}".format(err))
    else:
        raise RuntimeError("Unknown error type: {}".format(err))


def cuda_call(call):
    err, res = call[0], call[1:]
    check_cuda_err(err)
    if len(res) == 1:
        res = res[0]
    return res


def GiB(val):
    return val * 1 << 30


class HostDeviceMem:
    """Pair of host and device memory, where the host memory is wrapped in a numpy array"""

    def __init__(self,
                 size: int,
                 dtype: np.dtype,
                 name: Optional[str] = None,
                 shape: Optional[trt.Dims] = None,
                 format: Optional[trt.TensorFormat] = None):
        nbytes = size * dtype.itemsize
        host_mem = cuda_call(cudart.cudaMallocHost(nbytes))
        pointer_type = ctypes.POINTER(np.ctypeslib.as_ctypes_type(dtype))

        self._host = np.ctypeslib.as_array(ctypes.cast(host_mem, pointer_type),
                                           (size, ))
        self._device = cuda_call(cudart.cudaMalloc(nbytes))
        self._nbytes = nbytes
        self._name = name
        self._shape = shape
        self._format = format
        self._dtype = dtype

    @property
    def host(self) -> np.ndarray:
        return self._host

    @host.setter
    def host(self, arr: np.ndarray):
        if arr.size > self.host.size:
            raise ValueError(
                f"Tried to fit an array of size {arr.size} into host memory of size {self.host.size}"
            )
        np.copyto(self.host[:arr.size], arr.flat, casting='safe')

    @property
    def device(self) -> int:
        return self._device

    @property
    def nbytes(self) -> int:
        return self._nbytes

    @property
    def name(self) -> Optional[str]:
        return self._name

    @property
    def shape(self) -> Optional[trt.Dims]:
        return self._shape

    @property
    def format(self) -> Optional[trt.TensorFormat]:
        return self._format

    @property
    def dtype(self) -> np.dtype:
        return self._dtype

    def __str__(self):
        return f"Host:\n{self.host}\nDevice:\n{self.device}\nSize:\n{self.nbytes}\n"

    def __repr__(self):
        return self.__str__()

    def free(self):
        cuda_call(cudart.cudaFree(self.device))
        cuda_call(cudart.cudaFreeHost(self.host.ctypes.data))


# Allocates all buffers required for an engine, i.e. host/device inputs/outputs.
# If engine uses dynamic shapes, specify a profile to find the maximum input & output size.
def allocate_buffers(engine: trt.ICudaEngine,
                     profile_idx: Optional[int] = None):
    inputs = []
    outputs = []
    bindings = []
    stream = cuda_call(cudart.cudaStreamCreate())
    tensor_names = [
        engine.get_tensor_name(i) for i in range(engine.num_io_tensors)
    ]
    for binding in tensor_names:
        # get_tensor_profile_shape returns (min_shape, optimal_shape, max_shape)
        # Pick out the max shape to allocate enough memory for the binding.
        format = engine.get_tensor_format(binding)
        shape = engine.get_tensor_shape(
            binding
        ) if profile_idx is None else engine.get_tensor_profile_shape(
            binding, profile_idx)[-1]
        shape_valid = np.all([s >= 0 for s in shape])
        if not shape_valid and profile_idx is None:
            raise ValueError(f"Binding {binding} has dynamic shape, " +\
                             "but no profile was specified.")
        size = trt.volume(shape)
        if engine.has_implicit_batch_dimension:
            size *= engine.max_batch_size
        dtype = np.dtype(trt.nptype(engine.get_tensor_dtype(binding)))

        # Allocate host and device buffers
        bindingMemory = HostDeviceMem(size,
                                      dtype,
                                      name=binding,
                                      shape=shape,
                                      format=format)

        # Append the device buffer to device bindings.
        bindings.append(int(bindingMemory.device))

        # Append to the appropriate list.
        if engine.get_tensor_mode(binding) == trt.TensorIOMode.INPUT:
            inputs.append(bindingMemory)
        else:
            outputs.append(bindingMemory)
    return inputs, outputs, bindings, stream


# Frees the resources allocated in allocate_buffers
def free_buffers(inputs: List[HostDeviceMem], outputs: List[HostDeviceMem],
                 stream: cudart.cudaStream_t):
    for mem in inputs + outputs:
        mem.free()
    cuda_call(cudart.cudaStreamDestroy(stream))


# Wrapper for cudaMemcpy which infers copy size and does error checking
def memcpy_host_to_device(device_ptr: int, host_arr: np.ndarray):
    nbytes = host_arr.size * host_arr.itemsize
    cuda_call(
        cudart.cudaMemcpy(device_ptr, host_arr, nbytes,
                          cudart.cudaMemcpyKind.cudaMemcpyHostToDevice))


# Wrapper for cudaMemcpy which infers copy size and does error checking
def memcpy_device_to_host(host_arr: np.ndarray, device_ptr: int):
    nbytes = host_arr.size * host_arr.itemsize
    cuda_call(
        cudart.cudaMemcpy(host_arr, device_ptr, nbytes,
                          cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost))


def _do_inference_base(inputs, outputs, stream, execute_async):
    # Transfer input data to the GPU.
    kind = cudart.cudaMemcpyKind.cudaMemcpyHostToDevice
    [
        cuda_call(
            cudart.cudaMemcpyAsync(inp.device, inp.host, inp.nbytes, kind,
                                   stream)) for inp in inputs
    ]
    # Run inference.
    execute_async()
    # Transfer predictions back from the GPU.
    kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost
    [
        cuda_call(
            cudart.cudaMemcpyAsync(out.host, out.device, out.nbytes, kind,
                                   stream)) for out in outputs
    ]
    # Synchronize the stream
    cuda_call(cudart.cudaStreamSynchronize(stream))
    # Return only the host outputs.
    return [out.host for out in outputs]


# This function is generalized for multiple inputs/outputs.
# inputs and outputs are expected to be lists of HostDeviceMem objects.
def do_inference(context, bindings, inputs, outputs, stream, batch_size=1):

    def execute_async():
        context.execute_async(batch_size=batch_size,
                              bindings=bindings,
                              stream_handle=stream)

    return _do_inference_base(inputs, outputs, stream, execute_async)


# This function is generalized for multiple inputs/outputs for full dimension networks.
# inputs and outputs are expected to be lists of HostDeviceMem objects.
def do_inference_v2(context, bindings, inputs, outputs, stream):

    def execute_async():
        context.execute_async_v2(bindings=bindings, stream_handle=stream)

    return _do_inference_base(inputs, outputs, stream, execute_async)
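
Note that do_inference and do_inference_v2 use the bindings-based execution APIs, which are deprecated in newer TensorRT releases. If your TensorRT version provides the name-based execute_async_v3 API (TensorRT 8.5 and later), a sketch of an equivalent helper, not part of the original utilities, could look like the following; it reuses _do_inference_base and the HostDeviceMem buffers from above.

# Sketch only: name-based execution path, assuming TensorRT >= 8.5.
def do_inference_v3(context, engine, inputs, outputs, stream):

    def execute_async():
        # Bind every I/O tensor to its device buffer by name.
        buffers = {mem.name: mem for mem in inputs + outputs}
        for i in range(engine.num_io_tensors):
            name = engine.get_tensor_name(i)
            context.set_tensor_address(name, int(buffers[name].device))
        context.execute_async_v3(stream_handle=stream)

    return _do_inference_base(inputs, outputs, stream, execute_async)
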
common_runtime.py
import os
import ctypes

import tensorrt as trt

# Define global logger object (it should be a singleton,
# available for TensorRT from anywhere in code).
# You can set the logger severity higher to suppress messages
# (or lower to display more messages)
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)


def load_plugin_lib(plugin_lib_file_path):

    if os.path.isfile(plugin_lib_file_path):
        try:
            # Python specifies that winmode is 0 by default, but some implementations
            # incorrectly default to None instead. See:
            # https://docs.python.org/3.8/library/ctypes.html
            # https://github.com/python/cpython/blob/3.10/Lib/ctypes/__init__.py#L343
            ctypes.CDLL(plugin_lib_file_path, winmode=0)
        except TypeError:
            # winmode only introduced in python 3.8
            ctypes.CDLL(plugin_lib_file_path)
        return

    raise IOError(f"Failed to load plugin library: {plugin_lib_file_path}")


def load_engine(engine_file_path):

    with open(engine_file_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
        return runtime.deserialize_cuda_engine(f.read())

TensorRT Python Inference Example

The following Python script demonstrates how to run inference with a pre-built TensorRT engine and a custom plugin from the TensorRT Custom Plugin Example. With the utilities above, this takes only a few lines of code.

main.py
import numpy as np

import common
import common_runtime


def main():

    engine_file_path = "../data/identity_neural_network.engine"
    plugin_lib_file_path = "../build/src/libidentity_conv.so"

    common_runtime.load_plugin_lib(plugin_lib_file_path)
    engine = common_runtime.load_engine(engine_file_path)

    # Profile index is only useful when the engine has dynamic shapes.
    inputs, outputs, bindings, stream = common.allocate_buffers(
        engine=engine, profile_idx=None)

    # Print input tensor information.
    print("Input Tensor:")
    for host_device_buffer in inputs:
        print(
            f"Tensor Name: {host_device_buffer.name} Shape: {host_device_buffer.shape} "
            f"Data Type: {host_device_buffer.dtype} Data Format: {host_device_buffer.format}"
        )
    # Print output tensor information.
    print("Output Tensor:")
    for host_device_buffer in outputs:
        print(
            f"Tensor Name: {host_device_buffer.name} Shape: {host_device_buffer.shape} "
            f"Data Type: {host_device_buffer.dtype} Data Format: {host_device_buffer.format}"
        )

    # Dummy example.
    # Fill each input with random values.
    for host_device_buffer in inputs:
        data = np.random.uniform(low=-10.0,
                                 high=10.0,
                                 size=host_device_buffer.shape).astype(
                                     host_device_buffer.dtype).flatten()
        # Print input tensor data.
        print(f"Input Tensor: {host_device_buffer.name}")
        print(data)
        # Copy data from numpy array to host buffer.
        np.copyto(host_device_buffer.host, data)

    # Execute the engine.
    context = engine.create_execution_context()
    common.do_inference_v2(context,
                           bindings=bindings,
                           inputs=inputs,
                           outputs=outputs,
                           stream=stream)

    # Print output tensor data.
    for host_device_buffer in outputs:
        print(f"Output Tensor: {host_device_buffer.name}")
        print(host_device_buffer.host)

    # In our case, the input and output tensor data should be exactly the same.
    for input_host_device_buffer, output_host_device_buffer in zip(
            inputs, outputs):
        np.testing.assert_equal(input_host_device_buffer.host,
                                output_host_device_buffer.host)

    # Clean up.
    common.free_buffers(inputs=inputs, outputs=outputs, stream=stream)


if __name__ == "__main__":

    main()

The TensorRT engine was built from an identity neural network whose custom plugin implements an identity convolution layer, so the output tensor from the TensorRT engine should be exactly the same as the input tensor.
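
For engines that are not exact identities, for example when the engine is built with reduced precision such as FP16, an exact equality check is usually too strict and a tolerance-based comparison against a reference output is more appropriate. A minimal sketch is shown below; the reference_outputs list is hypothetical and would come from running the same inputs through the original framework.

# Hypothetical reference outputs computed by the original framework on the same inputs.
for host_device_buffer, reference_output in zip(outputs, reference_outputs):
    np.testing.assert_allclose(host_device_buffer.host,
                               reference_output.flatten(),
                               rtol=1e-3,
                               atol=1e-3)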

Author

Lei Mao

Posted on

05-18-2024

Updated on

05-18-2024
