Getting Started with TVM: Workflow for Compiling a PyTorch Model with TVM


This walkthrough is organized from the TVM reference documentation, with a few notes added. The example is fairly simple and is suitable as introductory reading for anyone learning TVM.
See the official TVM documentation for reference.

0. Imports
import tvm
from tvm import relay

import numpy as np

from tvm.contrib.download import download_testdata

# PyTorch imports
import torch
import torchvision
1. Load a model
  • resnet18 is used as the example here (a note on the newer torchvision weights API follows the code below)
model_name = "resnet18"
model = getattr(torchvision.models, model_name)(pretrained=True)
model = model.eval()
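Note (not part of the original tutorial): on newer torchvision releases, roughly 0.13 and later, the pretrained=True flag is deprecated in favor of explicit weight enums. A minimal sketch of the newer call, assuming such a torchvision version is installed:
# Equivalent model load on newer torchvision versions (assumption: torchvision >= 0.13)
model = torchvision.models.resnet18(weights=torchvision.models.ResNet18_Weights.IMAGENET1K_V1)
model = model.eval()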
2. Get a TorchScripted model
  • When generating the tvm.IRModule with Relay later, the first argument is the script_module: a TopLevelTracedModule object
# We grab the TorchScripted model via tracing
input_shape = [1, 3, 224, 224]
input_data = torch.randn(input_shape)
# https://pytorch.org/docs/stable/generated/torch.jit.trace.html
# torch.jit.trace optimizes the model via just-in-time compilation; tracing records only operations on tensors and on lists, dicts, or tuples of tensors
scripted_model = torch.jit.trace(model, input_data).eval()
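As an optional sanity check (not part of the original tutorial), the traced module can be compared against the eager model on the tracing input; they should agree up to small numerical differences:
with torch.no_grad():
    eager_out = model(input_data)            # eager PyTorch forward pass
    traced_out = scripted_model(input_data)  # TorchScript forward pass
# The traced graph should reproduce the eager outputs on this input
assert torch.allclose(eager_out, traced_out, atol=1e-5)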
3. Load an image
from PIL import Image

# img_url = "http://www.kaotop.com/file/tupian/20220508/cat.png?raw=true"
# img_url = "http://www.kaotop.com/file/tupian/20220508/cat.png"
# img_path = download_testdata(img_url, "cat.png", module="data")
# img = Image.open(img_path).resize((224, 224))
img = Image.open('./cat.png').resize((224, 224))

# Preprocess the image and convert to tensor
from torchvision import transforms

my_preprocess = transforms.Compose(
    [
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ]
)
img = my_preprocess(img)
img = np.expand_dims(img, 0)
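A quick optional check (not in the original) confirms that the preprocessed array matches the input shape used for tracing above:
# np.expand_dims converts the preprocessed tensor into a numpy array of shape (1, 3, 224, 224)
assert img.shape == (1, 3, 224, 224)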
4. Import the graph into Relay
  • Convert the PyTorch graph into a Relay graph; the input name can be arbitrary (the imported IR can be printed for inspection, as shown after the code below)
input_name = "input0"
shape_list = [(input_name, img.shape)]
# Import the model defined by the frontend:
# load the PyTorch model in the form of a scripted PyTorch model and convert it into Relay.
# mod: tvm.IRModule, the module that optimizations will be performed on
# params: dict of converted parameters stored in tvm.runtime.ndarray format
mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)
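Optionally (not shown in the original), the imported Relay IR can be inspected; the main function of the IRModule contains the converted graph:
# Print the Relay representation of the network for inspection
print(mod["main"])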
5. Relay Build
  • Compile the Relay graph, according to the given input specification, into a factory_module (a portable module); a sketch of exporting the compiled library to disk follows the code below
# Target device information 
target = tvm.target.Target("llvm", host="llvm")
# Construct a CPU device
dev = tvm.cpu(0)

# PassContext is the basis on which Relay optimizations/analyses run;
# it carries auxiliary context information used during optimization.
# opt_level: the optimization level of the passes to apply.
with tvm.transform.PassContext(opt_level=3):
    # factory_module : tvm.relay.backend.executor_factory.ExecutorFactoryModule
    # The runtime factory for the TVM graph executor.
    # lib: the portable compiled module, i.e. the runtime factory for the TVM graph executor
    lib = relay.build(mod, target=target, params=params)
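A minimal sketch (not part of the original tutorial) of persisting the compiled module and loading it back later, e.g. on a deployment machine; the file name here is an arbitrary choice:
# Export the compiled artifact as a shared library
lib.export_library("resnet18_tvm_llvm.so")
# Reload it later without rebuilding
loaded_lib = tvm.runtime.load_module("resnet18_tvm_llvm.so")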
6. Run the portable module on TVM
from tvm.contrib import graph_executor

dtype = "float32"
# lib["default"](dev) creates a new runtime.Module; wrap it with a graph executor GraphModule.
m = graph_executor.GraphModule(lib["default"](dev))
# Set inputs
m.set_input(input_name, tvm.nd.array(img.astype(dtype)))
# Execute
m.run()
# Get outputs
# shape=(1, 1000)
tvm_output = m.get_output(0)
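Optionally, a rough latency measurement can be taken with the runtime's time evaluator (a sketch, not part of the original tutorial; the number/repeat values are arbitrary choices):
# Time the "run" function of the graph executor
ftimer = m.module.time_evaluator("run", dev, number=10, repeat=3)
prof_res = np.array(ftimer().results) * 1000  # per-repeat mean, converted to milliseconds
print("Mean inference time: %.2f ms (std %.2f ms)" % (np.mean(prof_res), np.std(prof_res)))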
7. Output the image classification result
  • Print the classification result produced by TVM
  • Run the same image through the PyTorch model as well and print its result
  • In experiments, the two results have not been observed to disagree
# ImageNet synsets
# synset_url = "".join(
#     [
#         "https://raw.githubusercontent.com/Cadene/",
#         "pretrained-models.pytorch/master/data/",
#         "imagenet_synsets.txt",
#     ]
# )
# synset_name = "imagenet_synsets.txt"
# synset_path = download_testdata(synset_url, synset_name, module="data")
# This file maps synset keys (class IDs) to class names: key_to_classname
synset_path = "./imagenet_synsets.txt"
with open(synset_path) as f:
    synsets = f.readlines()

# str.strip() removes leading/trailing whitespace (including the newline) from each line
synsets = [x.strip() for x in synsets]
splits = [line.split(" ") for line in synsets]
key_to_classname = {spl[0]: " ".join(spl[1:]) for spl in splits}
# class_url = "".join(
#     [
#         "https://raw.githubusercontent.com/Cadene/",
#         "pretrained-models.pytorch/master/data/",
#         "imagenet_classes.txt",
#     ]
# )
# class_name = "imagenet_classes.txt"
# class_path = download_testdata(class_url, class_name, module="data")
class_path = "./imagenet_classes.txt"
with open(class_path) as f:
    class_id_to_key = f.readlines()
# class_id_to_key: the synset key for each class index (1000 classes in total)
class_id_to_key = [x.strip() for x in class_id_to_key]

# Get top-1 result for TVM
top1_tvm = np.argmax(tvm_output.numpy()[0])
tvm_class_key = class_id_to_key[top1_tvm]

# Convert input to PyTorch variable and get PyTorch result for comparison
with torch.no_grad():
    torch_img = torch.from_numpy(img)
    output = model(torch_img)

    # Get top-1 result for PyTorch
    top1_torch = np.argmax(output.numpy())
    torch_class_key = class_id_to_key[top1_torch]

print("Relay top-1 id: {}, class name: {}".format(top1_tvm, key_to_classname[tvm_class_key]))
print("Torch top-1 id: {}, class name: {}".format(top1_torch, key_to_classname[torch_class_key]))
  • Result: both frameworks report the same top-1 class for the test image
