-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathtest_speed_v1.py
67 lines (47 loc) · 1.26 KB
/
test_speed_v1.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
#coding=utf-8
import jittor as jt
from jittor_utils import auto_diff
import torch
import glob
from PIL import Image
import numpy as np
from tqdm import tqdm
from models.vit_v1 import ViT as jt_ViT
from models.vit_pytorch import ViT as torch_ViT
# --- Model setup -------------------------------------------------------------
# Both models are built with identical ViT hyper-parameters so the speed
# comparison is apples-to-apples.
# NOTE(review): requires a CUDA device for both frameworks — TODO confirm
# this script is only ever run on GPU hosts.
torch_model = torch_ViT(
    dim=128,
    image_size=224,
    patch_size=32,
    num_classes=2,
    depth=12,
    heads=8,
    mlp_dim=128,
).cuda()

jt.flags.use_cuda = 1  # run jittor on the GPU as well
jt_model = jt_ViT(
    dim=128,
    image_size=224,
    patch_size=32,
    num_classes=2,
    depth=12,
    heads=8,
    mlp_dim=128,
)

# Synthetic input batch: 64 images, 3 channels, 224x224, float32 (NCHW).
image = np.random.randn(64, 3, 224, 224).astype(np.float32)

iter_num = 1000

# Jittor timing loop.  Calling .numpy() forces execution of jittor's lazy
# graph and copies the result to host, so each tqdm iteration measures a
# complete forward pass, not just graph construction.
for _ in tqdm(range(iter_num)):
    jt_data = jt.array(image)
    jt_output = jt_model(jt_data).numpy()

# PyTorch timing loop.  torch.no_grad() disables autograd bookkeeping:
# this is a pure inference speed test, and building the backward graph on
# every iteration would unfairly inflate PyTorch's time and memory.
with torch.no_grad():
    for _ in tqdm(range(iter_num)):
        torch_data = torch.tensor(image).cuda()
        torch_output = torch_model(torch_data).cpu().detach().numpy()