# get_results.py — forked from yashkant/housekeep (fork of 2)
# 140 lines (120 loc) · 5.29 KB
# NOTE: web-scrape page chrome and the rendered line-number gutter (1–140)
# were converted to this comment header so the file is valid Python.
import os
import json
import torch
from model import GCN
from utils import *
import networkx as nx
import numpy as np
from scipy.sparse import coo_matrix
def main(image_model_type, lang_model_type, model_name, loss, split):
    """Embed validation objects with a trained GCN and report retrieval metrics.

    Loads precomputed image/language embeddings, builds a self-loop graph over
    the object nodes listed in ``keys.json``, runs the GCN to get node
    embeddings, then scores them against precomputed room embeddings via
    ``calculate_statistics`` and ``get_mAP``.

    Args:
        image_model_type: image-feature model name; '/' is replaced with '_'
            when building file paths (e.g. 'clip_ViT-H/14').
        lang_model_type: language-feature model name, parsed the same way.
        model_name: CLI model identifier — currently unused here.
        loss: CLI loss identifier — currently unused here.
        split: CLI split identifier — currently unused here; the per-object
            split comes from each entry of ``keys.json`` instead.
    """
    rooms = get_room_names()
    objects = get_object_names()
    categories = get_category_names()
    input_dim = 1024
    output_dim = 128

    # Slashes in model names would be treated as path separators below.
    image_model_type_parse = image_model_type.replace('/', '_')
    lang_model_type_parse = lang_model_type.replace('/', '_')

    # room -> iterable of object names belonging to that room.
    relationships = np.load('all_obj_rel.npy', allow_pickle=True).item()

    model = GCN(input_dim, output_dim).to("cuda")
    # model = DataParallel(model)
    # NOTE(review): checkpoint path is hard-coded; the model_name/loss/split
    # CLI arguments are never consulted — confirm whether they should select
    # the checkpoint or output files.
    model.load_state_dict(torch.load('model2.pth'))

    # Per-room plot colors (hex except two named colors kept from original).
    color_dict = {
        "bathroom": "#6495ED",
        "bedroom": "#8B008B",
        "childs_room": "#FFA07A",
        "closet": "#F08080",
        "corridor": "#FFD700",
        "dining_room": "#ADFF2F",
        "exercise_room": "#00FFFF",
        "garage": "#808080",
        "home_office": "#9370DB",
        "kitchen": "red",
        "living_room": "#FFC0CB",
        "lobby": "#87CEFA",
        "pantry_room": "#4169E1",
        "playroom": "#FF69B4",
        "storage_room": "#228B22",
        "television_room": "#FF4500",
        "utility_room": "green"
    }

    val_graph = nx.Graph()
    room_features = np.load(
        f'input_embeddings/room_{lang_model_type_parse}_{input_dim}.npy', allow_pickle=True).item()
    obj_features1 = np.load(
        f'input_embeddings/train_110_input/train110_{image_model_type_parse}_features.npy', allow_pickle=True).item()
    obj_features2 = np.load(
        f'input_embeddings/val_27_input/val27_{image_model_type_parse}_features.npy', allow_pickle=True).item()
    obj_features3 = np.load(
        f'input_embeddings/test_131_input/test131_{image_model_type_parse}_features.npy', allow_pickle=True).item()
    print("Found Image Embeddings")
    features = {**room_features, **obj_features1,
                **obj_features2, **obj_features3}

    # for room in rooms:
    #     val_graph.add_node(
    #         room,
    #         name=room,
    #         features=np.reshape(features[room].cpu().detach().numpy(), (input_dim,))
    #     )
    with open('keys.json', 'r') as f:
        keys = json.load(f)
    for key in keys:
        # BUG FIX: the original unpacked the per-key split into `split`,
        # shadowing the function argument; use a distinct local name.
        category, object_name_parse, key_split, name = key
        # First room whose relationship list contains this object, else None.
        color = next((color_dict[room] for room in relationships
                      if object_name_parse in relationships[room]), None)
        val_graph.add_node(
            name,
            name=name,
            features=np.reshape(
                features[category][object_name_parse][key_split][name]
                .cpu().detach().numpy(), (input_dim,)),
            color=color)

    # Add self-loops so every node keeps its own features in the GCN pass.
    self_edges = [(node, node) for node in val_graph.nodes()]
    val_graph.add_edges_from(self_edges)
    val_info2 = nx.get_node_attributes(val_graph, "color")

    # Validation set: build edge_index / edge_weight. Computed from a single
    # adjacency matrix (the original recomputed it because the first copy was
    # mutated in place when binarizing).
    adjacency_matrix = torch.from_numpy(
        nx.to_numpy_array(val_graph)).to("cuda")
    non_zero_entries = torch.nonzero(adjacency_matrix != 0)
    # (2, num_edges), dtype long as required by torch_geometric-style models.
    val_edge_index = non_zero_entries.t().contiguous().long()
    coo = coo_matrix(adjacency_matrix.cpu().numpy())
    val_edge_weight = torch.tensor(coo.data, dtype=torch.float).cuda()

    val_features = [val_graph.nodes[node]['features']
                    for node in val_graph.nodes()]
    val_features = np.reshape(val_features, (len(val_features), input_dim))
    val_features = torch.from_numpy(val_features).cuda().float()
    print(val_features.shape)

    model.eval()
    with torch.no_grad():
        # val_features is already float32; no second .float() needed.
        val_emb = model(val_features, val_edge_index, val_edge_weight)
    # visualize(val_emb, color=list(val_info2.values()), filename=f'val_tsne.png')
    # room_embeddings = torch.stack([val_emb[_] for _ in range(len(rooms))])

    room_dict = np.load(
        f'output_room_embeddings/hybrid_{image_model_type_parse}_{lang_model_type_parse}_room_embedding.npy', allow_pickle=True).item()
    room_names = list(room_dict.keys())
    room_embeddings = torch.stack(list(room_dict.values()))

    calculate_statistics(val_emb, room_embeddings, list(val_graph.nodes),
                         rooms, relationships, filename='GCN_model_output.txt')
    print(get_mAP(val_emb, room_embeddings, list(val_graph.nodes),
                  rooms, relationships))
if __name__ == '__main__':
    import argparse

    # CLI: which embedding models, GCN variant, loss, and split to evaluate.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--image_model', type=str,
                            default='clip_ViT-H/14')
    arg_parser.add_argument('--lang_model', type=str, default='clip_ViT-H/14')
    arg_parser.add_argument('--model', type=str, default='GCN')
    arg_parser.add_argument('--loss', type=str, default='margin')
    arg_parser.add_argument('--split', type=str, default='test')
    cli_args = arg_parser.parse_args()

    main(cli_args.image_model, cli_args.lang_model,
         cli_args.model, cli_args.loss, cli_args.split)