def test_build_module_graph(self):
    big_model = BigModel()
    g = build_module_graph(big_model, torch.randn(2, 1, 28, 28))
    print(g.name_to_node.keys())
    leaf_modules = set([
        'backbone1.conv1', 'backbone2.bn1', 'backbone2.bn2', 'backbone2.conv1',
        'backbone2.conv2', 'backbone2.fc1', 'backbone2.fc2', 'fc3'
    ])
    assert set(g.leaf_modules) == leaf_modules
    assert not leaf_modules - set(g.name_to_node.keys())
    assert g.find_successors('backbone2.conv1') == ['backbone2.bn1']
    assert g.find_successors('backbone2.conv2') == ['backbone2.bn2']
    assert g.find_predecessors('backbone2.bn1') == ['backbone2.conv1']
    assert g.find_predecessors('backbone2.bn2') == ['backbone2.conv2']
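# A hypothetical reconstruction of the BigModel fixture the test above assumes;
# the real fixture in the test suite may use different layer sizes, but the
# submodule names and the conv -> bn wiring inside backbone2 must match the
# assertions (functional relu is used so no extra leaf modules appear).
import torch
import torch.nn as nn

class BackBone1(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 8, 3, padding=1)

    def forward(self, x):
        return torch.relu(self.conv1(x))

class BackBone2(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(8, 16, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(16)
        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(32)
        self.fc1 = nn.Linear(32 * 28 * 28, 128)
        self.fc2 = nn.Linear(128, 64)

    def forward(self, x):
        x = torch.relu(self.bn1(self.conv1(x)))
        x = torch.relu(self.bn2(self.conv2(x)))
        x = x.view(x.size(0), -1)
        return torch.relu(self.fc2(torch.relu(self.fc1(x))))

class BigModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.backbone1 = BackBone1()
        self.backbone2 = BackBone2()
        self.fc3 = nn.Linear(64, 10)

    def forward(self, x):
        return self.fc3(self.backbone2(self.backbone1(x)))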
def __init__(self, model, dummy_input, masks_file, map_location=None):
    """
    Parameters
    ----------
    model : pytorch model
        The model user wants to speed up
    dummy_input : pytorch tensor
        The dummy input for ```jit.trace```, users should put it on the right device before passing it in
    masks_file : str
        The path of the user-provided masks file
    map_location : str
        The device on which masks are placed, same as map_location in ```torch.load```
    """
    self.bound_model = model
    self.masks = torch.load(masks_file, map_location)
    self.inferred_masks = dict()  # key: module_name, value: ModuleMasks
    self.torch_graph = build_module_graph(model, dummy_input)
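# A minimal usage sketch for the constructor above, assuming it belongs to
# NNI's ModelSpeedup class (the import path below matches NNI 2.x and may
# differ in other releases). 'mask.pth' is a placeholder path and the masks
# layout is an assumption mimicking what a pruner's export_model would save.
import torch
import torch.nn as nn
from nni.compression.pytorch import ModelSpeedup

model = nn.Sequential(
    nn.Conv2d(1, 16, 3, padding=1), nn.ReLU(),
    nn.Flatten(), nn.Linear(16 * 28 * 28, 10)
)
# per-module binary masks keyed by module name ('0' is the first Conv2d)
masks = {'0': {'weight': torch.ones(16, 1, 3, 3), 'bias': torch.ones(16)}}
torch.save(masks, 'mask.pth')

dummy_input = torch.randn(2, 1, 28, 28)
speedup = ModelSpeedup(model, dummy_input, 'mask.pth', map_location='cpu')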
for modeltype, dir in enumerate(Dirs):
    dir_path = os.path.join(Prefix, dir)
    files = os.listdir(dir_path)
    print(files)
    for filename in files:
        file_path = os.path.join(dir_path, filename)
        with open(file_path, 'r') as jf:
            data = json.load(jf)
        for model in data:
            total_graph_count += 1
            model_cfg = model[0]
            latencies = model[1]
            bound_model = build_model(construct_model_func[modeltype], model_cfg)
            # print(bound_model)
            torch_graph = build_module_graph(bound_model, dummy_input)
            op_nodes = torch_graph.nodes_py.nodes_op
            n_count = len(op_nodes)
            # write the graph indicator
            write_graph_indicator(n_count, total_graph_count)
            node_id = {}
            for i in range(1, n_count + 1):
                cur_nodeid = total_node_count + i
                unique_name = op_nodes[i - 1].unique_name
                node_id[unique_name] = cur_nodeid
            # write the graph adjacent matrix
            write_graph_adjacent(node_id, torch_graph)
            write_node_label(torch_graph)
            write_graph_label(str(type(bound_model)))
            model_latency = latencies['model'][2:-2]
            latency_mean, latency_std = np.mean(model_latency), np.std(model_latency)
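# Hypothetical sketches of two of the writer helpers called in the loop above;
# the real implementations are not shown in this excerpt. The assumed output is
# the TU-dataset text layout: a *_graph_indicator.txt file mapping each node id
# to its graph id, and a *_A.txt file with one "src, dst" edge per line. The
# file names and the use of find_successors to enumerate edges are assumptions.
def write_graph_indicator(n_count, graph_id, out_file='DS_graph_indicator.txt'):
    # every node of the current graph belongs to graph `graph_id`
    with open(out_file, 'a') as f:
        for _ in range(n_count):
            f.write('{}\n'.format(graph_id))

def write_graph_adjacent(node_id, torch_graph, out_file='DS_A.txt'):
    # emit a directed edge for every op-node -> successor pair whose endpoints
    # both map to known global node ids
    with open(out_file, 'a') as f:
        for node in torch_graph.nodes_py.nodes_op:
            src = node_id[node.unique_name]
            for successor in torch_graph.find_successors(node.unique_name):
                if successor in node_id:
                    f.write('{}, {}\n'.format(src, node_id[successor]))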