def extract(database):
    # database_ponzi = path.join('feature', 'nponzi_feature_raw.csv')
    color.pInfo("Dealing with transaction data")
    raw_data = pd.read_csv(database)
    raw_data = raw_data.fillna(0)
    t0 = time.time()  # start timer for the tl.compute_time call below
    tx_features = []
    f_names = [
        # 'ponzi',
        'address',
        'nbr_tx_in',
        'nbr_tx_out',
        'Tot_in',
        'Tot_out',
        'mean_in',
        'mean_out',
        'sdev_in',
        'sdev_out',
        'gini_in',
        'gini_out',
        'avg_time_btw_tx',
        # 'gini_time_out',
        'lifetime',
    ]
    for i in range(raw_data.shape[0]):
        # ponzi = raw_data.iloc[i]['ponzi']
        address = raw_data.iloc[i]['address']
        time_in = raw_data.iloc[i]['time_in']
        time_out = raw_data.iloc[i]['time_out']
        val_in = raw_data.iloc[i]['val_in']
        val_out = raw_data.iloc[i]['val_out']
        if val_in != '' or val_out != '':
            # f = tl.basic_features(ponzi, time_in, time_out, val_in, val_out)
            f = tl.basic_features(None, address, time_in, time_out, val_in, val_out)
            tx_features.append(f)
    tl.compute_time(t0)
    df_features = pd.DataFrame(tx_features, columns=f_names)
    name = os.path.basename(database).split('.')[0]
    f_file = os.path.join(
        'feature',
        name.split('_')[0] + '_' + name.split('_')[1] + '_feature.csv')
    df_features.to_csv(f_file, index=False)
    color.pDone('Have written feature file ' + f_file + '.')
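# `tl.basic_features` is defined elsewhere in the project; the stand-in below is
# only a sketch of what it plausibly computes, derived from the `f_names` columns
# above. The helper name `_gini`, the string parsing of the lists, and the exact
# field order are assumptions for illustration, not the project's implementation.
import ast

import numpy as np


def _gini(values):
    """Gini coefficient of non-negative values (0 = perfect equality)."""
    v = np.sort(np.asarray(values, dtype=float))
    if v.size == 0 or v.sum() == 0:
        return 0.0
    n = v.size
    index = np.arange(1, n + 1)
    return float((2 * index - n - 1).dot(v) / (n * v.sum()))


def basic_features_sketch(ponzi, address, time_in, time_out, val_in, val_out):
    # The raw CSV stores the per-address lists as strings, e.g. "[1.0, 2.5]".
    t_in, t_out = ast.literal_eval(str(time_in)), ast.literal_eval(str(time_out))
    v_in, v_out = ast.literal_eval(str(val_in)), ast.literal_eval(str(val_out))
    times = sorted(t_in + t_out)
    gaps = np.diff(times) if len(times) > 1 else [0.0]
    return [
        address,
        len(v_in), len(v_out),                     # nbr_tx_in, nbr_tx_out
        sum(v_in), sum(v_out),                     # Tot_in, Tot_out
        float(np.mean(v_in)) if v_in else 0.0,     # mean_in
        float(np.mean(v_out)) if v_out else 0.0,   # mean_out
        float(np.std(v_in)) if v_in else 0.0,      # sdev_in
        float(np.std(v_out)) if v_out else 0.0,    # sdev_out
        _gini(v_in), _gini(v_out),                 # gini_in, gini_out
        float(np.mean(gaps)),                      # avg_time_btw_tx
        (times[-1] - times[0]) if times else 0.0,  # lifetime
    ]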
def time_forward(config, args):
    model = tools.build_model(
        config.model.cfg_path,
        args.weight,
        None,
        device=args.device,
        dataparallel=False,
        qat=args.qat,
        quantized=args.quant,
        backend=args.backend,
    )[0]
    size = args.size
    avg_time = tools.compute_time(model,
                                  input_size=(3, size, size),
                                  batch_size=args.bs)
    print(f'{avg_time:.2f} ms')
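# `tools.compute_time` is the project's own benchmarking helper and its source is
# not shown here; the function below is only a minimal sketch with the same call
# shape. The warm-up count, iteration count, and use of perf_counter are
# assumptions, not the project's implementation.
import time

import torch


def compute_time_sketch(model, input_size=(3, 224, 224), batch_size=1,
                        warmup=10, iters=50):
    """Return the average forward-pass latency of `model` in milliseconds."""
    device = next(model.parameters()).device
    x = torch.randn(batch_size, *input_size, device=device)
    model.eval()
    with torch.no_grad():
        for _ in range(warmup):  # warm up kernels (e.g. cudnn autotuning)
            model(x)
        if device.type == 'cuda':
            torch.cuda.synchronize()  # wait for queued kernels before timing
        start = time.perf_counter()
        for _ in range(iters):
            model(x)
        if device.type == 'cuda':
            torch.cuda.synchronize()
    return (time.perf_counter() - start) / iters * 1000.0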
def generate_model(macs_thres=15e9, time_thres=(0, 100), gen_func=detnet_600m):
    # Rejection-sample architectures: keep drawing from `gen_func` until one
    # fits both the MACs budget and the latency window (in ms).
    while True:
        net = gen_func().cuda()
        inputs = torch.randn(1, 3, 512, 512).cuda()
        flops, params = profile(net, inputs=(inputs, ), verbose=False)
        if flops > macs_thres:
            continue
        avg_time = tools.compute_time(net, batch_size=16)
        if avg_time > time_thres[1] or avg_time < time_thres[0]:
            continue
        net.attr = {
            'MACs': flops,
            'params': params,
            'avg_time': avg_time,
        }
        print(net.cfg)
        flops, params = clever_format([flops, params], "%.3f")
        print('MACs: {}, params: {}, {:.2f} ms'.format(flops, params, avg_time))
        yield nn.DataParallel(net)
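# Usage sketch: `generate_model` is an infinite generator, so candidates are
# drawn with `next()`. The thresholds below just restate the defaults for
# illustration.
if __name__ == '__main__':
    sampler = generate_model(macs_thres=15e9, time_thres=(0, 100))
    candidate = next(sampler)  # a DataParallel-wrapped net within both budgets
    print(candidate.module.attr)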
def open_data(opcodes):
    t0 = time.clock()  # NOTE: time.clock() was removed in Python 3.8; use time.perf_counter() there
    print("tools.open_data: define variables...")
    path = '/Users/e31989/Desktop/e31989/Documents/sm_database/'
    database_nml = path + 'normal.json'
    database_int = path + 'internal.json'
    database_op = path + 'opcode/opcodes_count/'
    database_nml_np = path + 'normal_np.json'
    database_int_np = path + 'internal_np.json'
    database_op_np = path + 'opcode_np/opcode_count/bytecode_np/'
    t1 = tl.compute_time(t0)

    # Open databases to access info
    print("tools.open_data: open databases...")
    # ponzi instances
    with open(database_nml, 'r') as f:
        raw_nml = f.readlines()
    with open(database_int, 'r') as f:
        raw_int = f.readlines()
    op = [[f[:-5] for f in os.listdir(database_op) if f[-5:] == '.json'],
          [f[:-5] for f in os.listdir(database_op_np) if f[-5:] == '.json']]
    op_freq = [[], []]
    for add in op[0]:
        with open(database_op + add + '.json', 'r') as f:
            raw = f.readlines()
        res = [0 for i in range(len(opcodes))]
        if len(raw) > 1:
            tot = 0
            for opcode in raw:
                count = float(opcode[3])
                tot += count
                res[opcodes.index(opcode[5:-1])] = count
        else:
            tot = 1
        res = [x / tot for x in res]
        op_freq[0].append(res)

    # non-ponzi instances
    with open(database_nml_np, 'r') as f:
        raw_nml_np = f.readlines()
    with open(database_int_np, 'r') as f:
        raw_int_np = f.readlines()
    for add in op[1]:
        with open(database_op_np + add + '.json', 'r') as f:
            raw = f.readlines()
        res = [0 for i in range(len(opcodes))]
        if len(raw) > 1:
            tot = 0
            for opcode in raw:
                count = float(opcode[3])
                tot += count
                res[opcodes.index(opcode[5:-1])] = count
        else:
            tot = 1
        res = [x / tot for x in res]
        op_freq[1].append(res)
    t2 = tl.compute_time(t1)

    with open(path + 'op_freq.json', 'w') as outfile:
        outfile.write(json.dumps(op_freq))
    print('op_freq serialized')

    # tr_dico is a list whose length is the number of smart contracts; each
    # element is a list whose length is the number of transactions, and each of
    # those elements is a dictionary describing one specific transaction.
    print("tools.open_data: create dictionaries...")
    # ponzi instances
    addr = [raw_nml[2 * i][:-1] for i in range(len(raw_nml) // 2)]
    addr_int = [raw_int[2 * i][:-1] for i in range(len(raw_int) // 2)]
    addr_np = [raw_nml_np[2 * i][:-1] for i in range(len(raw_nml_np) // 2)]
    addr_int_np = [raw_int_np[2 * i][:-1] for i in range(len(raw_int_np) // 2)]
    N = len(op[0])
    N_np = len(op[1])
    tr_dico = [
        # ponzi
        [[
            ast.literal_eval(raw_nml[2 * addr.index(op[0][i]) + 1][:-1]),
            ast.literal_eval(raw_int[2 * addr_int.index(op[0][i]) + 1][:-1])
        ] for i in range(N)],
        # non-ponzi
        [[
            ast.literal_eval(raw_nml_np[2 * addr_np.index(op[1][i]) + 1][:-1]),
            ast.literal_eval(raw_int_np[2 * addr_int_np.index(op[1][i]) + 1][:-1])
        ] for i in range(N_np)]
    ]
    tl.compute_time(t2)

    temp = int(N_np / 3)
    # Saved in three separate files because os.write/os.read cannot handle files
    # larger than 2 GB, and ours is 4.2 GB.
    with open(path + 'tr_dico_nonponzi1.json', 'w') as f:
        f.write(json.dumps(tr_dico[1][:temp]))
    print('serialized first third of tr_dico')
    with open(path + 'tr_dico_nonponzi2.json', 'w') as f:
        f.write(json.dumps(tr_dico[1][temp:2 * temp]))
    with open(path + 'tr_dico_nonponzi3.json', 'w') as f:
        f.write(json.dumps(tr_dico[1][2 * temp:]))
    print('everything has been serialized')
    return tr_dico
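# Layout assumed by the 2*i / 2*i+1 indexing above (inferred from the code, shown
# here only for illustration): the normal/internal database files alternate an
# address line with a line holding that address's transaction list, e.g.
#
#   0x0123...abcd
#   [{'blockNumber': '1', 'timeStamp': '...', 'hash': '0x...', ...}, ...]
#   0x4567...ef01
#   [...]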
    for i in op[0]:
        size_info.append(os.path.getsize(path + 'bytecode/' + i + '.json'))
    for i in op[1]:
        size_info.append(os.path.getsize(path + 'bytecode_np/' + i + '.json'))
    # print(tr_dico)
    with open(path + 'op_freq.json', 'rb') as f:
        op_freq = json.loads(f.read())
    # print(op_freq)
    t3 = tl.compute_time(t0)
    """ from dictionary to lists
    normal : {
        (0) 'blockNumber': 'n',
        (1) 'timeStamp': 'n',
        (2) 'hash': '0x..',
        (3) 'nonce': 'n',
        (4) 'blockHash': '0x..e6',
        (5) 'transactionIndex': '1',
        (6) 'from': '0x..',
        (7) 'to': '0x..',
        (8) 'value': 'n',
        (9) 'gas': 'n',
        (10) 'gasPrice': 'n',
                m.weight.data.zero_()
        self.cfg = fpn_cfg

    def forward(self, x, target=None):
        b_outs = []
        x = self.stem(x)
        x = self.s1(x)
        for i in range(2, 5):
            x = getattr(self, f's{i}')(x)
            b_outs.append(x)  # collect s2/s3/s4 feature maps for the head
        return self.head(b_outs, target)


def detnet_600m(fpn_cfg=None, pretrained=True):
    if pretrained:
        return DetNet(
            regnet_600M_config,
            fpn_cfg,
            regnet_weight_path='weights/pretrained/regnet_600m_741.pth')
    return DetNet(regnet_600M_config, fpn_cfg)


if __name__ == "__main__":
    from thop import clever_format, profile

    import tools

    net = detnet_600m().cuda()
    inputs = torch.randn(1, 3, 512, 512).cuda()
    flops, params = profile(net, inputs=(inputs, ), verbose=False)
    flops, params = clever_format([flops, params], "%.3f")
    print('MACs: {}, params: {}'.format(flops, params))
    avg_time = tools.compute_time(net, batch_size=16)
    print(f'{avg_time:.2f} ms')