def main(opt):
    """Train a GGNN regressor on the 13C dataset and checkpoint good models.

    Loads the 'train' split, trains for ``opt.niter`` epochs, and every 1000
    epochs evaluates on the 'val' split.  When validation MSE drops below 1.75
    the held-out 'tst' split is also evaluated, and below 1.73 the whole model
    object is saved to disk.

    Args:
        opt: options namespace; fields used here: ``annotation_dim`` (set),
            ``cuda``, ``lr``, ``niter``.
    """
    train_dataset = dataloader("processedData13C_NEW.pickle", "train", 12582)

    opt.annotation_dim = 1  # for bAbI

    net = GGNN(opt)
    net.double()  # model runs in float64
    print(net)

    criterion = nn.MSELoss()

    if opt.cuda:
        net.cuda()
        criterion.cuda()

    # weight_decay adds L2 regularization on top of plain Adam
    optimizer = optim.Adam(net.parameters(), lr=opt.lr, weight_decay=0.0015)

    for epoch in range(opt.niter):
        train(epoch, train_dataset, net, criterion, optimizer, opt)

        # Periodic validation; epoch 0 also triggers a (cheap) baseline check.
        if epoch % 1000 == 0:
            val_dataset = dataloader("processedData13C_NEW.pickle", "val", 12582)
            test_mse = test(val_dataset, net, criterion, optimizer, opt)
            del val_dataset  # release the split before possibly loading another

            if test_mse < 1.75:
                print("TEST_SET")
                tst_dataset = dataloader("processedData13C_NEW.pickle", "tst", 12582)
                test(tst_dataset, net, criterion, optimizer, opt)
                del tst_dataset

                if test_mse < 1.73:
                    # NOTE(review): saves the full module object to a hard-coded
                    # Windows path; prefer net.state_dict() + a configurable path.
                    torch.save(net, "d:/rotation2/datasets/ggnn_net4.pkl")
            print(epoch)
def __init__(self, model, loss='DiceBCE'):
    """Set up a segmentation training run for ``model``.

    Configures device, optimizer, LR scheduler, loss function, per-phase
    dataloaders, metric history and a LINE notification client.

    Args:
        model: the network to train; moved to GPU if available.
        loss: one of 'BCE', 'Dice', 'DiceBCE', 'IoU', 'Focal', 'Tanimoto'.

    Raises:
        ValueError: if ``loss`` is not one of the supported names.
    """
    df_path = '../dataset/train.csv'
    root = osp.dirname(df_path)
    self.num_workers = 6
    self.batch_size = {'train': 3, 'val': 3}
    # Gradient accumulation emulates an effective batch size of 32.
    # NOTE(review): attribute name keeps the original spelling ('accumlation')
    # because other methods of this class may reference it.
    self.accumlation_steps = 32 // self.batch_size['train']
    self.lr = 5e-4
    self.num_epochs = 40
    self.phases = ['train', 'val']
    self.device = torch.device(
        'cuda:0' if torch.cuda.is_available() else 'cpu')
    self.net = model.to(self.device)
    self.optimizer = optim.Adam(self.net.parameters(), lr=self.lr)
    self.scheduler = ReduceLROnPlateau(self.optimizer, mode='min',
                                       patience=3, verbose=True)

    # Dispatch table instead of an if/elif chain; factories are called lazily
    # so only the selected criterion is instantiated.
    loss_factories = {
        'BCE': nn.BCELoss,
        'Dice': DiceLoss,
        'DiceBCE': DiceBCELoss,
        'IoU': IoULoss,
        'Focal': FocalLoss,
        'Tanimoto': TanimotoLoss,
    }
    try:
        self.criterion = loss_factories[loss]()
    except KeyError:
        raise ValueError('Loss Function is not Defined') from None

    # BUG FIX: the original had a stray `return` here, which made everything
    # below unreachable — the trainer was constructed without dataloaders,
    # metric history, best_loss or the notify client, and would crash later.
    self.dataloaders = {
        phase: dataloader(root=root,
                          df_path=df_path,
                          phase=phase,
                          batch_size=self.batch_size[phase],
                          num_workers=self.num_workers)
        for phase in self.phases
    }
    self.best_loss = float('inf')
    self.losses = {phase: [] for phase in self.phases}
    self.dice_scores = {phase: [] for phase in self.phases}
    self.iou_scores = {phase: [] for phase in self.phases}
    # SECURITY(review): hard-coded access token committed to source control —
    # revoke it and load from an environment variable instead.
    self.client = line_notify.LineNotify(
        token='buNeQjYHp6sXPdwk1sMWUCmqLQr7z7czjLozKJdtevL')
# Script fragment: for each attack round, build a balanced (malicious/benign)
# training set and record bookkeeping info for an Excel report.
# Depends on names defined outside this chunk: rounds, attack, classifier,
# feature, x_mal, x_malware, x_benign, dataload, np.
avd = []
print('\n\n========================================================================')
print('==============================================================')
print('====================================================')
for round in rounds:  # NOTE(review): `round` shadows the builtin of the same name
    # Tag identifying this experiment (attack/classifier/feature/round).
    target=attack+'_'+classifier+'_'+feature+'_'+str(round)
    excel_row=[]
    # adv_samples = [f for f in listdir(adv_sample_path) if isfile(join(adv_sample_path,f))]
    # for file in adv_samples:
    #     x_mal=np.concatenate([x_mal,np.load(adv_sample_path+file)])
    #     print(x_mal.shape[0])
    # Balance the classes: take exactly as many benign samples as malicious ones.
    x_ben = x_benign[0:len(x_mal)]
    y_mal = np.ones(x_mal.shape[0])
    y_ben = np.zeros(x_ben.shape[0])
    train_data = [(x_mal, y_mal), (x_ben, y_ben)]
    # NOTE(review): built from the full x_malware/x_benign sets here (not the
    # balanced x_mal/x_ben used elsewhere) — confirm this is intentional.
    # Also shadows any module-level `dataloader` name.
    dataloader = dataload.dataloader(x_malware, x_benign)
    # print('\n DATA SHAPE:')
    print('Train --> [(x_mal:%d, y_mal:%d), (x_ben:%d, y_ben:%d)]' % (
        x_mal.shape[0], y_mal.shape[0], x_ben.shape[0], y_ben.shape[0]))
    # print('Test --> [(x:%d, y:%d)]' % (test_data[0].shape[0], test_data[1].shape[0]))
    ######EXEL######
    # First two report columns: experiment tag and malicious-sample count.
    excel_row.append(target)
    excel_row.append(x_mal.shape[0])
    print('\n---------------------')
    print('\n EXPERIMENT ON <<' + target + '>> CLASSIFIER...')
    # for arc in architechtures:
    ######EXEL######
    # print('\n====================================')
    # print('SURROGATE <<'+ str(arc)+ '>> CLASSIFIER:')
# in black box setting classifiers = [ 'SVM' ] #, 'SVM_R1', 'SVM_R2', 'SVM_R3', 'SVM_R4', 'SVM_R5', 'SVM_R6', 'SVM_R7', 'SVM_R8', 'SVM_R9', 'SVM_R10', 'SVM_R11' , 'SVM_R12', 'SVM_R13', 'SVM_R14', 'SVM_R15']# ,'RF' , 'RBF_SVM', 'LR', 'DT' , 'KNN', 'MLP']# ,'DNN'] excel_rows = [] for type in classifiers: ######EXEL###### excel_row = [type] print('\n====================================') print('EXPERIMENT ON <<' + type + '>> CLASSIFIER:') x_ben = x_benign[0:len(x_mal)] y_mal = np.ones(x_mal.shape[0]) y_ben = np.zeros(x_ben.shape[0]) train_data = [(x_mal, y_mal), (x_ben, y_ben)] dataloader = dataload.dataloader(x_mal, x_ben) print('\n DATA SHAPE:') print('Train --> [(x_mal:%d, y_mal:%d), (x_ben:%d, y_ben:%d)]' % (x_mal.shape[0], y_mal.shape[0], x_ben.shape[0], y_ben.shape[0])) print('Test --> [(x:%d, y:%d)]' % (x_test.shape[0], y_test.shape[0])) ######EXEL###### excel_row.append(x_mal.shape[0]) #train original classifier target_model = malware_classifeir(type=type) target_model.train(train_data) # ######EXEL###### # excel_row.append(accuracy_score(np.concatenate([y_mal, y_ben]), # target_model.model.predict(np.concatenate([x_mal, x_ben]))))