def score_model(self, epoch): """ """ loss, self.train_z = self.model(self.positive_edges, self.negative_edges, self.y) score_positive_edges = torch.from_numpy( np.array(self.test_positive_edges, dtype=np.int64).T).type(torch.long).to(self.device) score_negative_edges = torch.from_numpy( np.array(self.test_negative_edges, dtype=np.int64).T).type(torch.long).to(self.device) test_positive_z = torch.cat( (self.train_z[score_positive_edges[0, :], :], self.train_z[score_positive_edges[1, :], :]), 1) test_negative_z = torch.cat( (self.train_z[score_negative_edges[0, :], :], self.train_z[score_negative_edges[1, :], :]), 1) scores = torch.mm(torch.cat((test_positive_z, test_negative_z), 0), self.model.regression_weights.to(self.device)) probability_scores = torch.exp(F.softmax(scores, dim=1)) predictions = probability_scores[:, 0] / probability_scores[:, 0:2].sum(1) predictions = predictions.cpu().detach().numpy() targets = [0] * len(self.test_positive_edges) + [1] * len( self.test_negative_edges) auc, f1 = calculate_auc(targets, predictions, self.edges) self.logs["performance"].append([epoch + 1, auc, f1])
def score_model(self, epoch): """ Score the model on the test set edges in each epoch. :param epoch: Epoch number. #self.z is updated representation of the nodes. Representation for all the nodes in the graph are obtained here. """ loss, self.train_z = self.model(self.positive_edges, self.negative_edges, self.y) score_positive_edges = torch.from_numpy( np.array(self.test_positive_edges, dtype=np.int64).T).type(torch.long).to(self.device) score_negative_edges = torch.from_numpy( np.array(self.test_negative_edges, dtype=np.int64).T).type(torch.long).to(self.device) test_positive_z = torch.cat( (self.train_z[score_positive_edges[0, :], :], self.train_z[score_positive_edges[1, :], :]), 1) #get negative and positive representations for the test edges test_negative_z = torch.cat( (self.train_z[score_negative_edges[0, :], :], self.train_z[score_negative_edges[1, :], :]), 1) score_a = torch.cat((test_positive_z, test_negative_z), 0) scores = torch.mm(score_a, self.model.regression_weights.to(self.device)) scores = torch.mm(scores, self.model.fc_weights.to(self.device)) probability_scores = torch.exp(F.softmax(scores, dim=1)) predictions = probability_scores[:, 0] / probability_scores[:, 0:2].sum(1) predictions = predictions.cpu().detach().numpy() targets = [0] * len(self.test_positive_edges) + [1] * len( self.test_negative_edges) auc, f1, _, precision, recall, accuracy = calculate_auc( targets, predictions, self.edges) self.logs["performance"].append( [epoch + 1, auc, f1, precision, recall, accuracy]) if (self.end_epoch): with open('score_positive_edges.pkl', 'wb') as h: pickle.dump(score_positive_edges, h) with open('score_negative_edges.pkl', 'wb') as k: pickle.dump(score_negative_edges, k)
def score_model(self, epoch):
    loss, self.train_z = self.model(self.positive_edges, self.negative_edges, self.y,
                                    self.train_indice)
    test_z_indice = self.nodes_dict['indice'][self.test_indice]
    test_hidden = self.train_z[test_z_indice]
    scores = torch.mm(test_hidden, self.model.regression_weights.to(self.device))
    predictions = F.softmax(scores, dim=1)
    predictions = predictions.cpu().detach().numpy()
    test_target = self.y[self.test_indice]
    test_target = test_target.cpu().detach().numpy()
    auc, f1 = calculate_auc(test_target, predictions[:, 1], self.edges)
    self.logs["performance"].append([epoch + 1, auc, f1])
    return auc
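The score_model variants above all defer to a calculate_auc helper that takes the edge targets, the predicted probabilities, and the edge dictionary, and (in the first and third variants) returns an (AUC, F1) pair; the second variant expects additional metrics from it. Below is a minimal sketch of the two-value form, assuming the helper wraps scikit-learn's roc_auc_score and f1_score and thresholds predictions at the dataset's negative-edge ratio; the label orientation and thresholding rule in the original project may differ.

# Minimal sketch of the assumed calculate_auc helper (hypothetical; the
# project's own implementation may orient labels or threshold differently).
from sklearn.metrics import roc_auc_score, f1_score

def calculate_auc(targets, predictions, edges):
    """Return (AUC, F1) for test-edge predictions.

    targets     -- 0/1 labels for the concatenated positive/negative test edges
    predictions -- predicted probabilities, aligned with targets
    edges       -- dict assumed to hold "negative_edges" and "ecount" for the F1 cut-off
    """
    neg_ratio = len(edges["negative_edges"]) / edges["ecount"]
    auc = roc_auc_score(targets, predictions)
    hard_predictions = [1 if p > neg_ratio else 0 for p in predictions]
    f1 = f1_score(targets, hard_predictions)
    return auc, f1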
    raw=is_tta)
dataloader = torch.utils.data.DataLoader(test_dataset, shuffle=False, batch_size=1)

import torchvision.models as models
# model = models.resnet152(num_classes=5)
# model.conv1 = nn.Conv2d(1, 64, 7, stride=2, padding=3, bias=False)
# model.load_state_dict(torch.load("/home/ted/Projects/no_model_98_0.2959_0.4846.pth"))
from densenet import se_densenet121

model = se_densenet121(pretrained=False, num_channels=1, num_classes=5)
model.load_state_dict(torch.load("/home/ted/no_se_model_best_auc_21.pth"))
model.eval()
model.cuda()

# Five augmented forward passes per image at test time.
tta = TestTimeAugmentation(model, apply_augmentations_tta, 5)

outputs = np.array([[0., 0., 0., 0., 0.]])
labels = np.array([[0., 0., 0., 0., 0.]])
with torch.no_grad():
    for img, label in dataloader:
        if is_tta:
            img = img.squeeze(0).cpu().numpy()
            output = tta.predict(img)
        else:
            img, label = img.float().cuda(), label.float().cuda()
            output = model(img)
            output = torch.sigmoid(output)
        outputs = np.vstack((outputs, output.detach().cpu().numpy()))
        labels = np.vstack((labels, label.detach().cpu().numpy()))

# Drop the all-zero placeholder rows before scoring.
auc, auc_binarized = calculate_auc(outputs[1:], labels[1:], 0.5)
print(auc, auc_binarized)
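The inference script above calls calculate_auc(outputs, labels, 0.5) on five-class sigmoid outputs and expects back a raw AUC plus an AUC computed after binarizing the scores at the given threshold. A rough per-class sketch under that assumption follows; the real helper may aggregate classes or handle degenerate columns differently.

# Hypothetical sketch of the multi-label calculate_auc used above.
import numpy as np
from sklearn.metrics import roc_auc_score

def calculate_auc(outputs, labels, threshold):
    """outputs, labels: (N, C) arrays of sigmoid scores and 0/1 targets."""
    per_class, per_class_binarized = [], []
    for c in range(labels.shape[1]):
        # roc_auc_score is undefined when a column contains a single class.
        if len(np.unique(labels[:, c])) < 2:
            continue
        per_class.append(roc_auc_score(labels[:, c], outputs[:, c]))
        per_class_binarized.append(
            roc_auc_score(labels[:, c], (outputs[:, c] >= threshold).astype(float)))
    return float(np.mean(per_class)), float(np.mean(per_class_binarized))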
train_loss += unsupervised_loss
if i % 50 == 0:
    writer.add_scalars(
        "Loss_Train",
        {
            'L_sup': supervised_loss.item(),
            'L_unsup': unsupervised_loss.item()
        },
        epoch * len(labeled_dataloader) + i)
total_train_loss += train_loss

train_loss.backward()
optimizer.step()
scheduler.step()

auc, _ = calculate_auc(sup_output.detach().cpu(), label.detach().cpu())

e_t = time.time()
n_seconds_per_iter = e_t - s_t
if i % 50 == 0:
    print(
        f'[{epoch} - {i}/{len(labeled_dataloader)}] '
        f'L_train: {train_loss.item():.4f} ROC: {auc:.4f} bestROC: {max_auc:.4f} | '
        f'duration: {n_seconds_per_iter:.2f} s/it')
    writer.add_scalar('L_train', train_loss.item(),
                      epoch * len(labeled_dataloader) + i)
    if auc != -1:
        writer.add_scalar('AUC', auc, epoch * len(labeled_dataloader) + i)

# Start saving the best model (by training loss) after the 500th iteration
# of the first epoch, and on every iteration of later epochs.
if (i > 500 or epoch > 0) and train_loss <= min_train_loss:
    min_train_loss = train_loss
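The semi-supervised loop above guards its AUC logging with auc != -1, which suggests its calculate_auc returns a sentinel when a batch contains only one class. A small sketch under that assumption; the second return value is a placeholder for whatever the caller discards.

# Hypothetical batch-level calculate_auc matching the `auc != -1` guard above.
import torch
from sklearn.metrics import roc_auc_score

def calculate_auc(outputs, labels):
    scores = torch.sigmoid(outputs).view(-1).numpy()
    targets = labels.view(-1).numpy()
    if len(set(targets.tolist())) < 2:
        # AUC is undefined for a single-class batch; signal it with -1.
        return -1, None
    return roc_auc_score(targets, scores), None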
if wait >= es_patience:
    print('Epoch {}: early stopping'.format(epoch))
    break
wait += 1

reg = 0.0005
init = 'random_normal'
fea_test, test_y, test_u = get_week_data(embed_test_now, u_test, y_test, hidden_units)
fea_train, train_y, train_u = get_week_data(embed_train_now, u_train, y_train, hidden_units)
manual_test = get_manual_features(test_u)
manual_train = get_manual_features(train_u)
manual_fea_col = manual_test.shape[1]

m = multi_trj_attention(7, hidden_units + 5, reg, init, manual_fea_col)
m.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
m.fit([fea_train, manual_train], train_y,
      epochs=1000, batch_size=64,
      validation_data=([fea_test, manual_test], test_y),
      verbose=0)

pred = m.predict([fea_test, manual_test], batch_size=64)
pred = pred.T.tolist()[0]
auc_score = calculate_auc(test_u, pred, test_y)
f_record.write("auc\t{}\n".format(auc_score))
f_record.close()
def val_epoch(epoch, data_loader, model, criterion, opt, logger):
    print('validation at epoch {}'.format(epoch))

    outputs_epoch = torch.FloatTensor([1.0])
    targets_epoch = torch.LongTensor([1])

    model.eval()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    accuracies = AverageMeter()
    aucs = AverageMeter()

    end_time = time.time()
    for i, (inputs, targets) in enumerate(data_loader):
        data_time.update(time.time() - end_time)

        if not opt.no_cuda:
            targets = targets.cuda(non_blocking=True)
            inputs = inputs.cuda(non_blocking=True)

        # Validation forward pass without gradient tracking.
        with torch.no_grad():
            outputs = model(inputs)
            loss = criterion(outputs, targets)

        acc = calculate_accuracy(outputs, targets)
        auc = calculate_auc(outputs[:, 0].cpu(), targets.cpu())

        losses.update(loss.item(), inputs.size(0))
        accuracies.update(acc, inputs.size(0))
        aucs.update(auc, inputs.size(0))

        batch_time.update(time.time() - end_time)
        end_time = time.time()

        # if not opt.no_softmax_in_test:
        #     outputs = F.softmax(outputs)
        # outputs_epoch = torch.cat((outputs_epoch, outputs[:, 0].cpu()), dim=0)
        # print(outputs_epoch.shape, targets_epoch.shape)
        # targets_epoch = torch.cat((targets_epoch, targets.cpu()), dim=0)
        # print('batch:', i, 'acc:', acc)
        del outputs, targets
        # outputs_epoch = outputs_epoch[1:-1]
        # targets_epoch = targets_epoch[1:-1]
        # auc = calculate_auc(outputs_epoch, targets_epoch)
        # print('auc', auc, accuracies.avg)

        print('Epoch: [{0}][{1}/{2}]\t'
              'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
              'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
              'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
              'Acc {acc.val:.3f} ({acc.avg:.3f})\t'
              'Auc {auc.val:.3f} ({auc.avg:.3f})'.format(epoch,
                                                         i + 1,
                                                         len(data_loader),
                                                         batch_time=batch_time,
                                                         data_time=data_time,
                                                         loss=losses,
                                                         acc=accuracies,
                                                         auc=aucs))

    logger.log({
        'epoch': epoch,
        'loss': losses.avg,
        'acc': accuracies.avg,
        'auc': aucs.avg
    })

    return losses.avg
def train_epoch(epoch, data_loader, model, criterion, optimizer, opt,
                epoch_logger, batch_logger):
    print('train at epoch {}'.format(epoch))

    model.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    accuracies = AverageMeter()
    aucs = AverageMeter()

    end_time = time.time()
    for i, (inputs, targets) in enumerate(data_loader):
        data_time.update(time.time() - end_time)

        if not opt.no_cuda:
            targets = targets.cuda(non_blocking=True)
            inputs = inputs.cuda(non_blocking=True)

        outputs = model(inputs)
        loss = criterion(outputs, targets)
        acc = calculate_accuracy(outputs, targets)
        auc = calculate_auc(outputs[:, 0].cpu(), targets.cpu())

        losses.update(loss.item(), inputs.size(0))
        accuracies.update(acc, inputs.size(0))
        aucs.update(auc, inputs.size(0))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end_time)
        end_time = time.time()

        batch_logger.log({
            'epoch': epoch,
            'batch': i + 1,
            'iter': (epoch - 1) * len(data_loader) + (i + 1),
            'loss': losses.val,
            'acc': accuracies.val,
            'auc': aucs.val,
            'lr': optimizer.param_groups[0]['lr']
        })

        print('Epoch: [{0}][{1}/{2}]\t'
              'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
              'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
              'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
              'Acc {acc.val:.3f} ({acc.avg:.3f})\t'
              'Auc {auc.val:.3f} ({auc.avg:.3f})'.format(epoch,
                                                         i + 1,
                                                         len(data_loader),
                                                         batch_time=batch_time,
                                                         data_time=data_time,
                                                         loss=losses,
                                                         acc=accuracies,
                                                         auc=aucs))

    epoch_logger.log({
        'epoch': epoch,
        'loss': losses.avg,
        'acc': accuracies.avg,
        'auc': aucs.avg,
        'lr': optimizer.param_groups[0]['lr']
    })

    if epoch % opt.checkpoint == 0:
        save_file_path = os.path.join(opt.result_path, 'save_{}.pth'.format(epoch))
        states = {
            'epoch': epoch + 1,
            'arch': opt.arch,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
        }
        torch.save(states, save_file_path)
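Both train_epoch and val_epoch rely on AverageMeter and calculate_accuracy helpers that are not shown here. The sketches below follow the conventional pattern from the PyTorch ImageNet example; the project's own versions may differ in detail.

# Conventional helpers assumed by train_epoch/val_epoch above (sketches only).
class AverageMeter(object):
    """Tracks the most recent value and the running average."""

    def __init__(self):
        self.val = 0.0
        self.avg = 0.0
        self.sum = 0.0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def calculate_accuracy(outputs, targets):
    """Top-1 accuracy for a batch of class logits."""
    batch_size = targets.size(0)
    _, pred = outputs.topk(1, dim=1, largest=True)
    correct = pred.t().eq(targets.view(1, -1))
    return correct.float().sum().item() / batch_size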