cudnn.benchmark = True

''' Initialize weights from checkpoint '''
if args.resume:
    net, best_acc, start_acc = init_from_checkpoint(net)

''' Optimization '''
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9,
                      weight_decay=5e-4)
lr_scheduler = ReduceLROnPlateau(optimizer, factor=0.5, mode='max',
                                 verbose=True)

''' Define passer '''
if not args.subset:
    passer_train = Passer(net, trainloader, criterion, device)
else:
    passer_train = Passer(net, subsettrainloader, criterion, device)
passer_test = Passer(net, testloader, criterion, device)

''' Define manipulator '''
manipulator = load_manipulator(args.permute_labels, args.binarize_labels)

''' Make initial pass before any training '''
loss_te, acc_te = passer_test.run()
save_checkpoint(checkpoint={'net': net.state_dict(), 'acc': acc_te, 'epoch': 0},
                path='./checkpoint/' + ONAME + '/',
                fname='ckpt_trial_' + str(args.trial) + '_epoch_0.t7')
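# --- Illustrative sketch (not the repo's implementation) ---------------------
# `Passer` is defined elsewhere in this repo. The minimal version below shows
# only the interface the scripts here rely on: run() does one evaluation pass
# and returns (loss, accuracy); get_function()/get_predictions() appear in the
# later scripts. All internals below are assumptions for illustration.
import torch

class Passer:
    def __init__(self, net, loader, criterion, device):
        self.net, self.loader = net, loader
        self.criterion, self.device = criterion, device

    @torch.no_grad()
    def run(self, manipulator=None):
        self.net.eval()
        total_loss, correct, seen = 0.0, 0, 0
        for inputs, targets in self.loader:
            inputs = inputs.to(self.device)
            targets = targets.to(self.device)
            if manipulator is not None:
                targets = manipulator(targets)  # e.g. permute/binarize labels
            outputs = self.net(inputs)
            total_loss += self.criterion(outputs, targets).item() * targets.size(0)
            correct += (outputs.argmax(dim=1) == targets).sum().item()
            seen += targets.size(0)
        return total_loss / seen, 100.0 * correct / seen
# -----------------------------------------------------------------------------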
''' Prepare criterion '''
if args.dataset in ['cifar10', 'cifar10_gray', 'vgg_cifar10_adversarial', 'imagenet']:
    criterion = nn.CrossEntropyLoss()
elif args.dataset in ['mnist', 'mnist_adverarial']:
    criterion = F.nll_loss

''' Define label manipulator '''
manipulator = load_manipulator(args.permute_labels, args.binarize_labels)

''' Instead of building the graph on the entire set of nodes, pick a subset '''
''' Get activations (needed in both branches below) '''
subsettestloader = loader(args.dataset + '_train', batch_size=100,
                          sampling=args.binarize_labels)
passer = Passer(net, subsettestloader, criterion, device)
activs = signal_concat(passer.get_function())

if args.select_nodes:
    ''' Get correct and wrong predictions '''
    gts, preds = passer.get_predictions(manipulator=manipulator)
    labels = [int(x) for x in gts == preds]

    ''' Compute discriminative nodes '''
    nodes = np.concatenate(
        get_discriminative_nodes(np.transpose(activs), labels, 0.1))
else:
    ''' Keep all nodes (activs is computed above, so len() is defined here) '''
    nodes = np.arange(0, len(activs))

for epoch in args.epochs:
    print('==> Loading checkpoint for epoch {}...'.format(epoch))
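# --- Illustrative sketch (not the repo's implementation) ---------------------
# One plausible reading of get_discriminative_nodes(): score each node by the
# absolute correlation between its activation and the 0/1 correctness label,
# then keep the most discriminative fraction of nodes. The scoring rule and
# the name get_discriminative_nodes_sketch are assumptions.
import numpy as np

def get_discriminative_nodes_sketch(signals, labels, frac):
    signals = np.asarray(signals, dtype=float)   # samples x nodes (transposed above)
    labels = np.asarray(labels, dtype=float)     # one 0/1 correctness label per sample
    centered = signals - signals.mean(axis=0)
    lab = labels - labels.mean()
    denom = centered.std(axis=0) * lab.std() * len(labels) + 1e-12
    scores = np.abs(centered.T @ lab) / denom    # |Pearson r| per node
    k = max(1, int(frac * signals.shape[1]))
    return [np.argsort(scores)[-k:]]             # list of index arrays, as np.concatenate expects
# -----------------------------------------------------------------------------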
criterion = nn.CrossEntropyLoss()

''' Define label manipulator '''
manipulator = load_manipulator(args.permute_labels, args.binarize_labels)

for epoch in args.epochs:
    print('==> Loading checkpoint for epoch {}...'.format(epoch))
    assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
    checkpoint = torch.load('./checkpoint/' + args.net + '_' + args.dataset +
                            '/ckpt_trial_' + str(args.trial) + '_epoch_' +
                            str(epoch) + '.t7')
    net.load_state_dict(checkpoint['net'])

    ''' Define passer and get activations '''
    functloader = loader(args.dataset + '_test', batch_size=100,
                         subset=list(range(0, 1000)))
    passer = Passer(net, functloader, criterion, device)
    passer_test = Passer(net, functloader, criterion, device)
    passer_test.run(manipulator=manipulator)
    activs = passer.get_function()
    print('activs have shape {}'.format(signal_concat(activs).shape))

    start = time.time()
    if args.partition == 'hardcoded':
        splits = signal_splitting(activs, args.split)
    elif args.partition == 'dynamic':
        splits = signal_partition(activs, n_part=args.split, binarize_t=0.5)
        print('Returning from signal_partition in {} secs'.format(time.time() - start))
    elif args.partition == 'dynamic_from_structure':
        sadj = structure_from_view(net.module,
print(net)

''' Prepare criterion '''
criterion = nn.CrossEntropyLoss()

''' Define label manipulator '''
manipulator = load_manipulator(args.permute_labels, args.binarize_labels)

for epoch in args.epochs:
    print('==> Loading checkpoint for epoch {}...'.format(epoch))
    assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
    checkpoint = torch.load('./checkpoint/' + args.net + '_' + args.dataset +
                            '/ckpt_trial_' + str(args.trial) + '_epoch_' +
                            str(epoch) + '.t7')
    net.load_state_dict(checkpoint['net'])

    ''' Define passer and get activations '''
    functloader = loader(args.dataset + '_test', batch_size=100,
                         subset=list(range(0, 1000)))
    passer = Passer(net, functloader, criterion, device)
    passer_test = Passer(net, functloader, criterion, device)
    passer_test.run(manipulator=manipulator)
    activs = passer.get_function()
    activs = signal_concat(activs)
    adj = adjacency(activs)

    print('The dimension of the adjacency matrix is {}'.format(adj.shape))
    print('Adj mean {}, min {}, max {}'.format(np.mean(adj), np.min(adj), np.max(adj)))

    ''' Write adjacency to binary, to use as DIPHA input for persistent homology '''
    save_dipha(SAVE_DIR + 'adj_epc{}_trl{}.bin'.format(epoch, args.trial), 1 - adj)
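# --- Illustrative sketch (not the repo's implementation) ---------------------
# adjacency() builds the functional graph from the node x sample activation
# matrix. A common choice in this line of work, and the assumption made here,
# is the absolute Pearson correlation between every pair of node activation
# vectors. DIPHA consumes a distance matrix, which is why 1 - adj is written.
import numpy as np

def adjacency_sketch(signals):
    adj = np.abs(np.corrcoef(signals))   # signals: nodes x samples
    adj = np.nan_to_num(adj)             # constant nodes yield NaN correlations
    np.fill_diagonal(adj, 0)             # drop self-loops
    return adj
# -----------------------------------------------------------------------------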
cudnn.benchmark = True

''' Initialize weights from checkpoint '''
if args.resume:
    net, best_acc, start_acc = init_from_checkpoint(net)

''' Optimization '''
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9,
                      weight_decay=5e-4)
lr_scheduler = ReduceLROnPlateau(optimizer, factor=0.5, mode='max',
                                 verbose=True)

''' Define passer '''
if not args.subset:
    passer_train = Passer(net, trainloader, criterion, device)
else:
    passer_train = Passer(net, subsettrainloader, criterion, device)
passer_test = Passer(net, testloader, criterion, device)

''' Make initial pass before any training '''
loss_te, acc_te = passer_test.run()
save_checkpoint(checkpoint={'net': net.state_dict(), 'acc': acc_te, 'epoch': 0},
                path='./checkpoint/' + ONAME,
                fname='ckpt_trial_' + str(args.trial) + '_epoch_0.t7')

losses = []
manipulator = load_manipulator(args.permute_labels, args.binarize_labels)

''' Define partitions '''
print(25 * '-')
print('----Perform partition---')
print(25 * '-')
print('==> Loading checkpoint for epoch {}...'.format(args.epochs[-1]))
assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
checkpoint = torch.load('./checkpoint/' + args.net + '_' + args.dataset +
                        '/ckpt_trial_' + str(args.trial) + '_epoch_' +
                        str(args.epochs[-1]) + '.t7')
net.load_state_dict(checkpoint['net'])

''' Define passer and get activations '''
functloader = loader(args.dataset + '_test', batch_size=100,
                     subset=list(range(0, 1000)))
passer = Passer(net, functloader, criterion, device)
passer_test = Passer(net, functloader, criterion, device)
passer_test.run()
activs = passer.get_function()
print('activs have shape {}'.format(signal_concat(activs).shape))

start = time.time()
if args.partition == 'hardcoded':
    splits = signal_splitting(activs, args.split)
elif args.partition == 'dynamic':
    node_splits, _ = signal_partition(activs, n_part=args.split,
                                      binarize_t=args.thresholds[0])
    print('Returning from signal_partition in {} secs'.format(time.time() - start))
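# --- Illustrative sketch (not the repo's implementation) ---------------------
# 'hardcoded' partitioning is assumed here to simply chop each layer's
# node x sample activation matrix into fixed chunks of `split` nodes, whereas
# 'dynamic' partitioning derives the grouping from the signals themselves.
# The name signal_splitting_sketch and the chunking rule are assumptions.
def signal_splitting_sketch(activs, split):
    return [layer[i:i + split]
            for layer in activs                    # per-layer activation matrices
            for i in range(0, len(layer), split)]  # fixed-size node chunks
# -----------------------------------------------------------------------------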
    criterion = nn.CrossEntropyLoss()
elif args.dataset in ['mnist', 'mnist_adverarial']:
    criterion = F.nll_loss

for epoch in args.epochs:
    print('==> Loading checkpoint for epoch {}...'.format(epoch))
    assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
    checkpoint = torch.load('./checkpoint/' + args.net + '_' + args.dataset +
                            '/ckpt_trial_' + str(args.trial) + '_epoch_' +
                            str(epoch) + '.t7')
    net.load_state_dict(checkpoint['net'])

    ''' Define passer and get activations '''
    functloader = loader(args.dataset + '_test', batch_size=100,
                         subset=list(range(0, 1000)))
    passer = Passer(net, functloader, criterion, device)
    activs = passer.get_function()

    ''' If the number of nodes is high, compute adjacency per layer/chunk; '''
    ''' otherwise treat the whole network at once '''
    if not args.split:
        activs = signal_concat(activs)
        adj = adjacency(activs)

        for threshold in THRESHOLDS:
            badj = binarize(np.copy(adj), threshold)
            print('t={} s={}'.format(threshold, np.sum(badj)))
            np.savetxt(SAVE_DIR + 'badj_epc{}_t{:1.2f}_trl{}.csv'.format(
                epoch, threshold, args.trial), badj, fmt='%d', delimiter=",")
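# --- Illustrative sketch (not the repo's implementation) ---------------------
# binarize() thresholds the weighted adjacency into a 0/1 graph; the script
# passes np.copy(adj), which suggests an in-place operation, assumed below.
import numpy as np

def binarize_sketch(adj, threshold):
    adj[adj > threshold] = 1   # strong edges survive
    adj[adj <= threshold] = 0  # weak edges are dropped
    return adj
# -----------------------------------------------------------------------------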