def test_accuracy(test_loader, net, config, attack=None):
    """Evaluate top-1/top-5 accuracy on the test set, optionally under attack."""
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    for images, labels in tqdm(test_loader):
        images = images.to(device=config['device'])
        labels = labels.to(device=config['device'])
        if attack is not None:
            # Replace the clean inputs with adversarial examples before scoring.
            images = attack.perturb(images, labels)
        output = net(images)
        acc1, acc5 = accuracy(output, labels, topk=(1, 5))
        top1.update(acc1[0], images.size(0))
        top5.update(acc5[0], images.size(0))
    log_str = '* Test Accuracy:\t'
    log_str += '\tAcc@1 {top1.avg:6.2f} '.format(top1=top1)
    log_str += '\tAcc@5 {top5.avg:6.2f} '.format(top5=top5)
    log_str += '\n'
    print(log_str)
    return top1.avg
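# The `accuracy(output, target, topk=...)` helper used above is not defined in
# this file. A minimal sketch with the same call signature, following the
# common PyTorch ImageNet-example pattern, might look like the following; the
# repo's actual implementation may differ in details.
def accuracy(output, target, topk=(1,)):
    """Compute precision@k for each k in `topk`, as a percentage."""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        # Indices of the maxk highest-scoring classes for each example.
        _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            # An example counts as correct@k if the target is in the top k.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res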
def test_k_nearest_neighbor():
    datasets = [
        os.path.join('data', x)
        for x in os.listdir('data')
        if os.path.splitext(x)[-1] == '.json'
    ]
    aggregators = ['mean', 'mode', 'median']
    distances = ['euclidean', 'manhattan', 'cosine']

    for data_path in datasets:
        # Load data and make sure its shape is correct.
        features, targets = load_json_data(data_path)
        targets = targets[:, None]  # expand dims to (n_samples, 1)

        for d in distances:
            for a in aggregators:
                # Make the model and fit it on the full dataset.
                knn = KNearestNeighbor(1, distance_measure=d, aggregator=a)
                knn.fit(features, targets)

                # Predict on the training points themselves; with k=1, each
                # point's nearest neighbor is itself, so every setting should
                # recover the training labels exactly.
                labels = knn.predict(features)
                acc = accuracy(targets, labels)

                # Error out with a descriptive message if there's an issue.
                msg = 'Failure with dataset: {}. Settings: dist={}, agg={}.'.format(
                    data_path, d, a)
                assert acc == 1.0, msg
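# For reference, the three distance measures exercised by the test above could
# be implemented roughly as below. These are illustrative sketches for a pair
# of feature vectors; the actual `KNearestNeighbor` implementation in this
# repo may compute them differently (e.g., vectorized over the whole dataset).
import numpy as np

def euclidean_distance(x, y):
    # L2 distance between two feature vectors.
    return np.sqrt(np.sum((x - y) ** 2))

def manhattan_distance(x, y):
    # L1 (city-block) distance between two feature vectors.
    return np.sum(np.abs(x - y))

def cosine_distance(x, y):
    # One minus cosine similarity; 0 for parallel vectors, 2 for opposite.
    return 1.0 - np.dot(x, y) / (np.linalg.norm(x) * np.linalg.norm(y))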
def test_accuracy():
    from sklearn.metrics import accuracy_score
    from src import accuracy

    y_true, y_pred = make_fake_data()
    _actual = accuracy_score(y_true, y_pred)
    _est = accuracy(y_true, y_pred)
    assert np.allclose(_actual, _est)
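# Since the test asserts agreement with sklearn's accuracy_score, the
# `accuracy` function in `src` is presumably just the fraction of matching
# labels. A hypothetical sketch consistent with that behavior:
def accuracy_sketch(y_true, y_pred):
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    # Fraction of predictions that exactly match the ground truth.
    return np.mean(y_true == y_pred)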
def train(train_loader, net, criterion, log_file, optimizer, epoch,
          PGD=None, config=None):
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(train_loader),
                             [batch_time, losses, top1, top5],
                             prefix="Epoch: [{}]".format(epoch))

    end = time.time()
    for batch_idx, (images, labels) in enumerate(train_loader, start=1):
        images = images.to(device=config['device'])
        labels = labels.to(device=config['device'])

        if PGD is not None:
            if config['pgd_label'] == 0:
                # Craft adversarial examples from the ground-truth labels.
                images = PGD.perturb(images, labels)
            else:
                # Craft adversarial examples from the model's own predictions
                # instead, which avoids label leaking during adversarial training.
                pred_labels = net(images).max(1, keepdim=True)[1].squeeze_()
                images = PGD.perturb(images, pred_labels)

        outputs = net(images)
        loss = criterion(outputs, labels)

        acc1, acc5 = accuracy(outputs, labels, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(acc1[0], images.size(0))
        top5.update(acc5[0], images.size(0))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)
        end = time.time()

        if batch_idx == 1 or batch_idx % 400 == 0:
            progress.display(batch_idx)
            write_log(log_file, progress.log_str(batch_idx))
    return top1.avg
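# `AverageMeter` and `ProgressMeter` are assumed to follow the usual PyTorch
# ImageNet-example pattern. A minimal AverageMeter sketch consistent with the
# `update(val, n)` / `.avg` usage in `train` and `valid` above:
class AverageMeter:
    """Track the current value and running average of a metric."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        # `val` is a per-batch average; weight it by the batch size `n`
        # so the running average is correct even with a ragged last batch.
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count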
def valid(valid_loader, net, criterion, log_file, config=None):
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')

    # No gradients are needed for validation.
    with torch.no_grad():
        end = time.time()
        for images, labels in valid_loader:
            images = images.to(device=config['device'])
            labels = labels.to(device=config['device'])

            output = net(images)
            loss = criterion(output, labels)

            acc1, acc5 = accuracy(output, labels, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0], images.size(0))
            top5.update(acc5[0], images.size(0))

            batch_time.update(time.time() - end)
            end = time.time()

    log_str = '* Validation:\t'
    log_str += '\tTime {batch_time.avg:6.3f} '.format(batch_time=batch_time)
    log_str += '\tLoss {losses.avg:.4e} '.format(losses=losses)
    log_str += '\tAcc@1 {top1.avg:6.2f} '.format(top1=top1)
    log_str += '\tAcc@5 {top5.avg:6.2f} '.format(top5=top5)
    log_str += '\n'
    print(log_str)
    write_log(log_file, log_str)
    return top1.avg
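# A hypothetical driver loop showing how these functions might fit together.
# Names such as `make_loaders` and `build_model`, the config keys, and the
# hyperparameters are assumptions for illustration, not part of this module.
# Note that the caller is responsible for toggling train/eval mode, since
# `valid` and `test_accuracy` do not call `net.eval()` themselves.
if __name__ == '__main__':
    config = {'device': 'cuda' if torch.cuda.is_available() else 'cpu',
              'pgd_label': 0}
    train_loader, valid_loader, test_loader = make_loaders(config)
    net = build_model().to(config['device'])
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(net.parameters(), lr=0.1, momentum=0.9)

    best_acc = 0.0
    for epoch in range(90):
        net.train()
        train(train_loader, net, criterion, 'train.log', optimizer,
              epoch, PGD=None, config=config)
        net.eval()
        acc = valid(valid_loader, net, criterion, 'train.log', config=config)
        best_acc = max(best_acc, acc)

    net.eval()
    test_accuracy(test_loader, net, config)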