def run(features: List[BaseParams],
        name,
        verbose=False,
        with_save=False,
        with_scaling=False):
    # Play the same feature set once in train mode and once in test mode.
    game = create_game_from_feature_names(features,
                                          verbose,
                                          mode="train",
                                          with_scaling=with_scaling)
    m_train = game.play()

    game = create_game_from_feature_names(features,
                                          verbose,
                                          mode="test",
                                          with_scaling=with_scaling)
    m_test = game.play()

    new_fnames = [fn.get_features_names_str() for fn in features]
    feature_names_str = ', '.join(new_fnames)

    from utils.metrics import Result
    params = StackedParams(features=features,
                           classifier="consensus",
                           dirname="new",
                           pca=False,
                           use_only_ab=False,
                           to_binary=False,
                           params={'with_scaling': with_scaling})
    r = Result(metrics=m_test, params=params)
    print(m_test.to_string())
    if with_save:
        r.save_result()
    return m_test.accuracy, (feature_names_str, m_train.accuracy * 100,
                             m_test.accuracy * 100)
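
For orientation, a hedged usage sketch of this run variant; feature_a and feature_b are hypothetical placeholders for whatever BaseParams instances the repo builds elsewhere:

# feature_a / feature_b are hypothetical BaseParams instances from the repo.
features = [feature_a, feature_b]
test_acc, (names, train_pct, test_pct) = run(features, name="demo",
                                             with_scaling=True)
print("{}: train={:.1f}%  test={:.1f}%".format(names, train_pct, test_pct))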
Example #2
def run(features: List[BaseParams], verbose, with_save, name):
    max_acc = 0.
    max_x = 0.
    min_aed = 100.
    min_x = 0.
    metrics = None
    import numpy as np
    # Scan the game parameter x over [2, 8) in steps of 0.1, tracking the
    # best-accuracy and lowest-AED settings separately.
    for x in np.arange(2, 8, 0.1):
        game = create_game(features, x, verbose=verbose)
        m = game.play()
        if m.accuracy > max_acc:
            max_acc = m.accuracy
            max_x = x
            metrics = m
        if m.aed_score < min_aed:
            min_aed = m.aed_score
            min_x = x
    print("max_acc", max_acc, max_x)
    print("min_aed", min_aed, min_x)
    from utils.metrics import Result
    from ebm.filenames import auctioning_name
    name = auctioning_name + '_on_' + name
    r = Result(metrics=metrics,
               params=StackedParams(classifier=name, dirname=name, features=features, params={}, to_binary=False,
                                    use_only_ab=False, pca=False))
    print(r.metrics.to_string())
    if with_save:
        r.save_result()
    return max_acc
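
The same scan-and-argmax pattern in isolation, with a dummy objective standing in for game.play() (the real score comes from the repo's game object):

import numpy as np

def dummy_score(x):
    # Stand-in for game.play().accuracy: peaks at x = 5.
    return 1.0 - (x - 5.0) ** 2 / 25.0

best_acc, best_x = 0.0, None
for x in np.arange(2, 8, 0.1):
    acc = dummy_score(x)
    if acc > best_acc:
        best_acc, best_x = acc, x
print("max_acc", best_acc, best_x)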
Example #3
def train(train_loader, model, criterion, optimizer, epoch, attacker):
    average_meter = AverageMeter()
    model.train()  # switch to train mode
    end = time.time()

    batch_num = len(train_loader)

    for i, (input, target) in enumerate(train_loader):
        # itr_count += 1
        input, target = input.cuda(), target.cuda()

        torch.cuda.synchronize()
        data_time = time.time() - end

        # compute pred
        end = time.time()

        # get adversary for adversarial training
        if attacker is not None:
            input = attacker(input, target)

        pred = model(input)

        loss = criterion(pred, target)
        optimizer.zero_grad()
        loss.backward()  # compute gradient and do SGD step
        optimizer.step()
        torch.cuda.synchronize()
        gpu_time = time.time() - end

        # measure accuracy and record loss
        result = Result()
        result.evaluate(pred.data, target.data)
        average_meter.update(result, gpu_time, data_time, input.size(0))
        end = time.time()

        if (i + 1) % args.print_freq == 0:
            print('=> output: {}'.format(output_directory))
            print('Train Epoch: {0} [{1}/{2}]\t'
                  't_Data={data_time:.3f}({average.data_time:.3f}) '
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
                  'Loss={Loss:.5f} '
                  'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
                  'RML={result.absrel:.2f}({average.absrel:.2f}) '
                  'Log10={result.lg10:.3f}({average.lg10:.3f}) '
                  'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
                  'Delta2={result.delta2:.3f}({average.delta2:.3f}) '
                  'Delta3={result.delta3:.3f}({average.delta3:.3f})'.format(
                      epoch, i + 1, len(train_loader), data_time=data_time,
                      gpu_time=gpu_time, Loss=loss.item(), result=result, average=average_meter.average()), flush=True)
            current_step = epoch * batch_num + i  # global step index (unused in this snippet)

    avg = average_meter.average()
    return avg  # surface the epoch averages to the caller
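
The data_time/gpu_time split above only works because torch.cuda.synchronize() is called before each clock read; CUDA kernels launch asynchronously, so an unsynchronized time.time() measures launch latency, not execution. The pattern in isolation, as a minimal sketch:

import time
import torch

def timed_forward(model, batch):
    # Drain the CUDA stream before and after the work being measured.
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    start = time.time()
    out = model(batch)
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    return out, time.time() - start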
Example #4
def validate(val_loader, model, epoch):
    average_meter = AverageMeter()

    model.eval()  # switch to evaluate mode

    end = time.time()

    skip = len(val_loader) // 9  # save images every skip iters

    for i, (input, target) in enumerate(val_loader):

        input, target = input.cuda(), target.cuda()
        torch.cuda.synchronize()
        data_time = time.time() - end

        # compute output
        end = time.time()
        with torch.no_grad():
            pred = model(input)

        torch.cuda.synchronize()
        gpu_time = time.time() - end

        # measure accuracy and record loss
        result = Result()
        result.evaluate(pred.data, target.data)

        average_meter.update(result, gpu_time, data_time, input.size(0))
        end = time.time()

        # save 8 images for visualization
        if args.dataset == 'kitti':
            rgb = input[0]
            pred = pred[0]
            target = target[0]
        else:
            rgb = input

        if i == 0:
            img_merge = utils.merge_into_row(rgb, target, pred)
        elif (i < 8 * skip) and (i % skip == 0):
            row = utils.merge_into_row(rgb, target, pred)
            img_merge = utils.add_row(img_merge, row)
        elif i == 8 * skip:
            # filename = output_directory + '/comparison_' + str(epoch) + '.png'
            # utils.save_image(img_merge, filename)
            pass

        if (i + 1) % 100 == 0:
            print('Test: [{0}/{1}]\t'
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
                  'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
                  'RML={result.absrel:.2f}({average.absrel:.2f}) '
                  'Log10={result.lg10:.3f}({average.lg10:.3f}) '
                  'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
                  'Delta2={result.delta2:.3f}({average.delta2:.3f}) '
                  'Delta3={result.delta3:.3f}({average.delta3:.3f})'.format(
                      i + 1, len(val_loader), gpu_time=gpu_time, result=result, average=average_meter.average()))

    avg = average_meter.average()

    print('\n*\n'
          'RMSE={average.rmse:.3f}\n'
          'Rel={average.absrel:.3f}\n'
          'Log10={average.lg10:.3f}\n'
          'Delta1={average.delta1:.3f}\n'
          'Delta2={average.delta2:.3f}\n'
          'Delta3={average.delta3:.3f}\n'
          't_GPU={time:.3f}\n'.format(
              average=avg, time=avg.gpu_time), flush=True)

    return avg, img_merge
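
The skip logic above collects one row from batch 0 and then every skip-th batch, for roughly eight evenly spaced rows per epoch (merge_into_row and add_row are the repo's image helpers). The selection rule on its own, as a runnable sketch:

def pick_rows(num_batches, num_rows=8):
    # Mirrors the loop above: batch 0, plus every multiple of skip
    # below num_rows * skip.
    skip = num_batches // 9
    return [0] + [i for i in range(1, num_rows * skip) if i % skip == 0]

print(pick_rows(654))  # -> [0, 72, 144, 216, 288, 360, 432, 504]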
Example #5


if __name__ == '__main__':
    #max_perturb = 6.0
    #iterations = 10
    #alpha = 1.0
    # TI presumably toggles a translation-invariant attack variant; k would
    # be its kernel size (both are passed through to the attacker params).
    TI = False
    k = 5

    args = utils.parse_command()
    print(args)

    mifgsm_params = {'eps': args.epsilon, 'steps': args.iterations,
                     'decay': 1.0, 'alpha': args.alpha, 'TI': TI, 'k': k}
    pgd_params = {'norm': 'inf', 'eps': args.epsilon, 'alpha': args.alpha,
                  'iterations': args.iterations, 'TI': TI, 'k': k}

    best_result = Result()
    best_result.set_to_worst()

    main()
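
pgd_params above describes a standard L-inf projected-gradient attack. A minimal sketch of that attack, assuming a differentiable model and criterion; the repo's actual attacker classes (including the TI variant and its k) are not reproduced here:

import torch

def pgd(model, criterion, x, y, eps, alpha, iterations):
    # L-inf PGD: signed-gradient ascent steps of size alpha, re-projected
    # into the eps-ball around the clean input after every step.
    x_adv = x.clone().detach()
    for _ in range(iterations):
        x_adv.requires_grad_(True)
        loss = criterion(model(x_adv), y)
        grad, = torch.autograd.grad(loss, x_adv)
        with torch.no_grad():
            x_adv = x_adv + alpha * grad.sign()
            x_adv = x + (x_adv - x).clamp(-eps, eps)
    return x_adv.detach()
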
def validate(val_loader,
             model,
             segm_model=None,
             attacker=None,
             save_img_dir=None,
             num_imgs_to_save=None):
    average_meter = AverageMeter()

    model.eval()  # switch to evaluate mode
    targeted_metrics = {'rmse': [], 'absrel': [], 'log10': []}

    end = time.time()

    skip = len(val_loader) // 9  # save images every skip iters

    for i, (input, target) in enumerate(val_loader):
        input, target = input.cuda(), target.cuda()

        # Get Adversary function
        adv_input, segm = get_adversary(input, target, segm_model, attacker)

        torch.cuda.synchronize()
        data_time = time.time() - end

        # compute output
        end = time.time()
        with torch.no_grad():
            if args.model == 'adabins':
                _, pred = model(adv_input)
            else:
                pred = model(adv_input)

        # Post-processing for a few of the models
        pred = post_process(pred)

        torch.cuda.synchronize()
        gpu_time = time.time() - end

        # measure accuracy and record loss
        result = Result()
        result.evaluate(pred.data, target.data)
        if args.targeted:
            rmse, absrel, log10 = result.targeted_eval(pred.data.squeeze(1),
                                                       target.data.squeeze(1),
                                                       segm)
            # targeted_eval can yield NaN; NaN compares unequal to everything,
            # itself included, so `x != float('nan')` is always True --
            # filter with math.isnan instead.
            import math
            if not math.isnan(rmse):
                targeted_metrics['rmse'].append(rmse)
            if not math.isnan(absrel):
                targeted_metrics['absrel'].append(absrel)
            if not math.isnan(log10):
                targeted_metrics['log10'].append(log10)

        average_meter.update(result, gpu_time, data_time, input.size(0))
        end = time.time()

        # save 8 images for visualization
        if args.dataset == 'kitti':
            rgb = adv_input[0]
            target = target[0]
            pred = pred[0]
        else:
            rgb = input

        if i == 0:
            img_merge = utils.merge_into_row(rgb, target, pred)
        elif (i < 8 * skip) and (i % skip == 0):
            row = utils.merge_into_row(rgb, target, pred)
            img_merge = utils.add_row(img_merge, row)

        if (i + 1) % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
                  'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
                  'RML={result.absrel:.2f}({average.absrel:.2f}) '
                  'Log10={result.lg10:.3f}({average.lg10:.3f}) '
                  'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
                  'Delta2={result.delta2:.3f}({average.delta2:.3f}) '
                  'Delta3={result.delta3:.3f}({average.delta3:.3f})'.format(
                      i + 1,
                      len(val_loader),
                      gpu_time=gpu_time,
                      result=result,
                      average=average_meter.average()))

        # save images only if we're not testing on already saved images
        if save_img_dir is not None and args.dataset != 'saved_images':
            img = adv_input[0]
            depth = target[0]
            save_image(img,
                       os.path.join(save_img_dir, 'imgs', '{}.png'.format(i)))
            save_image(depth,
                       os.path.join(save_img_dir, 'gt', '{}.png'.format(i)))

        if (save_img_dir is not None and num_imgs_to_save is not None
                and i > num_imgs_to_save):
            break

    avg = average_meter.average()

    if args.targeted:
        avg_rmse = (sum(targeted_metrics['rmse'])
                    / len(targeted_metrics['rmse']))
        avg_absrel = (sum(targeted_metrics['absrel'])
                      / len(targeted_metrics['absrel']))
        avg_log10 = (sum(targeted_metrics['log10'])
                     / len(targeted_metrics['log10']))

        print('\n*\n'
              'RMSE={}\n'
              'Rel={}\n'
              'Log10={}\n'.format(avg_rmse, avg_absrel, avg_log10))

    print('\n*\n'
          'RMSE={average.rmse:.3f}\n'
          'Rel={average.absrel:.3f}\n'
          'Log10={average.lg10:.3f}\n'
          'Delta1={average.delta1:.3f}\n'
          'Delta2={average.delta2:.3f}\n'
          'Delta3={average.delta3:.3f}\n'
          't_GPU={time:.3f}\n'.format(average=avg, time=avg.gpu_time))

    return avg, img_merge
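
One semantics note behind the targeted-metrics filtering: targeted_eval can yield NaN, and NaN compares unequal to everything, itself included, so only math.isnan (or x != x) detects it:

import math

nan = float('nan')
print(nan != float('nan'))  # True -- inequality never filters NaN out
print(math.isnan(nan))      # True -- the reliable check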