Code example #1
def gen_attack_log(args):
    # Create the attack-log directory/CSV and record the run configuration.
    if not os.path.isdir('logs_attack'):
        os.mkdir('logs_attack')
    logname = ('logs_attack/ctri_%s_%s_%s.csv' %
               (args.data, args.model, args.name))

    # If the log already exists, separate this run from the previous one with blank rows.
    if os.path.exists(logname):
        log_row(logname, [''])
        log_row(logname, [''])

    with open(logname, 'a') as logfile:
        logwriter = csv.writer(logfile, delimiter=',')
        logwriter.writerow([
            'model type', 'data set', 'random seed',
            'number of points in one batch', 'number of points in one object',
            'model load path', 'steps of gradient-like attack',
            'step size (lr) for gradient-like attack', 'number of test objects',
            'penalty coefficient', 'target or not', 'kappa for CW',
            'number of repeat initial attacks',
            'number of divisions for theta', 'range of angle'
        ])
        logwriter.writerow([
            args.model, args.data, args.seed, args.attack_batch_size,
            args.num_points, args.model_path, args.num_steps, args.step_size,
            args.num_tests, args.LAMBDA, args.target, args.kappa,
            args.num_init, args.d, args.a, args.b
        ])
        logwriter.writerow(['Note', args.note])
        logwriter.writerow([''])

    return logname
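
The helper log_row is used throughout these examples but is not shown. A minimal sketch consistent with how it is called (a path plus one row) is given below, assuming it simply appends a single CSV row; the actual implementation in the original code base may differ.

import csv

def log_row(logname, row):
    # Append one row to the CSV log file.
    with open(logname, 'a', newline='') as logfile:
        csv.writer(logfile, delimiter=',').writerow(row)
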
Code example #2
def gen_train_log(args):
    # Create the training-log directory/CSV and record the run configuration.
    if not os.path.isdir('logs_train'):
        os.mkdir('logs_train')
    logname = ('logs_train/%s_%s_%s.csv' % (args.data, args.model, args.name))

    # If the log already exists, separate this run from the previous one with blank rows.
    if os.path.exists(logname):
        log_row(logname, [''])
        log_row(logname, [''])

    with open(logname, 'a') as logfile:
        logwriter = csv.writer(logfile, delimiter=',')
        logwriter.writerow([
            'model type', 'data set', 'seed', 'train batch size',
            'number of points in one batch', 'number of epochs', 'optimizer',
            'learning rate', 'resume checkpoint path', 'feature transform',
            'lambda for feature transform regularizer', 'data augment'
        ])
        logwriter.writerow([
            args.model, args.data, args.seed, args.batch_size, args.num_points,
            args.epochs, args.optimizer, args.lr, args.resume,
            args.feature_transform, args.lambda_ft, args.augment
        ])
        logwriter.writerow(['Note', args.note])
        logwriter.writerow([''])
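
gen_train_log only needs an object exposing the attributes listed above. A usage sketch with argparse.Namespace follows; every field value here is made up for illustration (the real script builds args with argparse).

from argparse import Namespace

args = Namespace(data='modelnet40', model='pointnet', name='baseline', seed=0,
                 batch_size=32, num_points=1024, epochs=200, optimizer='adam',
                 lr=0.001, resume='/', feature_transform=True, lambda_ft=0.001,
                 augment=True, note='example run')
gen_train_log(args)  # writes logs_train/modelnet40_pointnet_baseline.csv
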
Code example #3
def log_penalty(logname, i, penalties, iso_penalties):
    # Log the maximum penalty, the mean/variance over all tested objects, and the
    # mean/variance restricted to the nonzero penalties (iso_penalties).
    log_row(logname, [
        'Max penalty', 'Average penalty in total', 'Variance in total',
        'Average penalty over nonzeros', 'Variance over nonzeros'
    ])
    if iso_penalties:
        log_row(logname, [
            max(penalties), 1. * sum(penalties) / (i + 1),
            np.var(penalties), 1. * sum(iso_penalties) / (len(iso_penalties)),
            np.var(iso_penalties)
        ])
        print(max(penalties), 1. * sum(penalties) / (i + 1), np.var(penalties),
              1. * sum(iso_penalties) / (len(iso_penalties)),
              np.var(iso_penalties))
    else:
        log_row(logname, [
            max(penalties), 1. * sum(penalties) / (i + 1),
            np.var(penalties), '/', '/'
        ])
        print(max(penalties), 1. * sum(penalties) / (i + 1), np.var(penalties),
              '/', '/')
Code example #4
def log_thompson(logname, thompson):
    # theta is the posterior mean success probability of each sampling region
    # under the Beta(alpha, beta) posterior maintained by Thompson sampling.
    theta = thompson.alpha / (thompson.alpha + thompson.beta)
    log_row(logname, ['sample region information'])
    # Rank the regions by theta in descending order (flattened indices).
    indx = np.argsort(-theta, axis=None)
    log_row(logname, -np.sort(-theta, axis=None).reshape(-1))
    log_row(logname, indx)
    print(-np.sort(-theta, axis=None).reshape(-1)[0:9])
    print(indx[0:9])

    log_row(logname, ['to axis'])
    # Convert the flattened ranking back to per-axis indices of the theta grid.
    axis_indx = np.unravel_index(np.argsort(-theta, axis=None), theta.shape)
    log_row(logname, axis_indx[0])
    log_row(logname, axis_indx[1])
    log_row(logname, axis_indx[2])
    print(axis_indx[0][0:9])
    print(axis_indx[1][0:9])
    print(axis_indx[2][0:9])
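
The argsort/unravel_index pair above maps the descending ranking of the flattened theta grid back to per-axis indices. A small self-contained illustration (the 2x2x2 grid is made up for demonstration):

import numpy as np

theta = np.arange(8, dtype=float).reshape(2, 2, 2) / 8.0  # toy posterior means
order = np.argsort(-theta, axis=None)             # flattened indices, best region first
axis_indx = np.unravel_index(order, theta.shape)  # the same ranking as per-axis indices
print(theta.reshape(-1)[order][:3])               # the three largest theta values
print(axis_indx[0][:3], axis_indx[1][:3], axis_indx[2][:3])
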
Code example #5
    # Data loader over the objects to be attacked.
    attack_loader = DataLoader(data,
                               num_workers=8,
                               batch_size=args.attack_batch_size,
                               shuffle=True,
                               drop_last=True)

    logname = gen_attack_log(args)

    it = iter(attack_loader)
    corrects = []
    penalties = []
    iso_penalties = []

    log_row(logname, [
        'Test number', 'True label', 'Prob before', 'Prob after',
        'Classification label', 'Prob after', 'Penalty', 'Steps taken',
        'Success Rate', 'Initial Rate'
    ])
    init_suc = 0
    attack_times = 0

    thompson = ts.BernThompson(ts.environment(d=args.d, a0=args.a, b0=args.b))

    save_times = 0
    for i in range(args.num_tests):
        obj, label = next(it)
        obj, label = obj.to(device), label.to(device)[:, 0]

        # The classifier expects channels-first point clouds (B, 3, N), so swap the axes.
        obj = obj.transpose(2, 1)

        _, correct, rates, indices = logits_info(obj, label, model)
Code example #6
    print('======> Successfully loaded!')


    gen_train_log(args)
    # gen_train_log does not return the log path, so rebuild it the same way here.
    logname = ('logs_train/%s_%s_%s.csv' % (args.data, args.model, args.name))

    ########################################
    ## Train
    ########################################
    if args.model == 'dgcnn':
        criterion = cal_loss
    else:
        criterion = F.cross_entropy  # nn.CrossEntropyLoss()

    # Write the header row only when training from scratch (no resume checkpoint).
    if args.resume == '/':
        log_row(logname, ['Epoch', 'Train Loss', 'Train Acc', 'Test Loss',
                          'Test Acc', 'Learning Rate'])

    model.train()
    for epoch in range(START_EPOCH, args.epochs):
        print('\nEpoch: %d' % epoch)
        # Adjust the learning rate for this epoch according to args.adj_lr.
        optimizer.param_groups = adjust_lr_steep(args.lr, optimizer.param_groups, epoch, args.adj_lr)

        correct = 0
        total = 0
        for i, data in enumerate(train_loader, 0):
            points, label = data
            points, label = points.to(device), label.to(device)[:, 0]

            # RS-CNN subsamples the input points with farthest point sampling.
            if args.model == 'rscnn':
                fps_idx = pointnet2_utils.furthest_point_sample(points, args.num_points)  # (B, npoint)
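
The loop above relies on adjust_lr_steep, which is not shown in these excerpts. A plausible sketch is given below; it assumes args.adj_lr is a pair of (epoch milestones, decay factors), which is purely an assumption and may not match the original code base.

def adjust_lr_steep(base_lr, param_groups, epoch, adj_lr):
    # Hypothetical step schedule: adj_lr = (milestones, factors),
    # e.g. ([50, 100], [0.1, 0.01]); after each milestone the rate becomes base_lr * factor.
    milestones, factors = adj_lr
    lr = base_lr
    for m, f in zip(milestones, factors):
        if epoch >= m:
            lr = base_lr * f
    for group in param_groups:
        group['lr'] = lr
    return param_groups
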