Ejemplo n.º 1
0
def black_box_eval(files, dataset, test_loader, epsilon, alpha):
    '''
    Run black-box FGSM and PGD attacks between every ordered pair of the
    given model checkpoints: adversarial examples are crafted on a source
    (surrogate) model and evaluated on a different target model.

    :param files: list of checkpoint paths of the models to attack
    :param dataset: dataset identifier passed to ``init`` (also embedded in
        the output pickle filename)
    :param test_loader: data loader providing the evaluation batches
    :param epsilon: perturbation budget for both attacks
    :param alpha: PGD step size
    :return: None; the result rows are pickled to ``blackboxdataset{dataset}``
    '''
    res = []

    # Two network instances are allocated once and reused for every pair;
    # load_check_point swaps the weights in place of re-building the model.
    model_s, optimizer_s, _ = init(dataset, 0.001)
    model_t, optimizer_t, _ = init(dataset, 0.001)
    for i, target in enumerate(files):
        # Placeholder row recorded when an attack on this target fails.
        failure_row = [target, [], [], []]
        for j, source in enumerate(files):
            if j == i:
                # Same source and target would be a white-box attack; skip.
                continue
            print(f"attacking {target} from {source}")
            model_s, optimizer_s, _ = load_check_point(source, model_s,
                                                       optimizer_s)
            model_t, optimizer_t, _ = load_check_point(target, model_t,
                                                       optimizer_t)

            accs = [target]
            try:
                print("running fgsm")
                accs.append(
                    fgsm_attack(test_loader,
                                model_s,
                                True,
                                F.cross_entropy,
                                test_batch,
                                init_ep=epsilon,
                                max_range=1,
                                target_model=model_t))
                for it in [20, 100]:
                    accs.append(
                        invoke_pgd_attack(test_loader,
                                          model_s,
                                          True,
                                          epsilon,
                                          alpha,
                                          F.cross_entropy,
                                          test_batch,
                                          iters=it,
                                          target_model=model_t))
                res.append(accs)
            except Exception as e:
                # Best-effort sweep: record the placeholder row and keep
                # evaluating the remaining source/target pairs.
                print(f"Exception model: {e}")
                res.append(failure_row)
                continue
    print(f"res:\n {res}")
    with open(f"blackboxdataset{dataset}", "wb") as f:
        pickle.dump(res, f)
Ejemplo n.º 2
0
def white_box_eval(files, dataset, test_loader, epsilon, alpha):
    '''
    Run white-box PGD attacks (20, 40 and 100 iterations) against each
    model checkpoint in turn.

    :param files: list of models path to evaluate
    :return:
    '''
    model, optimizer = init(dataset, 0.001)

    res = []
    for f in files:
        print(f"attacking {f}")
        # Reuse the same network object; only the weights are swapped in.
        model, optimizer, _ = load_check_point(f, model, optimizer)
        accs = [
            invoke_pgd_attack(test_loader,
                              model,
                              True,
                              epsilon,
                              alpha,
                              F.cross_entropy,
                              test_batch,
                              iters=iterations)
            for iterations in (20, 40, 100)
        ]
        res.append(accs)

    print(f"res:\n {res}")
Ejemplo n.º 3
0
def experiment(train_id, continue_ex=False):
    """Run (or resume) the training experiment identified by ``train_id``.

    :param train_id: experiment identifier; names the checkpoint under
        ``results/``, the tensorboard run under ``runs/`` and the config
        file ``config/{train_id}.json``
    :param continue_ex: when True, resume training from the saved
        checkpoint instead of starting fresh from the JSON config
    """
    if continue_ex:
        net, mdl, params = load_check_point(train_id, path='results/')
        logger = tblog.Logger('runs/{}'.format(train_id))
        with h5py.File(params['data_fn']) as hf:
            train(mdl, hf, params, logger)
            f1, ll = evaluate(mdl, hf, params)
            save_check_point(net, params, train_id, path='results/')
    else:
        # load config file
        params = json.load(open('config/{}.json'.format(train_id)))
        tid, f1, ll = ex(**params)

    # Parenthesized so this parses on Python 3 as well; printing a single
    # dict produces identical output on Python 2 and 3.
    print({'f1': f1, 'll': ll})
Ejemplo n.º 4
0
def feature_ext(train_id, data_fn, out_path,
                agg=(np.mean, np.std, np.median, np.amin, np.amax),
                target='tg', model_path='results/', distributed=False):
    """Extract aggregated features with a trained model and dump them.

    :param train_id: experiment identifier used to locate the checkpoint
    :param data_fn: path to the HDF5 data file to extract features from
    :param out_path: directory where the joblib output file is written
    :param agg: aggregation functions applied to the features
        (default was a mutable list; an equivalent tuple avoids the
        shared-mutable-default pitfall)
    :param target: target key, also embedded in the output filename
    :param model_path: directory containing the saved checkpoints
    :param distributed: when True, rebuild the feature function for
        distributed extraction via ``build_dist_feat_fnc``
    """
    # Parenthesized prints produce identical output on Python 2 and 3.
    print('Extracting!...')
    net, mdl, params = load_check_point(train_id, path=model_path)
    if distributed:
        print('Compile Distributed Feature Function!...')
        mdl = build_dist_feat_fnc(net, target)

    with h5py.File(data_fn, 'r') as hf:
        Z, y, dset, ids = feature(mdl, hf, params, target, agg)  # feature

    print('Saving output...')
    out_fn = os.path.join(out_path, train_id + '_{}.dat.gz'.format(target))
    joblib.dump((Z, y, dset, ids), out_fn)
Ejemplo n.º 5
0
def change_comparison_plot(model_path):
    """Build a plotly figure comparing predicted vs. actual values.

    The model's predictions over the concatenated train+test data are
    drawn against the actual series, with a dashed vertical line marking
    where the test split begins.
    """
    model_base_path = get_base_path.search(model_path).groups()[0]
    data_file = os.path.join(model_base_path, "data.pickle")
    with open(data_file, 'rb') as f:
        x_train, x_test, y_train, y_test = pickle.load(f)

    x, smb = concatenate_data(x_train, y_train, x_test, y_test)
    test_index = len(y_train)
    model = load_check_point(model_path)
    pred = model.predict(x)[:, 0]

    positions = np.arange(len(smb))
    predicted_trace = go.Scatter(x=positions,
                                 y=pred,
                                 mode='lines+markers',
                                 name='Predicted')
    actual_trace = go.Scatter(x=positions,
                              y=smb,
                              mode='lines+markers',
                              name='Actual')

    fig = go.Figure(data=[predicted_trace, actual_trace])
    fig.add_vline(x=test_index,
                  line_dash="dash",
                  line_color="green",
                  annotation_text="Predictions on test data")
    fig.update_layout(title='Comparison Between Predicted and Actual')
    fig.update_layout(autosize=True, margin=dict(t=50, b=20, l=20, r=20))
    return fig
Ejemplo n.º 6
0
    # NOTE(review): fragment of a CLI entry point — it begins mid-function
    # (args, checkpoint_path, pert_count, batch_size come from outside this
    # view) and is truncated mid-call after the final fgsm_attack line.
    epochs = args.epochs
    dataset = DatasetEnum(args.dataset)
    attack_type = AttackType(args.attack)
    loss = F.cross_entropy
    # Dataset-specific attack defaults, optionally overridden below.
    epsilon, step_size = get_attack_params(dataset)
    # BUG(review): "not None" is always True, so these two lines ALWAYS take
    # args.epsilon / args.step_size, even when they are None. The intent was
    # almost certainly "args.epsilon if args.epsilon is not None else ...".
    # Left unchanged here because the surrounding function is truncated.
    epsilon = args.epsilon if not None else epsilon
    step_size = args.step_size if not None else step_size
    lr = get_dataset_default_lr(dataset) if args.lr == -1 else args.lr
    use_cuda = torch.cuda.is_available()
    model, optimizer = utils.init(dataset, lr, use_cuda)
    # NOTE(review): train_perturbation is never used in the visible span —
    # presumably consumed later in the truncated part; confirm.
    train_perturbation = FGSMAttack(loss, model, epsilon).perturb
    # Seed is exported via the environment so subprocesses/helpers see it.
    os.environ['seed'] = str(args.seed)

    if args.model_path != "":
        # Evaluation-only path: load an existing checkpoint and test it.
        test_loader = utils.only_test(dataset, args.batch_size)
        model, optimizer, start_epoch = utils.load_check_point(
            "./checkpoint/" + args.model_path, model, use_cuda, optimizer)
        test(test_loader, model, use_cuda, dataset)
    else:
        # Training path. NOTE(review): "perturabtions" typo in the user-facing
        # message below — left as-is (runtime string).
        pert_str = f'{args.pert_file} file' if args.pert_file is not None else f'random sample with seed {args.seed}'
        print(
            f"Training saved in {checkpoint_path}. Using {args.pert_count} perturabtions in range: {args.pert_range}, via {pert_str} on dataset {dataset}"
        )
        train_loader, test_loader = load_data(dataset, args.pert_file,
                                              pert_count, batch_size,
                                              args.pert_range)
        train_loss, train_acc, test_loss, test_acc = train_model(
            train_loader, test_loader, use_cuda, epochs, model, optimizer,
            dataset, loss)

    # NOTE(review): source truncated mid-statement below.
    if attack_type == AttackType.FGSM_ATTACK:
        acc = fgsm_attack(test_loader,
from torch import nn
import torch
import torch.optim as optim
from dataset import CTDataset, CTDataLoader
from flexible_model import dice_loss, Unet, Flex_Unet
from utils import save_check_point, load_check_point, save_history, load_history
from tqdm import tqdm
from visualize import plot_3d
import numpy as np

# Select GPU when available; the model and batches are moved to this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Checkpoint filename presumably encodes epoch and best loss — confirm.
checkpoint = './model/Best_model_14_0.0798.pth.tar'
batch_size = 1

# Load model checkpoint that is to be evaluated
model, _, _ = load_check_point(checkpoint)
# double(): the CT loader presumably yields float64 tensors — TODO confirm.
model.double().to(device)
# NOTE(review): loss_function is never used in the visible span.
loss_function = dice_loss
model.eval()
# build data loader
data = CTDataset('./data/DCM_Test.json')
test_loader = CTDataLoader(data, 1, batch_size=batch_size, mode="testing")

# evaluate
result = []
area = []
with torch.no_grad():
    # NOTE(review): the loop variable "data" shadows the CTDataset above.
    for i, (data, target,
            address) in enumerate(tqdm(test_loader, desc='Evaluating')):
        output = model(data.to(device))
        # NOTE(review): source is truncated mid-statement here.
        p, t = np.round(np.array(output.cpu())), np.round(
Ejemplo n.º 8
0
def prepare_submission(train_id, test_fn, path='results/'):
    """Load a trained model checkpoint and generate submission predictions.

    :param train_id: experiment identifier used to locate the checkpoint
    :param test_fn: submission/output filename passed through to ``test``
    :param path: directory containing the saved checkpoints
    """
    net, mdl, params = load_check_point(train_id, path=path)
    # Open read-only: the HDF5 file is only consumed here, and the legacy
    # h5py default mode ('a') would create or write-lock it by accident.
    with h5py.File(params['data_fn'], 'r') as hf:
        test(mdl, hf, train_id, test_fn, params)