Example #1
import numpy as np


def visdom_loss(visdom, loss_step, loss_dict):
    # `loss_data` is assumed to be a module-level dict of the form
    # {'X': [], 'Y': [], 'legend_U': [...]} that accumulates the full history
    loss_data['X'].append(loss_step)
    loss_data['Y'].append([loss_dict[k] for k in loss_data['legend_U']])
    # the whole history is re-sent on every call, so redrawing the window is
    # enough; the original's update='append' would duplicate earlier points
    visdom.line(X=np.stack([np.array(loss_data['X'])] *
                           len(loss_data['legend_U']), 1),
                Y=np.array(loss_data['Y']),
                win=1,
                opts=dict(xlabel='Step',
                          ylabel='Loss',
                          title='Training loss',
                          legend=loss_data['legend_U']))
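
A minimal sketch of how this helper might be driven, assuming a running visdom server; the legend keys and loss values below are placeholders, not part of the original example:

from visdom import Visdom

loss_data = {'X': [], 'Y': [], 'legend_U': ['loss_cls', 'loss_reg']}
viz = Visdom()
for step in range(100):
    losses = {'loss_cls': 1.0 / (step + 1), 'loss_reg': 0.5 / (step + 1)}
    visdom_loss(viz, step, losses)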
Example #2
import torch


def create_vis_plot(_xlabel, _ylabel, _title, _legend):
    # `viz` is assumed to be a module-level visdom.Visdom() instance; the
    # (1, 3) zero seed matches the three-entry legend used with this helper
    return viz.line(X=torch.zeros((1,)).cpu(),
                    Y=torch.zeros((1, 3)).cpu(),
                    opts=dict(xlabel=_xlabel,
                              ylabel=_ylabel,
                              title=_title,
                              legend=_legend))
Example #3
def update_vis_plot(iteration, loc, conf, window1, window2, update_type,
                    epoch_size=1):
    viz.line(
        X=torch.ones((1, 3)).cpu() * iteration,
        Y=torch.Tensor([loc, conf, loc + conf]).unsqueeze(0).cpu() / epoch_size,
        win=window1,
        update=update_type
    )
    # initialize the epoch plot on the first iteration; recent visdom versions
    # require a string here, so 'replace' stands in for the original `update=True`
    if iteration == 0:
        viz.line(
            X=torch.zeros((1, 3)).cpu(),
            Y=torch.Tensor([loc, conf, loc + conf]).unsqueeze(0).cpu(),
            win=window2,
            update='replace'
        )
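
The two helpers above pair naturally: create the windows once, then stream a point into them every iteration. A minimal sketch, assuming a running visdom server; the legend names and loss values are placeholders:

import torch
from visdom import Visdom

viz = Visdom()
vis_legend = ['Loc Loss', 'Conf Loss', 'Total Loss']
iter_plot = create_vis_plot('Iteration', 'Loss', 'Iteration loss', vis_legend)
epoch_plot = create_vis_plot('Epoch', 'Loss', 'Epoch loss', vis_legend)

for iteration in range(1000):
    loc_loss, conf_loss = 0.5, 0.3  # stand-ins for real loss values
    update_vis_plot(iteration, loc_loss, conf_loss,
                    iter_plot, epoch_plot, 'append')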
Example #4
import numpy as np


def create_plot_window(vis, xlabel, ylabel, title):
    # seed with a single NaN so the window exists but shows no point yet
    return vis.line(X=np.array([1]),
                    Y=np.array([np.nan]),
                    opts=dict(xlabel=xlabel,
                              ylabel=ylabel,
                              title=title))
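
The returned window id can then be appended to from a training loop. A minimal sketch, assuming a running visdom server; the loss values are placeholders:

import numpy as np
from visdom import Visdom

vis = Visdom()
train_loss_window = create_plot_window(vis, '#Iterations', 'Loss', 'Training Loss')
for iteration, loss_value in enumerate([0.9, 0.7, 0.5]):  # placeholder losses
    vis.line(X=np.array([iteration]), Y=np.array([loss_value]),
             win=train_loss_window, update='append')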
Example #5
import copy
import os
import time

import torch
from torch.autograd import Variable


# `dataloders`, `dataset_sizes`, and `batch_size` are assumed to be
# module-level globals defined alongside this function; `vis` is a small
# logging wrapper exposing line(tag, value, step).
def train_model(model,
                criterion,
                optimizer,
                scheduler,
                num_epochs,
                use_gpu,
                vis=None):

    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())  # snapshot, not a live reference
    best_acc = 0.0
    cnt = 0

    for epoch in range(num_epochs):
        count_batch = 0
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and a validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                # note: recent PyTorch expects scheduler.step() after the
                # optimizer steps, at the end of the epoch
                scheduler.step()
                model.train(True)  # Set model to training mode
            else:
                model.train(False)  # Set model to evaluate mode

            running_loss = 0.0
            running_corrects = 0

            # Iterate over the data
            for data in dataloders[phase]:
                count_batch += 1
                # this dataset yields (image, label, filename) triples
                inputs, labels, name = data
                # wrap them in Variable (kept for older-PyTorch compatibility)
                if use_gpu:
                    inputs = Variable(inputs.cuda())
                    labels = Variable(labels.cuda())
                else:
                    inputs, labels = Variable(inputs), Variable(labels)

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward
                outputs = model(inputs)
                _, preds = torch.max(outputs.data, 1)

                loss = criterion(outputs, labels)

                # backward + optimize only if in training phase
                if phase == 'train':
                    loss.backward()
                    optimizer.step()

                # statistics
                running_loss += loss.item()
                running_corrects += torch.sum(preds == labels.data).item()

                # log every batch (raise the modulus to log less often)
                if count_batch % 1 == 0:
                    batch_loss = running_loss / (batch_size * count_batch)
                    batch_acc = running_corrects / (batch_size * count_batch)
                    if vis is not None and phase == 'train':
                        vis.line("train/batch_loss", batch_loss, cnt)
                        vis.line("train/batch_acc", batch_acc, cnt)
                        cnt += 1

            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects / dataset_sizes[phase]

            if vis is not None and phase == 'train':
                vis.line("train/epoch_loss", epoch_loss, epoch)
                vis.line("train/epoch_acc", epoch_acc, epoch)
            elif vis is not None and phase == 'val':
                vis.line("val/epoch_loss", epoch_loss, epoch)
                vis.line("val/epoch_acc", epoch_acc, epoch)

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss,
                                                       epoch_acc))

            # save a full-model checkpoint after every training epoch
            if phase == 'train':
                if not os.path.exists('output_aspp_pre_se_S1_E2_3'):
                    os.makedirs('output_aspp_pre_se_S1_E2_3')
                torch.save(
                    model,
                    'output_aspp_pre_se_S1_E2_3/dasppp_epoch{}.pkl'.format(
                        epoch))

            # deep copy the weights whenever validation accuracy improves
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))

    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
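
A hypothetical driver for train_model; the model, head, and hyperparameters below are illustrative assumptions, not part of the original snippet, and `dataloders`, `dataset_sizes`, and `batch_size` must already exist at module level:

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision

model = torchvision.models.resnet18(pretrained=True)
model.fc = nn.Linear(model.fc.in_features, 2)  # hypothetical 2-class head
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)

model = train_model(model, criterion, optimizer, scheduler,
                    num_epochs=25, use_gpu=torch.cuda.is_available())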
Example #6
def membership_inference(args, load_data_module, download_mnist):
    # load clusters:
    clusters = None
    if args.number_arms is not None:
        clusters_file = "clusters_K=%d_pca=%d.torch" % (args.number_arms,
                                                        args.pca)
        clusters_file = os.path.join(load_data_module.MEMOIZE_FOLDER,
                                     clusters_file)
        logging.info("Loading clusters from file...")
        clusters = torch.load(clusters_file)

    # load dataset:
    train_data, _ = load_data_module.load_data(
        split="train", download_mnist_func=download_mnist)
    components = examples.util.pca(train_data, args.pca)
    positive_set = load_data_module.load_data(
        split="train",
        pca=components,
        clusters=clusters,
        bandwidth=args.bandwidth,
        download_mnist_func=download_mnist,
    )
    negative_set = load_data_module.load_data(
        split="test",
        pca=components,
        clusters=clusters,
        bandwidth=args.bandwidth,
        download_mnist_func=download_mnist,
    )

    # get list of checkpoints:
    model_files = [
        os.path.join(args.checkpoint_folder, filename)
        for filename in os.listdir(args.checkpoint_folder)
        if filename.endswith(".torch")
    ]
    model_files = sorted(model_files)
    iterations = [
        int(os.path.splitext(f)[0].split("_")[-1]) for f in model_files
    ]

    # load permutation used in training:
    perm = load_data_module.load_data_sampler(
        permfile=args.permfile, download_mnist_func=download_mnist)

    def subset(dataset, iteration):
        ids = perm[:iteration]
        return tuple(d[ids, :] for d in dataset)

    # measure accuracies of membership inference attacks:
    advantage = [
        membership_accuracy(
            torch.load(model_file),
            subset(positive_set, iteration),
            negative_set,
            epsilon=args.epsilon,
        ) for model_file, iteration in zip(model_files, iterations)
    ]

    # save advantages to file:
    if args.savefile is not None:
        with open(args.savefile, "wb") as fid:
            pickle.dump(advantage, fid)

    # plot advantages:
    if args.visualize:
        opts = {
            "xlabel": "Number of iterations",
            "ylabel": "Accuracy of inference attack",
        }
        # `visdom` is assumed to be a visdom.Visdom() instance; X and Y are
        # passed by keyword because Visdom.line() takes Y as its first argument
        visdom.line(Y=advantage, X=iterations, opts=opts)
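
Because the advantages are pickled, the curve can also be re-plotted offline. A minimal sketch, assuming a hypothetical savefile path and using the checkpoint index in place of the iteration count:

import pickle

import numpy as np
from visdom import Visdom

with open("advantage.pkl", "rb") as fid:  # hypothetical --savefile path
    advantage = pickle.load(fid)

viz = Visdom()
viz.line(Y=np.array(advantage),
         X=np.arange(len(advantage)),  # checkpoint index
         opts={"xlabel": "Checkpoint", "ylabel": "Accuracy of inference attack"})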