Code example #1
    def loadData(self):

        trainset = SimulationDataset("train", transforms=transforms.Compose([
                utils.RandomCoose(['center']),
                utils.Preprocess(self.input_shape),
                # utils.RandomResizedCrop(self.input_shape),
                # utils.RandomNoise(),
                utils.RandomTranslate(10, 10),
                # utils.RandomBrightness(),
                # utils.RandomContrast(),
                # utils.RandomHue(),
                utils.RandomHorizontalFlip(),
                utils.ToTensor(),
                utils.Normalize([0.1, 0.4, 0.4], [0.9, 0.6, 0.5])
            ]))
        # weights = utils.get_weights(trainset)
        # sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights), replacement=False)
        # self.trainloader = torch.utils.data.DataLoader(trainset, batch_size=self.cfg.batch_size, sampler=sampler, num_workers=0, pin_memory=True)
        self.trainloader = torch.utils.data.DataLoader(trainset, shuffle=True, batch_size=self.cfg.batch_size, num_workers=0, pin_memory=True)

        testset = SimulationDataset("test", transforms=transforms.Compose([
                utils.RandomCoose(['center']),
                utils.Preprocess(self.input_shape),
                utils.ToTensor(),
                utils.Normalize([0.1, 0.4, 0.4], [0.9, 0.6, 0.5])
            ]))
        self.testloader = torch.utils.data.DataLoader(testset, batch_size=self.cfg.batch_size, shuffle=False, num_workers=0, pin_memory=True)
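These utils transforms are the project's own rather than torchvision's: as Example #2 below shows, they operate on a sample dict with 'image' and 'target' keys. A minimal sketch of what such a dict-based Normalize could look like (the class body here is an assumption, not the project's actual implementation):

# Hypothetical sketch of a dict-based Normalize transform, matching the
# {'image': ..., 'target': ...} sample format used in these examples; not the
# project's actual implementation.
import torchvision.transforms.functional as F

class Normalize(object):
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, sample):
        # only the image is normalized; the target passes through untouched
        sample['image'] = F.normalize(sample['image'], self.mean, self.std)
        return sample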
Code example #2
File: test3.py Project: SouhardK/IE-Project-2019
    def predict(self, image, preloaded=False):
        # set test mode
        self.net.eval()

        if not preloaded:
            # assuming loadModel is a method of this class
            self.loadModel()
            print('Loaded Model')

        print('Starting Prediction')

        composed = transforms.Compose([
            utils.Preprocess(self.input_shape),
            utils.ToTensor(),
            utils.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])

        # Target gets discarded
        sample = {'image': image, 'target': 0}
        sample = composed(sample)
        inputs = sample['image']

        # Add single batch dimension
        inputs = inputs.unsqueeze(0)

        # async was renamed to non_blocking (async is a reserved word in Python 3.7+)
        if self.cfg.cuda:
            inputs = Variable(inputs.cuda(non_blocking=True))
        else:
            inputs = Variable(inputs)

        if self.cfg.cuda:
            outputs = self.net(inputs).cuda(non_blocking=True)
        else:
            outputs = self.net(inputs)

        print('Finished Prediction')
        print('Control tensor: %.6f ' % (outputs.item()))

        # set train mode
        self.net.train()

        return outputs.item()
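A minimal usage sketch for the predict method above; the wrapper class name, config object, and image path are placeholders for illustration:

# Hypothetical call site: Model, cfg and the file name are assumptions.
from PIL import Image

model = Model(cfg)                      # object holding net, cfg, input_shape, loadModel(), predict()
frame = Image.open('sample_frame.jpg')
control = model.predict(frame, preloaded=False)   # loads weights, then runs a single forward pass
print(control)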
Code example #3
File: test3.py Project: SouhardK/IE-Project-2019
    def loadData(self):

        trainset = SimulationDataset(
            "train",
            transforms=transforms.Compose([
                utils.RandomCoose(['centre', 'left', 'right']),
                utils.Preprocess(self.input_shape),
                utils.RandomTranslate(100, 10),
                utils.RandomBrightness(),
                utils.RandomContrast(),
                utils.RandomHue(),
                utils.RandomHorizontalFlip(),
                utils.ToTensor(),
                utils.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ]))

        weights = utils.get_weights(trainset)

        sampler = torch.utils.data.sampler.WeightedRandomSampler(
            weights, len(weights), replacement=True)

        # self.trainloader = torch.utils.data.DataLoader(trainset, batch_size=self.cfg.batch_size, sampler=sampler, num_workers=4)

        self.trainloader = torch.utils.data.DataLoader(
            trainset, batch_size=self.cfg.batch_size, num_workers=4)

        testset = SimulationDataset("test",
                                    transforms=transforms.Compose([
                                        utils.RandomCoose(['center']),
                                        utils.Preprocess(self.input_shape),
                                        utils.ToTensor(),
                                        utils.Normalize([0.485, 0.456, 0.406],
                                                        [0.229, 0.224, 0.225])
                                    ]))

        self.testloader = torch.utils.data.DataLoader(
            testset,
            batch_size=self.cfg.batch_size,
            shuffle=False,
            num_workers=4)
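Note that the sampler built above is never passed to the DataLoader in this version (the line that used it is commented out). Re-enabling it would look like the sketch below; shuffle must stay off when an explicit sampler is supplied.

        # Sketch: training loader that actually uses the weighted sampler built above.
        self.trainloader = torch.utils.data.DataLoader(
            trainset, batch_size=self.cfg.batch_size, sampler=sampler, num_workers=4)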
Code example #4
        # plt.imshow(F.to_pil_image(sample['image']))
        # plt.title(str(sample['target']))
        # plt.show()

        return sample['image'], sample['target']

    def __len__(self):
        return len(self.image_paths)


if __name__ == '__main__':

    input_shape = (utils.IMAGE_HEIGHT, utils.IMAGE_WIDTH)
    dataset = SimulationDataset("train",
                                transforms=transforms.Compose([
                                    utils.RandomCoose(['center']),
                                    utils.Preprocess(input_shape),
                                    utils.RandomHorizontalFlip(),
                                    utils.ToTensor(),
                                    utils.Normalize([0.485, 0.456, 0.406],
                                                    [0.229, 0.224, 0.225])
                                ]))
    print(dataset.__len__())
    print(dataset.__getitem__(0)[0].size())

    # per-channel statistics of each image tensor ([0] selects the image from the (image, target) pair)
    for c in range(3):
        for i in range(dataset.__len__()):
            print(dataset.__getitem__(i)[0][c].mean())
            print(dataset.__getitem__(i)[0][c].std())
    # print(dataset.__getitem__(0))
    # print(len(dataset.__get_annotations__()))
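The channel loop above prints one mean and standard deviation per image; to derive dataset-wide normalization constants such as the [0.485, 0.456, 0.406] / [0.229, 0.224, 0.225] values used elsewhere, the per-image statistics have to be aggregated. A minimal sketch, assuming the dataset object from the snippet above and C x H x W image tensors:

# Sketch: aggregate per-channel statistics over the whole dataset to obtain
# mean/std values suitable for utils.Normalize.
import torch

sums = torch.zeros(3)
sq_sums = torch.zeros(3)
pixels = 0
for i in range(len(dataset)):
    image, _ = dataset[i]
    sums += image.sum(dim=(1, 2))
    sq_sums += (image ** 2).sum(dim=(1, 2))
    pixels += image.shape[1] * image.shape[2]
mean = sums / pixels
std = (sq_sums / pixels - mean ** 2).sqrt()
print(mean, std)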
Code example #5
File: eval.py Project: trainsn/TSR-TVD
def main(args):
    # log hyperparameter
    print(args)

    # select device
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda:0" if args.cuda else "cpu")

    # set random seed
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    # data loader
    transform = transforms.Compose([utils.Normalize(), utils.ToTensor()])

    infer_dataset = InferTVDataset(root=args.root,
                                   sub_size=args.block_size,
                                   volume_list="volume_test_list.txt",
                                   max_k=args.infering_step,
                                   transform=transform)

    kwargs = {"num_workers": 4, "pin_memory": True} if args.cuda else {}
    infer_loader = DataLoader(infer_dataset,
                              batch_size=args.batch_size,
                              shuffle=False,
                              **kwargs)

    # model
    def generator_weights_init(m):
        if isinstance(m, nn.Conv3d):
            nn.init.kaiming_normal_(m.weight,
                                    mode='fan_out',
                                    nonlinearity='relu')
            if m.bias is not None:
                nn.init.zeros_(m.bias)

    g_model = Generator(args.upsample_mode, args.forward, args.backward,
                        args.gen_sn)
    g_model.apply(generator_weights_init)
    if args.data_parallel and torch.cuda.device_count() > 1:
        g_model = nn.DataParallel(g_model)
    g_model.to(device)

    mse_loss = nn.MSELoss()
    adversarial_loss = nn.MSELoss()
    train_losses, test_losses = [], []
    d_losses, g_losses = [], []

    # load checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint {}".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint["epoch"]
            g_model.load_state_dict(checkpoint["g_model_state_dict"])
            # g_optimizer.load_state_dict(checkpoint["g_optimizer_state_dict"])
            if args.gan_loss != "none":
                # d_model.load_state_dict(checkpoint["d_model_state_dict"])
                # d_optimizer.load_state_dict(checkpoint["d_optimizer_state_dict"])
                d_losses = checkpoint["d_losses"]
                g_losses = checkpoint["g_losses"]
            train_losses = checkpoint["train_losses"]
            test_losses = checkpoint["test_losses"]
            print("=> load chekcpoint {} (epoch {})".format(
                args.resume, checkpoint["epoch"]))

    g_model.eval()
    inferRes = []
    zSize, ySize, xSize = 120, 720, 480
    for i in range(args.infering_step):
        inferRes.append(np.zeros((zSize, ySize, xSize)))
    inferScale = np.zeros((zSize, ySize, xSize))
    time_start = 0
    volume_type = ''

    with torch.no_grad():
        for i, sample in tqdm(enumerate(infer_loader)):
            v_f = sample["v_f"].to(device)
            v_b = sample["v_b"].to(device)
            fake_volumes = g_model(v_f, v_b, args.infering_step,
                                   args.wo_ori_volume, args.norm)
            volume_type, time_start, x_start, y_start, z_start = utils.Parse(
                sample["vf_name"][0])

            for j in range(fake_volumes.shape[1]):
                volume = fake_volumes[0, j, 0]
                min_value = -0.015  # -0.012058
                max_value = 1.01  # 1.009666
                mean = (min_value + max_value) / 2
                std = mean - min_value
                volume = volume.to("cpu").numpy() * std + mean

                inferRes[j][z_start:z_start + args.block_size,
                            y_start:y_start + args.block_size,
                            x_start:x_start + args.block_size] += volume
                if j == 0:
                    inferScale[z_start:z_start + args.block_size,
                               y_start:y_start + args.block_size,
                               x_start:x_start + args.block_size] += 1
                # pdb.set_trace()

    for j in range(args.infering_step):
        inferRes[j] = inferRes[j] / inferScale
        inferRes[j] = inferRes[j].astype(np.float32)

        volume_name = volume_type + '_' + ("%04d" %
                                           (time_start + j + 1)) + '.raw'
        inferRes[j].tofile(os.path.join(args.save_pred, volume_name))
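The accumulate-and-divide pattern above sums block predictions into inferRes while counting coverage in inferScale, so regions covered by several blocks end up as the average of their predictions. A toy illustration of the same idea:

# Toy version of the accumulate-and-normalize pattern used above.
import numpy as np

full = np.zeros(8)
coverage = np.zeros(8)
for start, block in [(0, np.full(4, 1.0)), (2, np.full(4, 3.0))]:
    full[start:start + 4] += block
    coverage[start:start + 4] += 1
full /= np.maximum(coverage, 1)   # guard cells that were never written
print(full)                       # the overlapping cells 2-3 average to 2.0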
Code example #6
def make_z(args, shape, minval, maxval):
    # uniform noise in [minval, maxval)
    z = minval + torch.rand(shape) * (maxval - minval)
    return z.to(args.device)


def save_checkpoint(state, epoch):
    ckpt_dir = 'home/vkv/NAG/ckpt/'
    print("[*] Saving model to {}".format(ckpt_dir))

    filename = 'NAG' + '_ckpt.pth.tar'
    ckpt_path = os.path.join(ckpt_dir, filename)
    torch.save(state, ckpt_path)


normalize = utils.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

parser = argparse.ArgumentParser(description='BlackBox')
parser.add_argument("--comet_username",
                    type=str,
                    default="joeybose",
                    help='Username for comet logging')
parser.add_argument("--comet_apikey", type=str,\
        default="Ht9lkWvTm58fRo9ccgpabq5zV",help='Api for comet logging')
parser.add_argument('--model_path',
                    type=str,
                    default="mnist_cnn.pt",
                    help='where to save/load')
parser.add_argument('--mnist',
                    default=False,
                    action='store_true',
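A short usage sketch for the make_z helper above; the batch size, latent width, and bounds are illustrative assumptions:

# Hypothetical call: a batch of 64 latent vectors drawn uniformly from [-1, 1).
z = make_z(args, shape=(64, 100), minval=-1.0, maxval=1.0)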
Code example #7
def main(args):
    # log hyperparameter
    print(args)

    # select device
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda:0" if args.cuda else "cpu")

    # set random seed
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    # data loader
    transform = transforms.Compose([
        utils.Normalize(),
        utils.ToTensor()
    ])
    train_dataset = TVDataset(
        root=args.root,
        sub_size=args.block_size,
        volume_list=args.volume_train_list,
        max_k=args.training_step,
        train=True,
        transform=transform
    )
    test_dataset = TVDataset(
        root=args.root,
        sub_size=args.block_size,
        volume_list=args.volume_test_list,
        max_k=args.training_step,
        train=False,
        transform=transform
    )

    kwargs = {"num_workers": 4, "pin_memory": True} if args.cuda else {}
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size,
                              shuffle=True, **kwargs)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size,
                             shuffle=False, **kwargs)

    # model
    def generator_weights_init(m):
        if isinstance(m, nn.Conv3d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            if m.bias is not None:
                nn.init.zeros_(m.bias)

    def discriminator_weights_init(m):
        if isinstance(m, nn.Conv3d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu')
            if m.bias is not None:
                nn.init.zeros_(m.bias)

    g_model = Generator(args.upsample_mode, args.forward, args.backward, args.gen_sn, args.residual)
    g_model.apply(generator_weights_init)
    if args.data_parallel and torch.cuda.device_count() > 1:
        g_model = nn.DataParallel(g_model)
    g_model.to(device)

    if args.gan_loss != "none":
        d_model = Discriminator(args.dis_sn)
        d_model.apply(discriminator_weights_init)
        # if args.dis_sn:
        #     d_model = add_sn(d_model)
        if args.data_parallel and torch.cuda.device_count() > 1:
            d_model = nn.DataParallel(d_model)
        d_model.to(device)

    mse_loss = nn.MSELoss()
    adversarial_loss = nn.MSELoss()
    train_losses, test_losses = [], []
    d_losses, g_losses = [], []

    # optimizer
    g_optimizer = optim.Adam(g_model.parameters(), lr=args.lr,
                             betas=(args.beta1, args.beta2))
    if args.gan_loss != "none":
        d_optimizer = optim.Adam(d_model.parameters(), lr=args.d_lr,
                                 betas=(args.beta1, args.beta2))

    Tensor = torch.cuda.FloatTensor if args.cuda else torch.FloatTensor

    # load checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint {}".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint["epoch"]
            g_model.load_state_dict(checkpoint["g_model_state_dict"])
            # g_optimizer.load_state_dict(checkpoint["g_optimizer_state_dict"])
            if args.gan_loss != "none":
                d_model.load_state_dict(checkpoint["d_model_state_dict"])
                # d_optimizer.load_state_dict(checkpoint["d_optimizer_state_dict"])
                d_losses = checkpoint["d_losses"]
                g_losses = checkpoint["g_losses"]
            train_losses = checkpoint["train_losses"]
            test_losses = checkpoint["test_losses"]
            print("=> load chekcpoint {} (epoch {})"
                  .format(args.resume, checkpoint["epoch"]))

    # main loop
    for epoch in tqdm(range(args.start_epoch, args.epochs)):
        # training..
        g_model.train()
        if args.gan_loss != "none":
            d_model.train()
        train_loss = 0.
        volume_loss_part = np.zeros(args.training_step)
        for i, sample in enumerate(train_loader):
            params = list(g_model.named_parameters())
            # pdb.set_trace()
            # params[0][1].register_hook(lambda g: print("{}.grad: {}".format(params[0][0], g)))
            # adversarial ground truths
            real_label = Variable(Tensor(sample["v_i"].shape[0], sample["v_i"].shape[1], 1, 1, 1, 1).fill_(1.0), requires_grad=False)
            fake_label = Variable(Tensor(sample["v_i"].shape[0], sample["v_i"].shape[1], 1, 1, 1, 1).fill_(0.0), requires_grad=False)

            v_f = sample["v_f"].to(device)
            v_b = sample["v_b"].to(device)
            v_i = sample["v_i"].to(device)
            g_optimizer.zero_grad()
            fake_volumes = g_model(v_f, v_b, args.training_step, args.wo_ori_volume, args.norm)

            # adversarial loss
            # update discriminator
            if args.gan_loss != "none":
                avg_d_loss = 0.
                avg_d_loss_real = 0.
                avg_d_loss_fake = 0.
                for k in range(args.n_d):
                    d_optimizer.zero_grad()
                    decisions = d_model(v_i)
                    d_loss_real = adversarial_loss(decisions, real_label)
                    fake_decisions = d_model(fake_volumes.detach())

                    d_loss_fake = adversarial_loss(fake_decisions, fake_label)
                    d_loss = d_loss_real + d_loss_fake
                    d_loss.backward()
                    avg_d_loss += d_loss.item() / args.n_d
                    avg_d_loss_real += d_loss_real / args.n_d
                    avg_d_loss_fake += d_loss_fake / args.n_d

                    d_optimizer.step()

            # update generator
            if args.gan_loss != "none":
                avg_g_loss = 0.
            avg_loss = 0.
            for k in range(args.n_g):
                loss = 0.
                g_optimizer.zero_grad()

                # adversarial loss
                if args.gan_loss != "none":
                    fake_decisions = d_model(fake_volumes)
                    g_loss = args.gan_loss_weight * adversarial_loss(fake_decisions, real_label)
                    loss += g_loss
                    avg_g_loss += g_loss.item() / args.n_g

                # volume loss
                if args.volume_loss:
                    volume_loss = args.volume_loss_weight * mse_loss(v_i, fake_volumes)
                    for j in range(v_i.shape[1]):
                        volume_loss_part[j] += mse_loss(v_i[:, j, :], fake_volumes[:, j, :]).item() / args.n_g / args.log_every
                    loss += volume_loss

                # feature loss
                if args.feature_loss:
                    feat_real = d_model.extract_features(v_i)
                    feat_fake = d_model.extract_features(fake_volumes)
                    for m in range(len(feat_real)):
                        loss += args.feature_loss_weight / len(feat_real) * mse_loss(feat_real[m], feat_fake[m])

                avg_loss += loss / args.n_g
                loss.backward()
                g_optimizer.step()

            train_loss += avg_loss

            # log training status
            subEpoch = (i + 1) // args.log_every
            if (i+1) % args.log_every == 0:
                print("Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
                    epoch, (i+1) * args.batch_size, len(train_loader.dataset), 100. * (i+1) / len(train_loader),
                    avg_loss
                ))
                print("Volume Loss: ")
                for j in range(volume_loss_part.shape[0]):
                    print("\tintermediate {}: {:.6f}".format(
                        j+1, volume_loss_part[j]
                    ))

                if args.gan_loss != "none":
                    print("DLossReal: {:.6f} DLossFake: {:.6f} DLoss: {:.6f}, GLoss: {:.6f}".format(
                        avg_d_loss_real, avg_d_loss_fake, avg_d_loss, avg_g_loss
                    ))
                    d_losses.append(avg_d_loss)
                    g_losses.append(avg_g_loss)
                # train_losses.append(avg_loss)
                train_losses.append(train_loss.item() / args.log_every)
                print("====> SubEpoch: {} Average loss: {:.6f} Time {}".format(
                    subEpoch, train_loss.item() / args.log_every, time.asctime(time.localtime(time.time()))
                ))
                train_loss = 0.
                volume_loss_part = np.zeros(args.training_step)

            # testing...
            if (i + 1) % args.test_every == 0:
                g_model.eval()
                if args.gan_loss != "none":
                    d_model.eval()
                test_loss = 0.
                with torch.no_grad():
                    # iterate without rebinding i, so the outer training-loop counter is preserved
                    for sample in test_loader:
                        v_f = sample["v_f"].to(device)
                        v_b = sample["v_b"].to(device)
                        v_i = sample["v_i"].to(device)
                        fake_volumes = g_model(v_f, v_b, args.training_step, args.wo_ori_volume, args.norm)
                        test_loss += args.volume_loss_weight * mse_loss(v_i, fake_volumes).item()

                test_losses.append(test_loss * args.batch_size / len(test_loader.dataset))
                print("====> SubEpoch: {} Test set loss {:4f} Time {}".format(
                    subEpoch, test_losses[-1], time.asctime(time.localtime(time.time()))
                ))

            # saving...
            if (i+1) % args.check_every == 0:
                print("=> saving checkpoint at epoch {}".format(epoch))
                if args.gan_loss != "none":
                    torch.save({"epoch": epoch + 1,
                                "g_model_state_dict": g_model.state_dict(),
                                "g_optimizer_state_dict":  g_optimizer.state_dict(),
                                "d_model_state_dict": d_model.state_dict(),
                                "d_optimizer_state_dict": d_optimizer.state_dict(),
                                "d_losses": d_losses,
                                "g_losses": g_losses,
                                "train_losses": train_losses,
                                "test_losses": test_losses},
                               os.path.join(args.save_dir, "model_" + str(epoch) + "_" + str(subEpoch) + ".pth.tar")
                               )
                else:
                    torch.save({"epoch": epoch + 1,
                                "g_model_state_dict": g_model.state_dict(),
                                "g_optimizer_state_dict": g_optimizer.state_dict(),
                                "train_losses": train_losses,
                                "test_losses": test_losses},
                               os.path.join(args.save_dir, "model_" + str(epoch) + "_" + str(subEpoch) + ".pth.tar")
                               )
                torch.save(g_model.state_dict(),
                           os.path.join(args.save_dir, "model_" + str(epoch) + "_" + str(subEpoch) + ".pth"))

        num_subEpoch = len(train_loader) // args.log_every
        print("====> Epoch: {} Average loss: {:.6f} Time {}".format(
            epoch, np.array(train_losses[-num_subEpoch:]).mean(), time.asctime(time.localtime(time.time()))
        ))
Code example #8
from dataloader import SimulationDataset
import matplotlib.pyplot as plt
from PIL import Image

import utils as utils
import torch
from torchvision import transforms
import torchvision.transforms.functional as F

input_shape = (utils.IMAGE_HEIGHT, utils.IMAGE_WIDTH)
dataset = SimulationDataset("train",
                            transforms=transforms.Compose([
                                utils.RandomCoose(['center']),
                                utils.Preprocess(input_shape),
                                utils.ToTensor(),
                                utils.Normalize([0.1, 0.4, 0.4],
                                                [0.9, 0.6, 0.5])
                            ]))

targets = []

for i in range(dataset.__len__()):
    image, target = dataset.__getitem__(i)
    targets.append(target)
    # plt.imshow(F.to_pil_image(image))
    # plt.title(str(target))
    # plt.show()

plt.hist(targets, 50)
plt.show()
Code example #9
# The start of this excerpt is cut off; given how dice_coef is used below, its
# signature is presumably along these lines:
def dice_coef(y_true, y_pred, smooth=1.0):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    y_true_f = K.round(y_true_f)
    y_pred_f = K.round(y_pred_f)
    intersection = K.sum(K.abs(y_true_f * y_pred_f), axis=-1)
    return (2. * intersection + smooth) / (K.sum(y_true_f, -1) + K.sum(y_pred_f, -1) + smooth)

model=load_model("./T2_Ax_result_ProstateCancerClassification_NormalvsCancer_01/weights-14.h5", custom_objects={'JunctionWeightLayer': utils.JunctionWeightLayer, 'dice_coef': dice_coef})
#%%
img_list = img_hdf5[1233+100:]
y_true = label_hdf5[1233+100:]
y_true=np.array(y_true>=1, dtype=np.uint8)

img_norm=[]
for img in img_list:
    img_norm.append(utils.Normalize(img))

y_predict=model.predict(np.array(img_norm))
y_predict = y_predict.flatten()

#for i in range(len(y_true)):
#    print(y_true[i], y_predict[i])
from sklearn.metrics import auc,average_precision_score, precision_recall_curve,classification_report, f1_score, confusion_matrix,brier_score_loss
from sklearn.metrics import roc_auc_score,roc_curve ,fowlkes_mallows_score
title="ROC Curve for case detection with prostate cancer"
utils.plotROCCurveMultiCall(plt,y_true, y_predict, title)
plt.savefig("./PCA_MRI_DETECTION_roc_curve.eps", transparent=True)
plt.savefig("./PCA_MRI_DETECTION_roc_curve.pdf", transparent=True)
plt.savefig("./PCA_MRI_DETECTION_roc_curve.png", transparent=True)
plt.show()
plt.close()
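For reference, the fragment above computes a hard (rounded) Dice coefficient, Dice = (2*|A∩B| + smooth) / (|A| + |B| + smooth). A minimal NumPy check of the same formula (the smooth value is assumed):

# Stand-alone check of the Dice formula used in dice_coef above.
import numpy as np

a = np.array([1, 1, 0, 0])
b = np.array([1, 0, 1, 0])
smooth = 1.0
dice = (2.0 * np.sum(a * b) + smooth) / (np.sum(a) + np.sum(b) + smooth)
print(dice)   # (2*1 + 1) / (2 + 2 + 1) = 0.6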
Code example #10
File: main.py Project: joeybose/BlackMagicDesign
def main(args):
    if args.mnist:
        # Normalize image for MNIST
        # normalize = Normalize(mean=(0.1307,), std=(0.3081,))
        normalize = None
        args.input_size = 784
    elif args.cifar:
        normalize = utils.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
        args.input_size = 32 * 32 * 3
    else:
        # Normalize image for ImageNet
        normalize = utils.Normalize(mean=[0.485, 0.456, 0.406],
                                    std=[0.229, 0.224, 0.225])
        args.input_size = 150528

    # Load data
    train_loader, test_loader = utils.get_data(args)

    # The unknown model to attack
    unk_model = utils.load_unk_model(args)

    # Try Whitebox Untargeted first
    if args.debug:
        ipdb.set_trace()

    if args.train_vae:
        encoder, decoder, vae = train_mnist_vae(args)
    else:
        encoder, decoder, vae = None, None, None

    if args.train_ae:
        encoder, decoder, ae = train_mnist_ae(args)
    else:
        encoder, decoder, ae = None, None, None

    # Add A Flow
    norm_flow = None
    if args.use_flow:
        # norm_flow = flows.NormalizingFlow(30, args.latent).to(args.device)
        norm_flow = flows.Planar
    # Test white box
    if args.white:
        # Choose Attack Function
        if args.no_pgd_optim:
            white_attack_func = attacks.L2_white_box_generator
        else:
            white_attack_func = attacks.PGD_white_box_generator

        # Choose Dataset
        if args.mnist:
            G = models.Generator(input_size=784).to(args.device)
        elif args.cifar:
            if args.vanilla_G:
                G = models.DCGAN().to(args.device)
                G = nn.DataParallel(G.generator)
            else:
                G = models.ConvGenerator(models.Bottleneck, [6, 12, 24, 16],
                                         growth_rate=12, flows=norm_flow,
                                         use_flow=args.use_flow,
                                         deterministic=args.deterministic_G).to(args.device)
                G = nn.DataParallel(G)
            nc, h, w = 3, 32, 32

        if args.run_baseline:
            attacks.whitebox_pgd(args, unk_model)

        pred, delta = white_attack_func(args, train_loader,\
                test_loader, unk_model, G, nc, h, w)

    # Blackbox Attack model
    model = models.GaussianPolicy(args.input_size,
                                  400,
                                  args.latent_size,
                                  decode=False).to(args.device)

    # Control Variate
    cv = to_cuda(models.FC(args.input_size, args.classes))
Code example #11
    print utils.runcmd('cd ~/cache/nova-specs; '
                       'git checkout master; '
                       'git pull')

    for ent in os.listdir(
            os.path.expanduser('~/cache/nova-specs'
                               '/specs/%s' % RELEASE)):
        if not ent.endswith('.rst'):
            continue
        APPROVED_SPECS.append(ent[:-4])

    possible = reviews.component_reviews('openstack/nova-specs')
    for review in filter_obvious(possible):
        try:
            bp_name = review.get('topic', 'bp/nosuch').split('/')[1]
        except:
            bp_name = review.get('topic', '')

        PROPOSED_SPECS.append(bp_name)

    targets()

    for header in HEADERS:
        print '<li><a href="#%s">%s</a>' % (header.replace(' ', '_'), header)
    print '<br/><br/>'
    print utils.Normalize('\n'.join(OUTPUT))
    print
    print 'I printed %d reviews' % len(PRINTED)
    print 'Generated at: %s' % datetime.datetime.now()