Example #1
    def __init__(self, args, rank):
        self.rank = rank
        self.epoch_loss = 0
        self.epoch_acc = 0

        self.args = args

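        # initialize_model is assumed to return the network, its expected input size, and a quantized copy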
        self.model, input_size, self.quant_model = initialize_model(
            args.model_name, get_num_classes(args.image_path))

        self.dataloaders_dict = preprocess_data(args.image_path,
                                                args.batch_size, input_size,
                                                args.num_workers, rank)

        self.train_iterator = iter(self.dataloaders_dict['train'])

        print("Params to learn:")
        params_to_update = []
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                params_to_update.append(param)
                print("\t", name)

        self.optimizer = optim.Adam(params_to_update, lr=0.001)

        self.criterion = nn.CrossEntropyLoss()
Example #2
def start():
    num_classes = get_num_classes(args.data_dir)

    # net = InceptionResnetV1(classify=True, num_classes=num_classes,dropout_prob=args.dropout_prob).train().to(DEVICE)

    if os.path.isfile(args.model_path):
        net = torch.load(args.model_path)
    else:
        net = InceptionResnetV2(
            classify=True,
            num_classes=num_classes,
            dropout_prob=args.dropout_prob).train().to(DEVICE)

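    # wrap the network for multi-GPU training when more than one CUDA device is visible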
    if torch.cuda.device_count() > 1:
        net = nn.DataParallel(net)

    if not args.only_test:
        # run training and testing
        net_training(net, num_classes)
        test(num_classes)
    elif args.only_test and not os.path.isfile(args.model_path):
        # test-only mode, but no model file is available
        raise TestModelError("The model to test is not set")
    else:
        # only run testing
        test(num_classes)
Example #3
def main(settings):

    with open(settings.template_path, 'r') as file:
        raw_template = file.read()

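    # Template is presumably string.Template; substitute() fills the $-placeholders listed below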
    template = Template(raw_template)

    num_eval_examples = utils.get_num_eval_examples()

    config = template.substitute({
        "train_num_steps": settings.train_num_steps,
        "eval_config_num_examples": num_eval_examples,
        "eval_config_num_visualizations": num_eval_examples,
        "num_classes": utils.get_num_classes()
    })

    with open(settings.output_path, 'w') as file:
        file.write(config)
Example #4
def main(_):
    print("Model Architecture: {}".format(FLAGS.model_architecture))

    # Adjust some parameters
    if FLAGS.debug:
        FLAGS.small_label_set = False
        print("RUNNING IN DEBUG MODE")

    FLAGS.num_classes = utils.get_num_classes(FLAGS)

    X_train, y_train = data_utils.load_dataset_tf(FLAGS, mode="train")
    X_val, y_val = data_utils.load_dataset_tf(FLAGS, mode="val")

    # comet_ml experiment logging (https://www.comet.ml/)
    experiment = Experiment(api_key="J55UNlgtffTDmziKUlszSMW2w",
                            log_code=False)
    experiment.log_multiple_params(utils.gather_params(FLAGS))
    experiment.set_num_of_epocs(FLAGS.epochs)
    experiment.log_dataset_hash(X_train)

    tf.logging.set_verbosity(tf.logging.INFO)

    # Start a new, DEFAULT TensorFlow session.
    sess = tf.InteractiveSession()

    utils.set_seeds()  # Get deterministic behavior?

    model = models.create_model(FLAGS)
    fw = framework.Framework(sess, model, experiment, FLAGS)

    num_params = int(utils.get_number_of_params())
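    # approximate model size assuming 4 bytes (float32) per parameter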
    model_size = num_params * 4
    print("\nNumber of trainable parameters: {}".format(num_params))
    print("Model is ~ {} bytes out of max 5000000 bytes\n".format(model_size))
    experiment.log_parameter("num_params", num_params)
    experiment.log_parameter("approx_model_size", model_size)

    fw.optimize(X_train, y_train, X_val, y_val)
Example #5
    def __init__(self, args):
        self.args = args

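        # spawn one data worker per process; the .remote() call suggests DataWorker is a Ray actor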
        self.workers = [
            DataWorker.remote(args, i) for i in range(args.num_workers)
        ]

        self.model, input_size, self.quant_model = initialize_model(
            args.model_name, get_num_classes(args.image_path))

        self.dataloaders_dict = preprocess_data(args.image_path,
                                                args.batch_size, input_size,
                                                args.num_workers, 0)

        print("Params to learn:")
        params_to_update = []
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                params_to_update.append(param)
                print("\t", name)

        self.optimizer = optim.Adam(params_to_update, lr=0.001)

        self.criterion = nn.CrossEntropyLoss()
Example #6
        dataroot=args.dataroot,
        normalize=True,
    )
    print('Transform: {}'.format(train_dataset.transform),
          file=logs,
          flush=True)
    train_loader, size_train, test_loader, size_test = utils.get_dataloader(
        train_dataset,
        test_dataset,
        batch_size=args.batch_size,
        size_max=args.size_max,
        num_workers=4,
        pin_memory=True,
        collate_fn=None)

    num_classes = utils.get_num_classes(args.dataset)
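    # infer the flattened input dimension (C * H * W) from a single batch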
    imsize = next(iter(train_loader))[0].size()[1:]
    input_dim = imsize[0] * imsize[1] * imsize[2]

    model = models.classifiers.FCNHelper(num_layers=args.depth,
                                         input_dim=input_dim,
                                         num_classes=num_classes,
                                         width=args.width)

    num_parameters = utils.num_parameters(model)
    num_samples_train = size_train
    num_samples_test = size_test
    print('Number of parameters: {}'.format(num_parameters), file=logs)
    print('Number of training samples: {}'.format(num_samples_train),
          file=logs)
    print('Number of testing samples: {}'.format(num_samples_test), file=logs)
Example #7
elif FLAGS.model == 'DeepSpeech2':
    model_fn = DBiRNN
elif FLAGS.model == 'CapsuleNetwork':
    model_fn = CapsuleNetwork
else:
    model_fn = None
rnncell = FLAGS.rnncell
num_layer = FLAGS.num_layer

activation_fn = activation_functions_dict[FLAGS.activation]
optimizer_fn = optimizer_functions_dict[FLAGS.optimizer]

batch_size = FLAGS.batch_size
num_hidden = FLAGS.num_hidden
num_feature = FLAGS.num_feature
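# the number of output classes depends on the label level passed to get_num_classes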
num_classes = get_num_classes(level)
num_epochs = FLAGS.num_epochs
num_iter = FLAGS.num_iter
lr = FLAGS.lr
grad_clip = FLAGS.grad_clip
datadir = FLAGS.datadir

logdir = FLAGS.logdir
savedir = os.path.join(logdir, level, 'save')
resultdir = os.path.join(logdir, level, 'result')
loggingdir = os.path.join(logdir, level, 'logging')
check_path_exists([logdir, savedir, resultdir, loggingdir])

mode = FLAGS.mode
keep = FLAGS.keep
keep_prob = 1 - FLAGS.dropout_prob
Example #8
        try:
            checkpoint_model = torch.load(m, map_location=device)  # checkpoint is a dictionary with several keys
            root_model = os.path.dirname(m)
        except RuntimeError as e:
            print('Error loading the model {}: {}'.format(m, e))
            raise  # without re-raising, checkpoint_model would be undefined below
        args_model = checkpoint_model['args']  # restore the previous arguments
        #imresize = checkpoint_model.get('imresize', None)
        log_model = os.path.join(os.path.dirname(m), 'logs.txt')

        path_output = os.path.join(root_model, args.name)
        os.makedirs(path_output, exist_ok=True)

        if hasattr(args_model, 'model') and args_model.model.find('vgg') != -1:
            # VGG model
            is_vgg = True
            NUM_CLASSES = utils.get_num_classes(args_model.dataset)
            model, _ = models.pretrained.initialize_model(args_model.model,
                                                          pretrained=False,
                                                          feature_extract=False,
                                                          num_classes=NUM_CLASSES)
            model.n_layers = utils.count_hidden_layers(model)
            PrunedClassifier = models.classifiers.PrunedCopyVGG
            args.normalized = True

        else:
            is_vgg = False
            archi = utils.parse_archi(log_model)
            model = utils.construct_FCN(archi)
            PrunedClassifier = models.classifiers.PrunedCopyFCN
Example #9
def main():
    
    expdir = os.path.join("exp", "train_" + args.tag)
    model_dir = os.path.join(expdir, "models")
    log_dir = os.path.join(expdir, "log")
    # args.model_dir = model_dir
    for x in ['exp', expdir, model_dir, log_dir]:
        if not os.path.isdir(x):
            os.mkdir(x)

    logfilename = os.path.join(log_dir, "log.txt")
    init_logfile(
        logfilename, 
        "arch={} epochs={} batch={} lr={} lr_step={} gamma={} noise_sd={} k_value={} eps_step={}".format(
        args.arch, args.epochs, args.batch, args.lr, args.lr_step_size, 
        args.gamma, args.noise_sd, args.k_value, args.eps_step))
    log(logfilename, "epoch\ttime\tlr\ttrain loss\ttrain acc\tval loss\tval acc")

    
    cifar_train = datasets.CIFAR10("./dataset_cache", train=True, download=True, transform=transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor()
        ]))
    cifar_val = datasets.CIFAR10("./dataset_cache", train=False, download=True, transform=transforms.ToTensor())


    train_loader = DataLoader(cifar_train, shuffle=True, batch_size=args.batch,
                              num_workers=args.workers)
    val_loader = DataLoader(cifar_val, shuffle=False, batch_size=args.batch,
                            num_workers=args.workers)
    
    # model = get_architecture(args.arch)
    # map architecture names to torchvision constructors; unknown names fall back to resnet18
    resnet_archs = {
        "resnet18": torchvision.models.resnet18,
        "resnet34": torchvision.models.resnet34,
        "resnet50": torchvision.models.resnet50,
    }
    arch_fn = resnet_archs.get(args.arch, torchvision.models.resnet18)
    model = arch_fn(pretrained=False, progress=True,
                    num_classes=get_num_classes()).to(device)
    
    criterion = CrossEntropyLoss()
    optimizer = SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    scheduler = StepLR(optimizer, step_size=args.lr_step_size, gamma=args.gamma, verbose=True)

    for i in range(args.epochs):
        before = time.time()
        train_loss, train_acc = train(train_loader, model, criterion, optimizer, scheduler, i)
        val_loss, val_acc = validate(val_loader, model, criterion)
        after = time.time()

        log(logfilename, "{}\t{:.3}\t{:.3}\t{:.3}\t{:.3}\t{:.3}\t{:.3}".format(
            i, float(after - before),
            float(scheduler.get_last_lr()[0]), train_loss, train_acc, val_loss, val_acc))

        torch.save(
            {
                'epoch': i,
                'arch': args.arch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
            }, os.path.join(model_dir, "ep{}.pth".format(i)))
Example #10
def train(dataloader, model, criterion, optimizer, scheduler, epoch):
    model.train()
    print('epoch ' + str(epoch))

    train_loss = 0.0
    train_acc = 0.0
    total = len(dataloader)
    start = time.time()
    toPilImage = transforms.ToPILImage()    # transform tensor into PIL image to save

    for batch_num, (x, y) in enumerate(dataloader):
        x = x.to(device)
        y = y.to(device)


        # gauss noise training
        gauss_noise = torch.randn_like(x, device=device) * args.noise_sd
        # x_noise = x + torch.randn_like(x, device=device) * args.noise_sd

        # targeted noise training
        tmp_criterion = nn.CrossEntropyLoss()
        tmp_optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
        classifier = PyTorchClassifier(
            model=model,
            clip_values=(min_pixel_value, max_pixel_value),
            loss=tmp_criterion,
            optimizer=tmp_optimizer,
            input_shape=(3, 32, 32),
            nb_classes=10,
        )

        # all other classes: for each sample, a random permutation of every class except the true one
        targets = []
        y_np = y.cpu().numpy()
        for i in range(y.shape[0]):
            other_classes = np.delete(np.arange(get_num_classes()), y_np[i])
            targets.append(np.expand_dims(np.random.permutation(other_classes), axis=0))
        # print(targets[0].shape)
        targets = np.concatenate(targets)
        # print(targets.shape)
        # exit(0)

        mix_noise = torch.zeros_like(x)
        for t in range(targets.shape[1]):
            # generate random targets
            # targets = art.utils.random_targets(y.cpu().numpy(), get_num_classes())

            # calculate loss gradient
            # print(np.squeeze(targets[:,t]).shape)
            # exit()

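            # one-hot encode the t-th alternative target class for every sample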
            y_slice = np.squeeze(targets[:,t])
            y_oh = np.zeros((y_slice.size, get_num_classes()))
            y_oh[np.arange(y_slice.size), y_slice] = 1


            grad = classifier.loss_gradient(x=x.cpu().numpy(), y=y_oh) * (-1.0)
            scaled_grad = torch.Tensor(grad * args.eps_step).to(device)

            mix_noise += scaled_grad

            model.zero_grad()
            tmp_optimizer.zero_grad()

            # print((scaled_grad.shape, gauss_noise.shape, targets.shape))

        # combine noise and targeted noise
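        # k_value weights the targeted-gradient noise against the random Gaussian noise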
        x_combine = x + (gauss_noise * (1.0 - args.k_value)) + (mix_noise * args.k_value)

        model.zero_grad()

        output = model(x_combine)
        loss = criterion(output, y)
        acc = accuracy(output, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.item()       
        train_acc += acc

    scheduler.step()
    end = time.time()
    print('training time:', end - start, 'sec, loss: ', train_loss/total, 'acc: ', train_acc/total)
    return train_loss/total, train_acc/total
Example #11
                    default=100000,
                    help="number of samples to use")
parser.add_argument("--alpha",
                    type=float,
                    default=0.001,
                    help="failure probability")
args = parser.parse_args()

if __name__ == "__main__":
    # load the base classifier
    checkpoint = torch.load(args.base_classifier)
    base_classifier = get_architecture(checkpoint["arch"])
    base_classifier.load_state_dict(checkpoint['model_state_dict'])

    # create the smoothed classifier g
    smoothed_classifier = Smooth(base_classifier, get_num_classes(),
                                 args.sigma)

    # prepare output file
    f = open(args.outfile, 'w')
    print("idx\tlabel\tpredict\tcorrect\ttime", file=f, flush=True)

    # iterate through the dataset
    total_correct = 0.0
    total_count = 0.0
    dataset = datasets.CIFAR10("./dataset_cache",
                               train=False,
                               download=True,
                               transform=transforms.ToTensor())
    for i in range(len(dataset)):
Example #12
                    help="eps_step parameter for PGD adversarial trainer")
parser.add_argument("--max_iter",
                    type=int,
                    default=5,
                    help="max_iter parameter for PGD adversarial trainer")
args = parser.parse_args()

## Step 1: load model and dataset

# load the base classifier
checkpoint = torch.load(args.base_classifier)
base_classifier = get_architecture(checkpoint["arch"])
base_classifier.load_state_dict(checkpoint['model_state_dict'])

# create the smoothed classifier g
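# Smooth presumably wraps the base classifier for randomized smoothing with noise level sigma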
smoothed_classifier = Smooth(base_classifier, get_num_classes(), args.sigma)

# iterate through the dataset
dataset = datasets.CIFAR10("./dataset_cache",
                           train=False,
                           download=True,
                           transform=transforms.ToTensor())

y_test = np.asarray(dataset.targets)  # test labels
min_pixel_value = 0.0  # min value
max_pixel_value = 1.0  # max value

## Step 2: create an interface around the trained classifier for the attack-model trainer

# Define the loss function and the optimizer for attack model trainer
Example #13
dev_dataset = FLAGS.dev_dataset
test_dataset = FLAGS.test_dataset

level = FLAGS.level
model_fn = DBiRNN  # why not FLAGS.model?
rnncell = FLAGS.rnncell
num_layer = FLAGS.num_layer

activation_fn = activation_functions_dict[
    FLAGS.activation]  # pick the matching activation function from the dict
optimizer_fn = optimizer_functions_dict[FLAGS.optimizer]  # pick the matching optimizer from the dict

batch_size = FLAGS.batch_size
num_hidden = FLAGS.num_hidden
num_feature = FLAGS.num_feature
num_classes = get_num_classes(level)  # total number of classes for this label level
num_steps = FLAGS.num_steps
num_epochs = FLAGS.num_epochs
lr = FLAGS.lr
grad_clip = FLAGS.grad_clip
datadir = FLAGS.datadir

logdir = FLAGS.logdir
savedir = os.path.join(logdir, level, 'save')
resultdir = os.path.join(logdir, level, 'result')
loggingdir = os.path.join(logdir, level, 'logging')
check_path_exists([logdir, savedir, resultdir, loggingdir])

mode = FLAGS.mode
keep = FLAGS.keep
keep_prob = 1 - FLAGS.dropout_prob
Example #14
def train(dataloader, model, criterion, optimizer, scheduler, epoch):
    model.train()
    print('epoch ' + str(epoch))

    train_loss = 0.0
    train_acc = 0.0
    total = len(dataloader)
    start = time.time()
    toPilImage = transforms.ToPILImage()  # transform tensor into PIL image to save

    for batch_num, (x, y) in enumerate(dataloader):
        x = x.to(device)
        y = y.to(device)

        # gauss noise training
        gauss_noise = torch.randn_like(x, device=device) * args.noise_sd
        # x_noise = x + torch.randn_like(x, device=device) * args.noise_sd

        # targeted noise training
        tmp_criterion = nn.CrossEntropyLoss()
        tmp_optimizer = optim.SGD(model.parameters(),
                                  lr=args.lr,
                                  momentum=args.momentum,
                                  weight_decay=args.weight_decay)
        classifier = PyTorchClassifier(
            model=model,
            clip_values=(min_pixel_value, max_pixel_value),
            loss=tmp_criterion,
            optimizer=tmp_optimizer,
            input_shape=(3, 32, 32),
            nb_classes=10,
        )
        # generate random targets
        targets = art.utils.random_targets(y.cpu().numpy(), get_num_classes())

        # calculate loss gradient
        grad = classifier.loss_gradient(x=x.cpu().numpy(), y=targets) * (-1.0)
        scaled_grad = torch.Tensor(grad * args.eps_step).to(device)

        # print((scaled_grad.shape, gauss_noise.shape, targets.shape))

        # combine noise and targeted noise
        x_combine = x + (gauss_noise *
                         (1.0 - args.k_value)) + (scaled_grad * args.k_value)

        model.zero_grad()

        output = model(x_combine)
        loss = criterion(output, y)
        acc = accuracy(output, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        train_acc += acc

    scheduler.step()
    end = time.time()
    print('training time:', end - start, 'sec, loss: ', train_loss / total,
          'acc: ', train_acc / total)
    return train_loss / total, train_acc / total