Example #1
    def load_model(self):
        if self.cuda:
            self.device = torch.device('cuda')
            cudnn.benchmark = True
        else:
            self.device = torch.device('cpu')

        self.model = LeNet().to(self.device)
        # self.model = AlexNet().to(self.device)
        # self.model = VGG11().to(self.device)
        # self.model = VGG13().to(self.device)
        # self.model = VGG16().to(self.device)
        # self.model = VGG19().to(self.device)
        # self.model = GoogLeNet().to(self.device)
        # self.model = resnet18().to(self.device)
        # self.model = resnet34().to(self.device)
        # self.model = resnet50().to(self.device)
        # self.model = resnet101().to(self.device)
        # self.model = resnet152().to(self.device)
        # self.model = DenseNet121().to(self.device)
        # self.model = DenseNet161().to(self.device)
        # self.model = DenseNet169().to(self.device)
        # self.model = DenseNet201().to(self.device)
        # self.model = WideResNet(depth=28, num_classes=10).to(self.device)

        self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)
        self.scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer,
                                                        milestones=[75, 150],
                                                        gamma=0.5)
        self.criterion = nn.CrossEntropyLoss().to(self.device)
Example #2
    def test_lenet(self):
        from models import LeNet

        n_outputs = 10
        model = LeNet(num_classes=n_outputs)
        model.eval()
        x = torch.randn(20, 3, 32, 32)
        outputs = model(x)

        self.assertTrue(outputs.shape[0] == x.shape[0])
        self.assertTrue(outputs.shape[1] == n_outputs)
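These examples assume a models.LeNet that takes 3x32x32 CIFAR-style inputs, accepts num_classes, and exposes features/classifier submodules (as Example #4 checks). A minimal sketch of such a definition, as an assumption rather than any repository's actual code:

import torch
import torch.nn as nn

class LeNet(nn.Module):
    """Minimal LeNet-5-style net for 3x32x32 inputs (sketch, not the repos' exact code)."""
    def __init__(self, num_classes=10):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 6, kernel_size=5), nn.ReLU(), nn.MaxPool2d(2),   # 32 -> 28 -> 14
            nn.Conv2d(6, 16, kernel_size=5), nn.ReLU(), nn.MaxPool2d(2),  # 14 -> 10 -> 5
        )
        self.classifier = nn.Sequential(
            nn.Linear(16 * 5 * 5, 120), nn.ReLU(),
            nn.Linear(120, 84), nn.ReLU(),
            nn.Linear(84, num_classes),
        )

    def forward(self, x):
        x = self.features(x)
        x = torch.flatten(x, 1)
        return self.classifier(x)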
Example #3
    def _load_model(self):
        self.model = LeNet()
        self.current_iteration = 0
        if os.path.exists(self.args.model_path):
            try:
                print("Loading model from: {}".format(self.args.model_path))
                self.model.load_state_dict(torch.load(self.args.model_path))
                self.current_iteration = joblib.load("{}.iter".format(
                    self.args.model_path))
            except Exception as e:
                print(
                    "Exception: {}\nCould not load model from {} - starting from scratch"
                    .format(e, self.args.model_path))
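A save counterpart would persist both pieces of state that _load_model reads back. A minimal sketch (the method name _save_model is hypothetical):

    def _save_model(self):
        # persist the weights and the iteration counter side by side,
        # matching what _load_model expects to find
        torch.save(self.model.state_dict(), self.args.model_path)
        joblib.dump(self.current_iteration, "{}.iter".format(self.args.model_path))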
Example #4
    def test_get_mods(self):
        from models import LeNet
        from altmin import get_mods

        model = LeNet()
        model.eval()
        x = torch.randn(20, 3, 32, 32)
        outputs = model(x)

        model_mods = get_mods(model)

        self.assertTrue(
            len(model.features) + len(model.classifier) >= len(model_mods))
Example #5
def alignment_lenet(augmentations):
    """Compute the kernel target alignment on LeNet. Since the feature map is
    initialized to be random and then trained, unlike kernels where feature map
    is fixed, kernel target alignment doesn't predict the accuracy at all.
    """
    for augmentation in augmentations:
        print(augmentation.name)
        model_base = LeNet().to(device)
        optimizer = sgd_opt_from_model(model_base)
        # Train LeNet for 1 epoch first
        _ = train_all_epochs(train_loader, valid_loader, model_base, optimizer, 1)
        model = LeNetAug().to(device)
        model.load_state_dict(model_base.state_dict())
        loader = loader_from_dataset(augmentation.dataset)
        print(kernel_target_alignment_augmented(loader, model))
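For reference, kernel target alignment compares the Gram matrix K of the learned features with the ideal label kernel YY^T: A(K, YY^T) = <K, YY^T>_F / (||K||_F ||YY^T||_F). A self-contained sketch of this standard definition (kernel_target_alignment_augmented above presumably computes a variant of it over the augmented loader):

import torch
import torch.nn.functional as F

def kernel_target_alignment(features, labels):
    """A(K, YY^T) = <K, YY^T>_F / (||K||_F * ||YY^T||_F)."""
    K = features @ features.t()          # Gram matrix of the feature map
    Y = F.one_hot(labels).float()
    T = Y @ Y.t()                        # ideal target kernel
    return (K * T).sum() / (K.norm() * T.norm())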
Example #6
    def test_get_codes(self):
        from models import LeNet
        from altmin import get_mods, get_codes

        model = LeNet()
        model.eval()
        x = torch.randn(20, 3, 32, 32)
        outputs = model(x)

        model_mods = get_mods(model)
        out1, codes = get_codes(model_mods, x)
        out2 = model_mods(x)

        self.assertAlmostEqual((outputs - out1).abs().mean().item(), 0)
        self.assertAlmostEqual((out1 - out2).abs().mean().item(), 0)
Example #7
def measure_computation_fraction_lenet(train_loader):
    """Measure percentage of computation time spent in each layer of LeNet.
    """
    model = LeNet(n_channels=n_channels, size=32).to(device)
    loader = train_loader
    it = iter(loader)
    data, target = next(it)
    data, target = data.to(device), target.to(device)
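The snippet stops after moving one batch to the device. One way to finish the measurement is to time each leaf module with forward hooks; a minimal sketch, assuming hook-based wall-clock timing is acceptable (the original's exact method is not shown):

import time
import torch

def layer_time_fractions(model, data, n_repeats=100):
    # Estimate the fraction of forward time spent in each leaf module
    # by timing it with forward pre/post hooks.
    names = {m: n for n, m in model.named_modules()}
    totals, starts = {}, {}

    def pre_hook(module, inputs):
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        starts[module] = time.perf_counter()

    def post_hook(module, inputs, output):
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        totals[module] = totals.get(module, 0.0) + time.perf_counter() - starts[module]

    handles = []
    for m in model.modules():
        if len(list(m.children())) == 0:  # leaf modules only
            handles.append(m.register_forward_pre_hook(pre_hook))
            handles.append(m.register_forward_hook(post_hook))

    with torch.no_grad():
        for _ in range(n_repeats):
            model(data)

    for h in handles:
        h.remove()

    grand_total = sum(totals.values())
    return {names[m]: t / grand_total for m, t in totals.items()}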
Example #8
def main(config):
    logger = prepare_logger(config)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # get loaders
    if not config.is_train_source:
        target_loader = get_loader(type="MNIST",
                                   train=False,
                                   batch_size=config.batch_size)

    source_train_loader = get_loader(type="SVHN",
                                     train=True,
                                     batch_size=config.batch_size)
    source_test_loader = get_loader(type="SVHN",
                                    train=False,
                                    batch_size=config.batch_size)

    # build source classifier
    model_src = LeNet(config.num_gpus).to(device)
    if (not config.is_train_source) or config.is_finetune:
        model_src.load_state_dict(torch.load(config.model_dir))

    # train source classifier
    if config.is_train_source:
        logger.info("train source classifier..")
        train_source(model_src, source_train_loader, source_test_loader,
                     config, logger)
        logger.info("evaluate source classifier..")
        logger.info("test accurracy in source domain: %f\n" %
                    (evaluate(model_src, source_test_loader)))

    else:
        # initialize the target classifier with the source classifier
        model_trg = torch.load(open("./pretrained/lenet-source.pth", "rb"))

        # build discriminator
        D = Discriminator(config.num_gpus)

        # adaptation process
        logger.info("start adaptation process..")
        adapt_target_domain(D, model_src, model_trg, source_train_loader,
                            target_loader, config)
        logger.info("evaluate target classifier..")
        logger.info("accurracy in target domain: %f\n" %
                    (evaluate(model_trg, target_loader)))
Example #9
def pt2stru(pt):
    if "lenet" in pt:
        return LeNet()
    elif "l0net" in pt:
        return L0Net(mean=1)
    elif "VGG" in pt:
        if "l0" in pt:
            return L0VGG(cifar10_network, loc=g_mean, temp=g_temp)
        else:
            return VGG(cifar10_network)
    else:
        # previously fell through and returned None silently
        raise ValueError("cannot infer a model architecture from: {}".format(pt))
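pt2stru only rebuilds the architecture implied by the checkpoint filename; loading the weights is a separate step. A hedged usage sketch (the path is hypothetical):

pt = "checkpoints/lenet_mnist.pt"   # hypothetical checkpoint path
model = pt2stru(pt)                 # architecture inferred from the filename
model.load_state_dict(torch.load(pt, map_location="cpu"))
model.eval()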
Example #10
def get_network(name, baseline=True, **kwargs):
    mean = kwargs.get("mean")
    temp = kwargs.get("temp")
    if name == "mnist":
        if baseline:
            return LeNet()
        # kwargs.get returns None instead of raising KeyError,
        # so the missing-argument case must be checked explicitly
        if mean is None:
            print("No Key Named 'mean'")
            return None
        return L0Net(mean=mean, temp=temp)
    elif name == "cifar10":
        if baseline:
            return VGG(cifar10_network)
        else:
            return L0VGG(cifar10_network, loc=mean, temp=temp)
Example #11
def get_model(name, device):
    """
    Returns required classifier and autoencoder
    :param name:
    :return: Autoencoder, Classifier
    """
    if name == 'lenet':
        model = LeNet(in_channels=channels).to(device)
    elif name == 'alexnet':
        model = AlexNet(channels=channels, num_classes=10).to(device)
    elif name == 'vgg':
        model = VGG(in_channels=channels, num_classes=10).to(device)
    else:
        # previously an unknown name caused a NameError on the return below
        raise ValueError("unknown model name: '{}'".format(name))

    autoencoder = CAE(in_channels=channels).to(device)
    return model, autoencoder
Example #12
def main():
    args = parse_args()

    paths = Paths()
    checkpoints_path = str(paths.CHECKPOINTS_PATH)
    logging_path = str(paths.LOG_PATH)

    callbacks = [PrintCallback()]
    checkpoint_callback = ModelCheckpoint(filepath=checkpoints_path +
                                          '/{epoch}-{val_acc:.3f}',
                                          save_top_k=1,
                                          verbose=True,
                                          monitor='val_acc',
                                          mode='max',
                                          prefix='')
    early_stop_callback = EarlyStopping(monitor='val_acc',
                                        mode='max',
                                        verbose=False,
                                        strict=False,
                                        min_delta=0.0,
                                        patience=2)
    gpus = gpu_count()
    log_save_interval = args.log_save_interval
    logger = TensorBoardLogger(save_dir=logging_path, name='tuna-log')
    logger.log_hyperparams(args)
    max_epochs = args.epochs

    model = LeNet(hparams=args, paths=paths)
    trainer = Trainer(
        callbacks=callbacks,
        checkpoint_callback=checkpoint_callback,
        early_stop_callback=early_stop_callback,
        fast_dev_run=True,
        gpus=gpus,
        log_save_interval=log_save_interval,
        logger=logger,
        max_epochs=max_epochs,
        min_epochs=1,
        show_progress_bar=True,
        weights_summary='full',
    )
    trainer.fit(model)
Example #13
def get_model(name, input_size=None, output=None):
    name = name.lower()
    if name == 'lenet-300-100':
        model = LeNet_300_100(input_size, output)
    elif name == 'lenet-5':
        model = LeNet(input_size, output)
    elif 'vgg' in name:
        # if 'bn' in name:
        if name == 'vgg11':
            model = vgg11(pretrained=False, num_classes=output)
        elif name == 'vgg16':
            model = vgg16(pretrained=False, num_classes=output)
        else:
            assert False

        for n, m in model.named_modules():
            if hasattr(m, 'bias') and not isinstance(m, _BatchNorm):
                if m.bias is not None:
                    if m.bias.sum() == 0:
                        m.bias = None

    elif 'alexnet' in name:
        model = AlexNet(num_classes=output)

        for n, m in model.named_modules():
            if hasattr(m, 'bias') and not isinstance(m, _BatchNorm):
                if m.bias is not None:
                    if m.bias.sum() == 0:
                        m.bias = None
    elif 'resnet' in name:
        if name == 'resnet20':
            model = resnet20(num_classes=output)
        elif name == 'resnet32':
            model = resnet32(num_classes=output)
        else:
            assert False

        for n, m in model.named_modules():
            if hasattr(m, 'bias') and not isinstance(m, _BatchNorm):
                if m.bias is not None:
                    if m.bias.sum() == 0:
                        m.bias = None

    else:
        assert False

    return model
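The bias-stripping loop above is repeated verbatim in the VGG, AlexNet, and ResNet branches; it could be factored into one helper. A sketch of such a refactor:

from torch.nn.modules.batchnorm import _BatchNorm

def strip_zero_biases(model):
    # Drop bias parameters that are identically zero, except in batch norm layers.
    for _, m in model.named_modules():
        if hasattr(m, 'bias') and not isinstance(m, _BatchNorm):
            if m.bias is not None and m.bias.sum() == 0:
                m.bias = None
    return model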
Example #14
class Solver(object):
    def __init__(self, config):
        self.model = None
        self.lr = config.lr
        self.epochs = config.epoch
        self.train_batch_size = config.trainBatchSize
        self.test_batch_size = config.testBatchSize
        self.criterion = None
        self.optimizer = None
        self.scheduler = None
        self.device = None
        self.cuda = config.cuda
        self.train_loader = None
        self.test_loader = None

    def load_data(self):
        train_transform = transforms.Compose(
            [transforms.RandomHorizontalFlip(),
             transforms.ToTensor()])
        test_transform = transforms.Compose([transforms.ToTensor()])
        train_set = torchvision.datasets.CIFAR10(root='./data',
                                                 train=True,
                                                 download=True,
                                                 transform=train_transform)
        self.train_loader = torch.utils.data.DataLoader(
            dataset=train_set, batch_size=self.train_batch_size, shuffle=True)
        test_set = torchvision.datasets.CIFAR10(root='./data',
                                                train=False,
                                                download=True,
                                                transform=test_transform)
        self.test_loader = torch.utils.data.DataLoader(
            dataset=test_set, batch_size=self.test_batch_size, shuffle=False)

    def load_model(self):
        if self.cuda:
            self.device = torch.device('cuda')
            cudnn.benchmark = True
        else:
            self.device = torch.device('cpu')

        self.model = LeNet().to(self.device)
        # self.model = AlexNet().to(self.device)
        # self.model = VGG11().to(self.device)
        # self.model = VGG13().to(self.device)
        # self.model = VGG16().to(self.device)
        # self.model = VGG19().to(self.device)
        # self.model = GoogLeNet().to(self.device)
        # self.model = resnet18().to(self.device)
        # self.model = resnet34().to(self.device)
        # self.model = resnet50().to(self.device)
        # self.model = resnet101().to(self.device)
        # self.model = resnet152().to(self.device)
        # self.model = DenseNet121().to(self.device)
        # self.model = DenseNet161().to(self.device)
        # self.model = DenseNet169().to(self.device)
        # self.model = DenseNet201().to(self.device)
        # self.model = WideResNet(depth=28, num_classes=10).to(self.device)

        self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)
        self.scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer,
                                                        milestones=[75, 150],
                                                        gamma=0.5)
        self.criterion = nn.CrossEntropyLoss().to(self.device)

    def train(self):
        print("train:")
        self.model.train()
        train_loss = 0
        train_correct = 0
        total = 0

        for batch_num, (data, target) in enumerate(self.train_loader):
            data, target = data.to(self.device), target.to(self.device)
            self.optimizer.zero_grad()
            output = self.model(data)
            feature = self.model.feature
            # print('output.shape = {}, target.shape = {}, feature.shape = {}'.format(output.size(), target.size(), feature.size()))
            loss = self.criterion(output, target)
            loss.backward()
            self.optimizer.step()
            train_loss += loss.item()
            # torch.max returns (max values, argmax indices); dim=1 is the class dimension
            prediction = torch.max(output, 1)
            total += target.size(0)

            # train_correct incremented by one if predicted right
            train_correct += np.sum(
                prediction[1].cpu().numpy() == target.cpu().numpy())

            progress_bar(
                batch_num, len(self.train_loader),
                'Loss: %.4f | Acc: %.3f%% (%d/%d)' %
                (train_loss / (batch_num + 1), 100. * train_correct / total,
                 train_correct, total))

        return train_loss, train_correct / total

    def test(self):
        print("test:")
        self.model.eval()
        test_loss = 0
        test_correct = 0
        total = 0

        with torch.no_grad():
            for batch_num, (data, target) in enumerate(self.test_loader):
                data, target = data.to(self.device), target.to(self.device)
                output = self.model(data)

                # CAM
                # feature = self.model.feature
                # print('feature: {}'.format(feature))

                loss = self.criterion(output, target)
                test_loss += loss.item()
                prediction = torch.max(output, 1)
                total += target.size(0)
                test_correct += np.sum(
                    prediction[1].cpu().numpy() == target.cpu().numpy())

                progress_bar(
                    batch_num, len(self.test_loader),
                    'Loss: %.4f | Acc: %.3f%% (%d/%d)' %
                    (test_loss / (batch_num + 1), 100. * test_correct / total,
                     test_correct, total))

        return test_loss, test_correct / total

    def save(self):
        model_out_path = "model.pth"
        torch.save(self.model, model_out_path)
        print("Checkpoint saved to {}".format(model_out_path))

    def run(self):
        self.load_data()
        print('Data loaded successfully.')
        self.load_model()
        print('Model loaded successfully.')
        accuracy = 0
        for epoch in range(1, self.epochs + 1):
            self.scheduler.step(epoch)
            print("\n===> epoch: %d/200" % epoch)
            train_result = self.train()
            print(train_result)
            test_result = self.test()
            accuracy = max(accuracy, test_result[1])
            if epoch == self.epochs:
                print("===> BEST ACC. PERFORMANCE: %.3f%%" % (accuracy * 100))
                self.save()
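Driving the Solver takes a config object carrying the fields read in __init__. A minimal sketch using argparse (the defaults are assumptions; the field names match what the class consumes):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--epoch', type=int, default=200)
parser.add_argument('--trainBatchSize', type=int, default=128)
parser.add_argument('--testBatchSize', type=int, default=100)
parser.add_argument('--cuda', action='store_true')
config = parser.parse_args()

solver = Solver(config)
solver.run()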
Example #15
# test_type = 'single task classification'
test_type = 'multi tasks classification'

if test_type == 'single task classification':

    root = './data'
    download = True  # download MNIST dataset or not

    trans = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize((0.5,), (1.0,))])
    train_set = dset.MNIST(root=root, train=True,
                           transform=trans, download=download)
    test_set = dset.MNIST(root=root, train=False, transform=trans)

    model = LeNet()

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

    model.compile(optimizer, criterion, metrics=['top1', 'top2'])

elif test_type == 'multi tasks classification':

    train_set = CategoricalDatasetMultiTasks(N, D_in, D_out1, D_out2)
    test_set = CategoricalDatasetMultiTasks(
        int(N * 0.25), D_in, D_out1, D_out2)

    model = MultiTasksClassification(D_in, H1, H2, D_out1, D_out2)

    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
Example #16
def plot_with_label(images, labels, true_labels, batch_size, rows=4,
                    cols=4):
    images = images.squeeze()  # 16x28x28
    plt.figure(figsize=(10, 12))
    #     plt.tight_layout()

    for i in range(batch_size):
        lbl = labels[i].item()
        true_lbl = true_labels[i].item()
        plt.subplot(rows, cols, i + 1)  # 1 ~ 4
        plt.imshow(images[i, :, :].numpy(), cmap='gray')
        plt.title('{} -> {}'.format(true_lbl, lbl))
        plt.axis('off')
    plt.savefig('../plot.png')
    # plt.show()


if __name__ == '__main__':
    model = LeNet()

    epoch = 12
    ckpt = torch.load('../ckpts/LeNet_e{:02}.pt'.format(epoch))
    model.load_state_dict(ckpt)

    _, dataset = get_dataset('mnist')
    imgs, true_labels = next(iter(dataset))
    labels = model(imgs)
    labels = torch.argmax(labels, dim=1)
    batch_size = dataset.batch_size

    # rows/cols must be integers for plt.subplot
    plot_with_label(imgs, labels, true_labels, batch_size,
                    int(batch_size**0.5), int(batch_size**0.5))
Example #17
def train(ARGS):
    # Define helper function for evaluating on test data during training
    def eval(epoch):
        from train_utils import clean_eval
        test_accuracy, test_loss, _ = clean_eval(sess, x, y, is_training,
                                                 testloader, n_classes, logits,
                                                 preds)
        # Write tensorboard summary
        acc_summary = tf.Summary()
        acc_summary.value.add(tag='Evaluation/accuracy/test',
                              simple_value=test_accuracy)
        writer_test.add_summary(acc_summary, epoch)

        # Write tensorboard summary
        err_summary = tf.Summary()
        err_summary.value.add(tag='Evaluation/error/test',
                              simple_value=1.0 - test_accuracy)
        writer_test.add_summary(err_summary, epoch)

        # Write tensorboard summary
        loss_summary = tf.Summary()
        loss_summary.value.add(tag='Evaluation/loss/test',
                               simple_value=test_loss)
        writer_test.add_summary(loss_summary, epoch)

    # Define helper function for evaluating on adversarial test data during training
    def adv_eval(epoch):
        from train_utils import adversarial_eval
        adv_accuracy, adv_loss = adversarial_eval(sess,
                                                  x,
                                                  y,
                                                  is_training,
                                                  adv_testloader,
                                                  n_classes,
                                                  preds,
                                                  adv_preds,
                                                  eval_all=True)

        # Write tensorboard summary
        acc_summary = tf.Summary()
        acc_summary.value.add(tag='Evaluation/adversarial-accuracy/test',
                              simple_value=adv_accuracy)
        writer_test.add_summary(acc_summary, epoch)

        # Write tensorboard summary
        err_summary = tf.Summary()
        err_summary.value.add(tag='Evaluation/adversarial-error/test',
                              simple_value=1.0 - adv_accuracy)
        writer_test.add_summary(err_summary, epoch)

        # Write tensorboard summary
        loss_summary = tf.Summary()
        loss_summary.value.add(tag='Evaluation/adversarial-loss/test',
                               simple_value=adv_loss)
        writer_test.add_summary(loss_summary, epoch)

    # Define computational graph
    with tf.Graph().as_default() as g:
        # Define placeholders
        with tf.device('/gpu:0'):
            with tf.name_scope('Placeholders'):
                x = tf.placeholder(dtype=tf.float32,
                                   shape=input_shape,
                                   name='inputs')
                x_pair1 = tf.placeholder(dtype=tf.float32,
                                         shape=input_shape,
                                         name='x-pair1')
                x_pair2 = tf.placeholder(dtype=tf.float32,
                                         shape=input_shape,
                                         name='x-pair2')
                y = tf.placeholder(dtype=tf.float32,
                                   shape=(None, n_classes),
                                   name='labels')
                is_training = tf.placeholder_with_default(True,
                                                          shape=(),
                                                          name='is-training')

        # Define TF session
        config = tf.ConfigProto(log_device_placement=False,
                                allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        sess = tf.Session(graph=g, config=config)

        # Define model
        with tf.name_scope('Model'):
            with tf.device('/gpu:0'):
                model = Model(nb_classes=n_classes,
                              input_shape=input_shape,
                              is_training=is_training)

                # Define forward-pass
                with tf.name_scope('Logits'):
                    logits = model.get_logits(x)
                with tf.name_scope('Probs'):
                    preds = tf.nn.softmax(logits)

                with tf.name_scope('Accuracy'):
                    ground_truth = tf.argmax(y, axis=1)
                    predicted_label = tf.argmax(preds, axis=1)
                    correct_prediction = tf.equal(predicted_label,
                                                  ground_truth)
                    acc = tf.reduce_mean(tf.to_float(correct_prediction),
                                         name='accuracy')
                    tf.add_to_collection('accuracies', acc)

                    err = tf.identity(1.0 - acc, name='error')
                    tf.add_to_collection('accuracies', err)

                # Define losses
                with tf.name_scope('Losses'):
                    ce_loss, wd_loss, clp_loss, lsq_loss, at_loss, alp_loss = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
                    adv_logits = None

                    if ARGS.ct:
                        with tf.name_scope('Cross-Entropy-Loss'):
                            ce_loss = tf.reduce_mean(
                                tf.nn.softmax_cross_entropy_with_logits(
                                    logits=logits, labels=y),
                                name='cross-entropy-loss')

                            tf.add_to_collection('losses', ce_loss)

                    if ARGS.at:
                        with tf.name_scope('Adversarial-Cross-Entropy-Loss'):
                            at_loss, adv_logits = get_at_loss(
                                sess, x, y, model, ARGS.eps, ARGS.eps_iter,
                                ARGS.nb_iter)
                            at_loss = tf.identity(at_loss, name='at-loss')
                            tf.add_to_collection('losses', at_loss)

                    with tf.name_scope('Regularizers'):
                        if ARGS.wd:
                            with tf.name_scope('Weight-Decay'):
                                for var in tf.trainable_variables():
                                    if 'beta' in var.op.name:
                                        # Do not regularize bias of batch normalization
                                        continue
                                    # print('regularizing: ', var.op.name)
                                    wd_loss += tf.nn.l2_loss(var)

                                reg_loss = tf.identity(wd_loss, name='wd-loss')
                                tf.add_to_collection('losses', reg_loss)

                        if ARGS.alp:
                            with tf.name_scope('Adversarial-Logit-Pairing'):
                                alp_loss = get_alp_loss(
                                    sess, x, y, logits, adv_logits, model,
                                    ARGS.eps, ARGS.eps_iter, ARGS.nb_iter)

                                alp_loss = tf.identity(alp_loss,
                                                       name='alp-loss')
                                tf.add_to_collection('losses', alp_loss)

                        if ARGS.clp:
                            with tf.name_scope('Clean-Logit-Pairing'):
                                clp_loss = get_clp_loss(
                                    x_pair1, x_pair2, model)
                                clp_loss = tf.identity(clp_loss,
                                                       name='clp-loss')
                                tf.add_to_collection('losses', clp_loss)

                        if ARGS.lsq:
                            with tf.name_scope('Logit-Squeezing'):
                                lsq_loss = get_lsq_loss(x, model)
                                lsq_loss = tf.identity(lsq_loss,
                                                       name='lsq-loss')
                                tf.add_to_collection('losses', lsq_loss)

                    with tf.name_scope('Total-Loss'):
                        # Define objective function
                        total_loss = (ARGS.ct_lambda * ce_loss +
                                      ARGS.at_lambda * at_loss +
                                      ARGS.wd_lambda * wd_loss +
                                      ARGS.clp_lambda * clp_loss +
                                      ARGS.lsq_lambda * lsq_loss +
                                      ARGS.alp_lambda * alp_loss)

                        total_loss = tf.identity(total_loss, name='total-loss')
                        tf.add_to_collection('losses', total_loss)

                # Define PGD adversary
                with tf.name_scope('PGD-Attacker'):
                    pgd_params = {
                        'ord': np.inf,
                        'y': y,
                        'eps': ARGS.eps / 255,
                        'eps_iter': ARGS.eps_iter / 255,
                        'nb_iter': ARGS.nb_iter,
                        'rand_init': True,
                        'rand_minmax': ARGS.eps / 255,
                        'clip_min': 0.,
                        'clip_max': 1.,
                        'sanity_checks': True
                    }

                    pgd = ProjectedGradientDescent(model, sess=sess)
                    adv_x = pgd.generate(x, **pgd_params)

                    with tf.name_scope('Logits'):
                        adv_logits = model.get_logits(adv_x)
                    with tf.name_scope('Probs'):
                        adv_preds = tf.nn.softmax(adv_logits)

        # Define optimizer
        with tf.device('/gpu:0'):
            with tf.name_scope('Optimizer'):
                # Define global step variable
                global_step = tf.get_variable(
                    name='global_step',
                    shape=[],  # scalar
                    dtype=tf.float32,
                    initializer=tf.zeros_initializer(),
                    trainable=False)

                optimizer = tf.train.AdamOptimizer(learning_rate=ARGS.lr,
                                                   beta1=0.9,
                                                   beta2=0.999,
                                                   epsilon=1e-6,
                                                   use_locking=False,
                                                   name='Adam')
                trainable_vars = tf.trainable_variables()

                # tf.GraphKeys.UPDATE_OPS stores the moving_mean and
                # moving_variance update ops for batch normalization
                update_bn_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
                with tf.control_dependencies(update_bn_ops):
                    grads_and_vars = optimizer.compute_gradients(
                        total_loss, trainable_vars)
                    train_step = optimizer.apply_gradients(
                        grads_and_vars, global_step=global_step)

        # Add Tensorboard summaries
        with tf.device('/gpu:0'):
            # Create file writers
            writer_train = tf.summary.FileWriter(ARGS.log_dir + '/train',
                                                 graph=g)
            writer_test = tf.summary.FileWriter(ARGS.log_dir + '/test')

            # Add summary for input images
            with tf.name_scope('Image-Summaries'):
                # Create image summary ops
                tf.summary.image('input',
                                 x,
                                 max_outputs=2,
                                 collections=['training'])

            # Add summaries for the training losses
            losses = tf.get_collection('losses')
            for entry in losses:
                tf.summary.scalar(entry.name, entry, collections=['training'])

            # Add summaries for the training accuracies
            accs = tf.get_collection('accuracies')
            for entry in accs:
                tf.summary.scalar(entry.name, entry, collections=['training'])

            # Add summaries for all trainable vars
            for var in trainable_vars:
                tf.summary.histogram(var.op.name,
                                     var,
                                     collections=['training'])
                var_norm = tf.norm(var, ord='euclidean')
                tf.summary.scalar(var.op.name + '/l2norm',
                                  var_norm,
                                  collections=['training'])

            # Add summaries for variable gradients
            for grad, var in grads_and_vars:
                if grad is not None:
                    tf.summary.histogram(var.op.name + '/gradients',
                                         grad,
                                         collections=['training'])
                    grad_norm = tf.norm(grad, ord='euclidean')
                    tf.summary.scalar(var.op.name + '/gradients/l2norm',
                                      grad_norm,
                                      collections=['training'])

            # Add summaries for the logits and model predictions
            with tf.name_scope('Logits-Summaries'):
                variable_summaries(tf.identity(logits, name='logits'),
                                   name='logits',
                                   collections=['training', 'test'],
                                   histo=True)
            with tf.name_scope('Predictions-Summaries'):
                variable_summaries(tf.identity(preds, name='predictions'),
                                   name='predictions',
                                   collections=['training', 'test'],
                                   histo=True)

        # Initialize all variables
        with sess.as_default():
            tf.global_variables_initializer().run()

        # Collect training params
        train_params = {
            'epochs': ARGS.epochs,
            'eval_step': ARGS.eval_step,
            'adv_eval_step': ARGS.adv_eval_step,
            'n_classes': n_classes,
            'clp': ARGS.clp
        }

        # Start training loop
        model_train(sess,
                    x,
                    y,
                    x_pair1,
                    x_pair2,
                    is_training,
                    trainloader,
                    train_step,
                    args=train_params,
                    evaluate=eval,
                    adv_evaluate=adv_eval,
                    writer_train=writer_train)

        # Save the trained model
        if ARGS.save:
            save_path = os.path.join(ARGS.save_dir, ARGS.filename)
            saver = tf.train.Saver(var_list=tf.global_variables())
            saver.save(sess, save_path)
            print("Saved model at {:s}".format(str(ARGS.save_dir)))
Example #18
def model_gen_fun():
    model = LeNet(num_classes=1, num_channels=1).eval()
    return model
Example #19
    torchvision.transforms.CenterCrop(28),  # crop a 28x28 patch from the center of the image
    torchvision.transforms.ToTensor(),  # convert the PIL Image to a Tensor scaled to [0, 1]
    torchvision.transforms.Normalize(mean=[.5, .5, .5],
                                     std=[.5, .5, .5])  # standardize to [-1, 1] with the given mean and std
])

test_dataset = DOGCAT(root='../Pytorch-Tutorial/datasets/dogcat_2',
                      train=False,
                      transform=transform)

test_loader = Data.DataLoader(dataset=test_dataset,
                              batch_size=100,
                              shuffle=False,
                              num_workers=2)

net = LeNet()
print(net)

if os.path.isfile('saves/dogcat_lenet_params.pkl'):
    net.load_state_dict(torch.load('saves/dogcat_lenet_params.pkl'))
else:
    print("dogcat_lenet_params.pkl don't exists.")
    exit()

# Test the Model
total = 0
correct = 0
for images, labels in test_loader:
    images = Variable(images)
    outputs = net(images)
    _, predicted = torch.max(outputs.data, 1)
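The snippet is cut off inside the evaluation loop; the counters initialized above are typically updated like this (a sketch of the standard continuation, not the repository's exact code):

    total += labels.size(0)
    correct += (predicted == labels).sum().item()

print('Test accuracy: %.2f %%' % (100.0 * correct / total))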
Example #20
def main():
    # Data Loader (Input Pipeline)
    print('loading dataset...')
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=batch_size,
                                               num_workers=args.num_workers,
                                               drop_last=False,
                                               shuffle=False)

    val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
                                             batch_size=batch_size,
                                             num_workers=args.num_workers,
                                             drop_last=False,
                                             shuffle=False)

    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              batch_size=batch_size,
                                              num_workers=args.num_workers,
                                              drop_last=False,
                                              shuffle=False)
    # Define models
    print('building model...')
    if args.dataset == 'mnist':
        clf1 = LeNet()
    if args.dataset == 'fashionmnist':
        clf1 = resnet.ResNet18_F(10)
    if args.dataset == 'cifar10':
        clf1 = resnet.ResNet34(10)
    if args.dataset == 'svhn':
        clf1 = resnet.ResNet34(10)

    clf1.cuda()
    optimizer = torch.optim.SGD(clf1.parameters(),
                                lr=args.lr,
                                weight_decay=args.weight_decay)

    with open(txtfile, "a") as myfile:
        myfile.write('epoch train_acc val_acc test_acc\n')

    epoch = 0
    train_acc = 0
    val_acc = 0
    # evaluate models with random weights
    test_acc = evaluate(test_loader, clf1)
    print('Epoch [%d/%d] Test Accuracy on the %s test data: Model %.4f %%' %
          (epoch + 1, args.n_epoch_1, len(test_dataset), test_acc))
    # save results
    with open(txtfile, "a") as myfile:
        myfile.write(
            str(int(epoch)) + ' ' + str(train_acc) + ' ' + str(val_acc) + ' ' +
            str(test_acc) + ' ' + "\n")

    best_acc = 0.0
    # training
    for epoch in range(1, args.n_epoch_1):
        # train models
        clf1.train()
        train_acc = train(clf1, train_loader, epoch, optimizer,
                          nn.CrossEntropyLoss())
        # validation
        val_acc = evaluate(val_loader, clf1)
        # evaluate models
        test_acc = evaluate(test_loader, clf1)

        # save results
        print(
            'Epoch [%d/%d] Train Accuracy on the %s train data: Model %.4f %%'
            % (epoch + 1, args.n_epoch_1, len(train_dataset), train_acc))
        print('Epoch [%d/%d] Val Accuracy on the %s val data: Model %.4f %% ' %
              (epoch + 1, args.n_epoch_1, len(val_dataset), val_acc))
        print(
            'Epoch [%d/%d] Test Accuracy on the %s test data: Model %.4f %% ' %
            (epoch + 1, args.n_epoch_1, len(test_dataset), test_acc))
        with open(txtfile, "a") as myfile:
            myfile.write(
                str(int(epoch)) + ' ' + str(train_acc) + ' ' + str(val_acc) +
                ' ' + str(test_acc) + ' ' + "\n")

        if val_acc > best_acc:
            best_acc = val_acc
            torch.save(clf1.state_dict(), model_save_dir + '/' + 'model.pth')

    print('Running matrix factorization...')
    clf1.load_state_dict(torch.load(model_save_dir + '/' + 'model.pth'))
    A = respresentations_extract(train_loader, clf1, len(train_dataset),
                                 args.dim, batch_size)
    A_val = respresentations_extract(val_loader, clf1, len(val_dataset),
                                     args.dim, batch_size)
    A_total = np.append(A, A_val, axis=0)
    W_total, H_total, error = train_m(A_total, args.basis, args.iteration_nmf,
                                      1e-5)
    for i in range(W_total.shape[0]):
        for j in range(W_total.shape[1]):
            if W_total[i, j] < 1e-6:
                W_total[i, j] = 0.
    W = W_total[0:len(train_dataset), :]
    W_val = W_total[len(train_dataset):, :]
    print('Estimating the transition matrix... waiting...')
    logits_matrix = probability_extract(train_loader, clf1, len(train_dataset),
                                        args.num_classes, batch_size)
    idx_matrix_group, transition_matrix_group = estimate_matrix(
        logits_matrix, model_save_dir)
    logits_matrix_val = probability_extract(val_loader, clf1, len(val_dataset),
                                            args.num_classes, batch_size)
    idx_matrix_group_val, transition_matrix_group_val = estimate_matrix(
        logits_matrix_val, model_save_dir)
    func = nn.MSELoss()

    model = Matrix_optimize(args.basis, args.num_classes)
    optimizer_1 = torch.optim.Adam(model.parameters(), lr=0.001)
    basis_matrix_group = basis_matrix_optimize(model, optimizer_1, args.basis,
                                               args.num_classes, W,
                                               transition_matrix_group,
                                               idx_matrix_group, func,
                                               model_save_dir, args.n_epoch_4)

    basis_matrix_group_val = basis_matrix_optimize(
        model, optimizer_1, args.basis, args.num_classes, W_val,
        transition_matrix_group_val, idx_matrix_group_val, func,
        model_save_dir, args.n_epoch_4)

    for i in range(basis_matrix_group.shape[0]):
        for j in range(basis_matrix_group.shape[1]):
            for k in range(basis_matrix_group.shape[2]):
                if basis_matrix_group[i, j, k] < 1e-6:
                    basis_matrix_group[i, j, k] = 0.

    optimizer_ = torch.optim.SGD(clf1.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay,
                                 momentum=args.momentum)

    best_acc = 0.0
    for epoch in range(1, args.n_epoch_2):
        # train model
        clf1.train()

        train_acc = train_correction(clf1, train_loader, epoch, optimizer_, W,
                                     basis_matrix_group, batch_size,
                                     args.num_classes, args.basis)
        # validation
        val_acc = val_correction(clf1, val_loader, epoch, W_val,
                                 basis_matrix_group_val, batch_size,
                                 args.num_classes, args.basis)

        # evaluate models
        test_acc = evaluate(test_loader, clf1)
        if val_acc > best_acc:
            best_acc = val_acc
            torch.save(clf1.state_dict(), model_save_dir + '/' + 'model.pth')
        with open(txtfile, "a") as myfile:
            myfile.write(
                str(int(epoch)) + ' ' + str(train_acc) + ' ' + str(val_acc) +
                ' ' + str(test_acc) + ' ' + "\n")
        # save results
        print(
            'Epoch [%d/%d] Train Accuracy on the %s train data: Model %.4f %%'
            % (epoch + 1, args.n_epoch_2, len(train_dataset), train_acc))
        print('Epoch [%d/%d] Val Accuracy on the %s val data: Model %.4f %% ' %
              (epoch + 1, args.n_epoch_2, len(val_dataset), val_acc))
        print(
            'Epoch [%d/%d] Test Accuracy on the %s test data: Model %.4f %% ' %
            (epoch + 1, args.n_epoch_2, len(test_dataset), test_acc))

    clf1.load_state_dict(torch.load(model_save_dir + '/' + 'model.pth'))
    optimizer_r = torch.optim.Adam(clf1.parameters(),
                                   lr=args.lr_revision,
                                   weight_decay=args.weight_decay)
    nn.init.constant_(clf1.T_revision.weight, 0.0)

    for epoch in range(1, args.n_epoch_3):
        # train models
        clf1.train()
        train_acc = train_revision(clf1, train_loader, epoch, optimizer_r, W,
                                   basis_matrix_group, batch_size,
                                   args.num_classes, args.basis)
        # validation
        val_acc = val_revision(clf1, val_loader, epoch, W_val,
                               basis_matrix_group, batch_size,
                               args.num_classes, args.basis)
        # evaluate models
        test_acc = evaluate(test_loader, clf1)
        with open(txtfile, "a") as myfile:
            myfile.write(
                str(int(epoch)) + ' ' + str(train_acc) + ' ' + str(val_acc) +
                ' ' + str(test_acc) + ' ' + "\n")

        # save results
        print(
            'Epoch [%d/%d] Train Accuracy on the %s train data: Model %.4f %%'
            % (epoch + 1, args.n_epoch_3, len(train_dataset), train_acc))
        print('Epoch [%d/%d] Val Accuracy on the %s val data: Model %.4f %% ' %
              (epoch + 1, args.n_epoch_3, len(val_dataset), val_acc))
        print(
            'Epoch [%d/%d] Test Accuracy on the %s test data: Model %.4f %% ' %
            (epoch + 1, args.n_epoch_3, len(test_dataset), test_acc))
Example #21
def main():
    if not os.path.isdir(CHECKPOINT):
        os.makedirs(CHECKPOINT)

    print('==> Preparing dataset')

    trainloader, testloader = load_CIFAR(batch_size=BATCH_SIZE,
                                         num_workers=NUM_WORKERS)

    CLASSES = []
    AUROCs = []
    auroc = AverageMeter()

    for t, cls in enumerate(ALL_CLASSES):

        print('\nTask: [%d | %d]\n' % (t + 1, len(ALL_CLASSES)))

        CLASSES = [cls]

        print("==> Creating model")
        model = LeNet(num_classes=1)

        if CUDA:
            model = model.cuda()
            model = nn.DataParallel(model)
            cudnn.benchmark = True

        print('    Total params: %.2fK' %
              (sum(p.numel() for p in model.parameters()) / 1000))

        criterion = nn.BCELoss()
        optimizer = optim.SGD(model.parameters(),
                              lr=LEARNING_RATE,
                              momentum=MOMENTUM,
                              weight_decay=WEIGHT_DECAY)

        print("==> Learning")

        best_loss = 1e10
        learning_rate = LEARNING_RATE

        for epoch in range(EPOCHS):

            # decay learning rate
            if (epoch + 1) % EPOCHS_DROP == 0:
                learning_rate *= LR_DROP
                for param_group in optimizer.param_groups:
                    param_group['lr'] = learning_rate

            print('Epoch: [%d | %d]' % (epoch + 1, EPOCHS))

            train_loss = train(trainloader,
                               model,
                               criterion,
                               CLASSES,
                               CLASSES,
                               optimizer=optimizer,
                               use_cuda=CUDA)
            test_loss = train(testloader,
                              model,
                              criterion,
                              CLASSES,
                              CLASSES,
                              test=True,
                              use_cuda=CUDA)

            # save model
            is_best = test_loss < best_loss
            best_loss = min(test_loss, best_loss)
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'loss': test_loss,
                    'optimizer': optimizer.state_dict()
                }, CHECKPOINT, is_best)

        print("==> Calculating AUROC")

        filepath_best = os.path.join(CHECKPOINT, "best.pt")
        checkpoint = torch.load(filepath_best)
        model.load_state_dict(checkpoint['state_dict'])

        new_auroc = calc_avg_AUROC(model, testloader, CLASSES, CLASSES, CUDA)
        auroc.update(new_auroc)

        print('New Task AUROC: {}'.format(new_auroc))
        print('Average AUROC: {}'.format(auroc.avg))

        AUROCs.append(auroc.avg)

    print('\nAverage Per-task Performance over number of tasks')
    for i, p in enumerate(AUROCs):
        print("%d: %f" % (i + 1, p))
Example #22
def main():

    use_cuda = torch.cuda.is_available() and not args.no_cuda
    device = torch.device('cuda' if use_cuda else 'cpu')
    print(device)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if use_cuda:
        torch.cuda.manual_seed(args.seed)
        torch.backends.cudnn.deterministic = True

    rgb = False
    if args.mode == 'rgb':
        rgb = True

    if args.gray_scale:
        rgb = False

    if args.tracking_data_mod is True:
        args.input_size = 192

    # DATALOADER

    train_dataset = GesturesDataset(model=args.model, csv_path='csv_dataset', train=True, mode=args.mode, rgb=rgb,
                                    normalization_type=1,
                                    n_frames=args.n_frames, resize_dim=args.input_size,
                                    transform_train=args.train_transforms, tracking_data_mod=args.tracking_data_mod)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.n_workers)

    validation_dataset = GesturesDataset(model=args.model, csv_path='csv_dataset', train=False, mode=args.mode, rgb=rgb, normalization_type=1,
                                   n_frames=args.n_frames, resize_dim=args.input_size, tracking_data_mod=args.tracking_data_mod)
    validation_loader = DataLoader(validation_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.n_workers)

    # network parameters

    in_channels = args.n_frames if not rgb else args.n_frames * 3
    n_classes = args.n_classes

    if args.model == 'LeNet':
        model = LeNet(input_channels=in_channels, input_size=args.input_size, n_classes=n_classes).to(device)

    elif args.model == 'AlexNet':
        model = AlexNet(input_channels=in_channels, input_size=args.input_size, n_classes=n_classes).to(device)

    elif args.model == 'AlexNetBN':
        model = AlexNetBN(input_channels=in_channels, input_size=args.input_size, n_classes=n_classes).to(device)

    elif args.model == "Vgg16":
        model = Vgg16(input_channels=in_channels, input_size=args.input_size, n_classes=n_classes).to(device)

    elif args.model == "Vgg16P":
        model = models.vgg16(pretrained=args.pretrained)
        for params in model.parameters():
            params.requires_grad = False
        model.features._modules['0'] = nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=(3, 3), stride=1, padding=1)
        model.classifier._modules['6'] = nn.Linear(4096, n_classes)
        # model.fc = torch.nn.Linear(model.fc.in_features, n_classes)
        model = model.to(device)

    elif args.model == "ResNet18P":
        model = models.resnet18(pretrained=args.pretrained)
        for params in model.parameters():
            params.requires_grad = False
        model._modules['conv1'] = nn.Conv2d(in_channels, 64, 7, stride=2, padding=3)
        model.fc = torch.nn.Linear(model.fc.in_features, n_classes)
        model = model.to(device)

    elif args.model == "ResNet34P":
        model = models.resnet34(pretrained=args.pretrained)
        for params in model.parameters():
            params.requires_grad = False
        model._modules['conv1'] = nn.Conv2d(in_channels, 64, 7, stride=2, padding=3)
        model.fc = torch.nn.Linear(model.fc.in_features, n_classes)
        model = model.to(device)

    elif args.model == "DenseNet121P":
        model = models.densenet121(pretrained=args.pretrained)
        for params in model.parameters():
            params.requires_grad = False
        model.features._modules['conv0'] = nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=(7, 7),
                                                     stride=(2, 2), padding=(3, 3))
        model.classifier = nn.Linear(in_features=1024, out_features=n_classes, bias=True)
        model = model.to(device)

    elif args.model == "DenseNet161P":
        model = models.densenet161(pretrained=args.pretrained)
        # for params in model.parameters():
        #     params.requires_grad = False
        model.features._modules['conv0'] = nn.Conv2d(in_channels=in_channels, out_channels=96, kernel_size=(7, 7),
                                                     stride=(2, 2), padding=(3, 3))
        model.classifier = nn.Linear(in_features=2208, out_features=n_classes, bias=True)
        model = model.to(device)

    elif args.model == "DenseNet169P":
        model = models.densenet169(pretrained=args.pretrained)
        for params in model.parameters():
            params.requires_grad = False
        model.features._modules['conv0'] = nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=(7, 7),
                                                     stride=(2, 2), padding=(3, 3))
        model.classifier = nn.Linear(in_features=1664, out_features=n_classes, bias=True)
        model = model.to(device)

    elif args.model == "DenseNet201P":
        model = models.densenet201(pretrained=args.pretrained)
        for params in model.parameters():
            params.requires_grad = False
        model.features._modules['conv0'] = nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=(7, 7),
                                                     stride=(2, 2), padding=(3, 3))
        model.classifier = nn.Linear(in_features=1920, out_features=n_classes, bias=True)
        model = model.to(device)
    # RNN
    elif args.model == 'LSTM' or args.model == 'GRU':
        model = Rnn(rnn_type=args.model, input_size=args.input_size, hidden_size=args.hidden_size,
                    batch_size=args.batch_size,
                    num_classes=args.n_classes, num_layers=args.n_layers,
                    final_layer=args.final_layer).to(device)
    # C3D

    elif args.model == 'C3D':
        if args.pretrained:
            model = C3D(rgb=rgb, num_classes=args.n_classes)


            # modify the parameters
            print('ok')

            model.load_state_dict(torch.load('c3d_weights/c3d.pickle', map_location=device), strict=False)
            # # for params in model.parameters():
            #     # params.requires_grad = False

            model.conv1 = nn.Conv3d(1 if not rgb else 3, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1))
            # replace fc6 because the input has 30 frames
            model.fc6 = nn.Linear(16384, 4096)  # 28672 (112*200)
            model.fc7 = nn.Linear(4096, 4096)
            model.fc8 = nn.Linear(4096, n_classes)

            model = model.to(device)


    # Conv-lstm
    elif args.model == 'Conv-lstm':
        model = ConvLSTM(input_size=(args.input_size, args.input_size),
                         input_dim=1 if not rgb else 3,
                         hidden_dim=[64, 64, 128],
                         kernel_size=(3, 3),
                         num_layers=args.n_layers,
                         batch_first=True,
                         ).to(device)
    elif args.model == 'DeepConvLstm':
        model = DeepConvLstm(input_channels_conv=1 if not rgb else 3, input_size_conv=args.input_size, n_classes=12,
                             n_frames=args.n_frames, batch_size=args.batch_size).to(device)

    elif args.model == 'ConvGRU':
        model = ConvGRU(input_size=40, hidden_sizes=[64, 128],
                        kernel_sizes=[3, 3], n_layers=2).to(device)

    else:
        raise NotImplementedError

    if args.opt == 'SGD':
        optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
        # optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, momentum=args.momentum)

    elif args.opt == 'Adam':
        optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)

    loss_function = nn.CrossEntropyLoss().to(device)

    start_epoch = 0
    if args.resume:
        checkpoint = torch.load("/projects/fabio/weights/gesture_recog_weights/checkpoint{}.pth.tar".format(args.model))
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        start_epoch = checkpoint['epoch']

        print("Resuming state:\n-epoch: {}\n{}".format(start_epoch, model))

    # name the experiment
    personal_name = "{}_{}_{}".format(args.model, args.mode, args.exp_name)
    info_experiment = "{}".format(personal_name)
    log_dir = "/projects/fabio/logs/gesture_recog_logs/exps"
    weight_dir = personal_name
    log_file = open("{}/{}.txt".format("/projects/fabio/logs/gesture_recog_logs/txt_logs", personal_name), 'w')
    log_file.write(personal_name + "\n\n")
    if personal_name:
        exp_name = (("exp_{}_{}".format(time.strftime("%c"), personal_name)).replace(" ", "_")).replace(":", "-")
    else:
        exp_name = (("exp_{}".format(time.strftime("%c"), personal_name)).replace(" ", "_")).replace(":", "-")
    writer = SummaryWriter("{}".format(os.path.join(log_dir, exp_name)))

    # add info experiment
    writer.add_text('Info experiment',
                    "model:{}"
                    "\n\npretrained:{}"
                    "\n\nbatch_size:{}"
                    "\n\nepochs:{}"
                    "\n\noptimizer:{}"
                    "\n\nlr:{}"
                    "\n\ndn_lr:{}"
                    "\n\nmomentum:{}"
                    "\n\nweight_decay:{}"
                    "\n\nn_frames:{}"
                    "\n\ninput_size:{}"
                    "\n\nhidden_size:{}"
                    "\n\ntracking_data_mode:{}"
                    "\n\nn_classes:{}"
                    "\n\nmode:{}"
                    "\n\nn_workers:{}"
                    "\n\nseed:{}"
                    "\n\ninfo:{}"
                    "".format(args.model, args.pretrained, args.batch_size, args.epochs, args.opt, args.lr, args.dn_lr, args.momentum,
                              args.weight_decay, args.n_frames, args.input_size, args.hidden_size, args.tracking_data_mod,
                              args.n_classes, args.mode, args.n_workers, args.seed, info_experiment))

    trainer = Trainer(model=model, loss_function=loss_function, optimizer=optimizer, train_loader=train_loader,
                      validation_loader=validation_loader,
                      batch_size=args.batch_size, initial_lr=args.lr,  device=device, writer=writer, personal_name=personal_name, log_file=log_file,
                      weight_dir=weight_dir, dynamic_lr=args.dn_lr)


    print("experiment: {}".format(personal_name))
    start = time.time()
    for ep in range(start_epoch, args.epochs):
        trainer.train(ep)
        trainer.val(ep)

    # display classes results
    classes = ['g0', 'g1', 'g2', 'g3', 'g4', 'g5', 'g6', 'g7', 'g8', 'g9', 'g10', 'g11']
    for i in range(args.n_classes):
        print('Accuracy of {} : {:.3f}%'.format(
            classes[i], 100 * trainer.class_correct[i] / trainer.class_total[i]))

    end = time.time()
    h, rem = divmod(end - start, 3600)
    m, s = divmod(rem, 60)
    print("\nelapsed time (ep.{}):{:0>2}:{:0>2}:{:05.2f}".format(args.epochs, int(h), int(m), s))


    # write per-class accuracy to the log file

    log_file.write("\n\n")
    for i in range(args.n_classes):
        log_file.write('Accuracy of {} : {:.3f}%\n'.format(
            classes[i], 100 * trainer.class_correct[i] / trainer.class_total[i]))
    log_file.close()
Example #23
def gen_path(directory):
    # (reconstructed) the snippet below calls gen_path() before creating each
    # SummaryWriter, so presumably it ensures the directory exists and returns
    # it; only the two lines inside the if survive from the original
    if not os.path.exists(directory):
        os.makedirs(directory)
    return directory

    writer_loss = SummaryWriter(gen_path(loss_path))
    writer_acc = SummaryWriter(gen_path(acc_path))

    trans_mnist = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
    dataset_train = MNIST('./data/mnist/', train=True, download=True, transform=trans_mnist)
    dataset_test = MNIST('./data/mnist/', train=False, download=True, transform=trans_mnist)
    # sample users
    dict_users = split_noniid_shuffle(dataset_train, args.num_nodes)

    img_size = dataset_train[0][0].shape
    print(img_size)

    net_glob = LeNet().to(args.device)
    print(net_glob.fc1.weight.type())
    print(net_glob)
    net_glob.train()

    # copy weights (use deep copies so the per-node dicts do not alias the
    # global state dict; assumes `import copy` among the script's imports)
    w_glob = net_glob.state_dict()
    w_glob_grad = copy.deepcopy(w_glob)

    # training
    #loss_train = []

    w_locals = [copy.deepcopy(w_glob) for i in range(args.num_nodes)]

    for rnd in range(args.epochs):  # one iteration per communication round
        loss_locals = []
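
The fragment cuts off inside the communication round, but with w_locals holding one state dict per node, the aggregation step it is building toward is presumably FedAvg-style parameter averaging. A minimal sketch (the function name and call site are assumptions, not part of the original):

def fed_avg(w_locals):
    # element-wise average of the nodes' state dicts
    w_avg = copy.deepcopy(w_locals[0])
    for k in w_avg.keys():
        for w in w_locals[1:]:
            w_avg[k] += w[k]
        w_avg[k] = torch.div(w_avg[k], len(w_locals))
    return w_avg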
Example #24
    returns={'processed_labels'},
    resize=(img_height, img_width))

print("Number of images in the dataset:", train_dataset.get_n_samples())
print("Number of images in the dataset:", validation_dataset.get_n_samples())

# In[8]:

# Keras expects integer step counts; round up so the last partial batch is kept
# (assumes `from math import ceil` among the notebook's imports)
steps_per_epoch = ceil(train_dataset.get_n_samples() / train_batch_size)
validation_steps = ceil(validation_dataset.get_n_samples() / validation_batch_size)

# In[9]:

model = LeNet(n_classes=1,
              img_width=img_width,
              img_depth=img_depth,
              img_height=img_height,
              activation=activation)

# In[10]:

model.summary()

# In[11]:

optimizer = Adam(lr=0.001,
                 beta_1=0.9,
                 beta_2=0.999,
                 epsilon=None,
                 decay=0.00001,
                 amsgrad=True)
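
The notebook stops right after the optimizer; presumably the next cells compile the model and launch training with the step counts computed above. A hedged sketch (the loss choice follows from n_classes=1, and the generator calls are assumptions, not the dataset's documented API):

model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
model.fit_generator(train_dataset.generate(batch_size=train_batch_size),
                    steps_per_epoch=steps_per_epoch,
                    validation_data=validation_dataset.generate(batch_size=validation_batch_size),
                    validation_steps=validation_steps,
                    epochs=10)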
Example #25
from torch.utils.data import DataLoader
from torchvision.transforms import ToTensor
from test import test_img
from torch.utils.tensorboard import SummaryWriter

if __name__ == '__main__':
    args = args_parser()
    args.device = torch.device('cuda:{}'.format(args.gpu) if torch.cuda.is_available() else 'cpu')

    batch_size = 256
    trans_mnist = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
    train_dataset = MNIST('./data/mnist/', train=True, download=True, transform=trans_mnist)
    test_dataset = MNIST('./data/mnist/', train=False, download=True, transform=trans_mnist)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)  # shuffle training batches each epoch
    test_loader = DataLoader(test_dataset, batch_size=batch_size)
    model = LeNet().to(args.device)
    sgd = SGD(model.parameters(), lr=1e-1)
    cross_error = CrossEntropyLoss()
    epoch = 100

    writer = SummaryWriter('./runs/t_centerlize')
    for _epoch in range(epoch):
        epoch_loss = []
        for idx, (train_x, train_label) in enumerate(train_loader):
            train_x, train_label = train_x.to(args.device), train_label.to(args.device)
            #label_np = np.zeros((train_label.shape[0], 10))
            sgd.zero_grad()
            predict_y = model(train_x.float())
            _error = cross_error(predict_y, train_label.long())
            epoch_loss.append(_error.item())  # record the batch loss; epoch_loss was created but never filled
            _error.backward()
            sgd.step()
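
The snippet ends mid-loop; presumably each epoch closes with an evaluation pass that feeds the SummaryWriter created above. A hedged sketch, assuming test_img(model, dataset, args) returns an (accuracy, loss) pair as in common federated-learning utilities:

        acc_test, loss_test = test_img(model, test_dataset, args)
        writer.add_scalar('test/accuracy', acc_test, _epoch)
        writer.add_scalar('test/loss', loss_test, _epoch)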
Example #26
testset = torchvision.datasets.CIFAR10(root='./data',
                                       train=False,
                                       download=True,
                                       transform=transform_test)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=100,
                                         shuffle=False,
                                         num_workers=2)

classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
           'ship', 'truck')

# Model
print('==> Building model..')
model = LeNet(3, len(classes)).to(device)

if device.type == 'cuda':
    model = torch.nn.DataParallel(model)
    cudnn.benchmark = True

if os.path.isfile(ckpt_file):
    checkpoint = torch.load(ckpt_file)
    model.load_state_dict(checkpoint['net'])
    best_acc = checkpoint['acc']
    start_epoch = checkpoint['epoch']

criterion = nn.NLLLoss()
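# nn.NLLLoss expects log-probabilities, so this LeNet variant presumably ends
# in log_softmax; if it returned raw logits, nn.CrossEntropyLoss would be the
# drop-in replacement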
optimizer = optim.SGD(model.parameters(),
                      lr=1e-3,
                      momentum=0.9)
Example #27
def main():
    opt = Parser(train=False).get()

    # dataset and data loader
    _, val_loader, adv_val_loader, _, num_classes = \
            load_dataset(opt.dataset, opt.batch_size, opt.data_root,
                         False, 0.0, opt.num_val_samples,
                         workers=4)

    # model
    if opt.arch == 'lenet':
        model = LeNet(num_classes)
    elif opt.arch == 'resnet':
        model = ResNetv2_20(num_classes)
    else:
        raise NotImplementedError

    # move model to device
    model.to(opt.device)

    # load trained weight
    try:
        model.load_state_dict(torch.load(opt.weight_path))
    except RuntimeError:  # key mismatch when the checkpoint came from a DataParallel model
        model_weight = convert_model_from_parallel(opt.weight_path)
        model.load_state_dict(model_weight)

    # criterion
    criterion = nn.CrossEntropyLoss()

    # advertorch attacker
    if opt.attack == 'pgd':
        attacker = PGDAttack(model,
                             loss_fn=criterion,
                             eps=opt.eps / 255,
                             nb_iter=opt.num_steps,
                             eps_iter=opt.eps_iter / 255,
                             rand_init=True,
                             clip_min=opt.clip_min,
                             clip_max=opt.clip_max,
                             ord=np.inf,
                             targeted=False)
    else:
        raise NotImplementedError

    # trainer
    trainer = Trainer(opt, model, criterion, attacker)
    trainer.print_freq = -1

    # validation
    val_losses, val_acc1s, val_acc5s = \
        trainer.validate(val_loader)
    aval_losses, aval_acc1s, aval_acc5s = \
        trainer.adv_validate(adv_val_loader)

    print('[model] {}'.format(opt.weight_path))
    print('[standard]\n'
          'loss: {:.4f} | acc1: {:.2f}% | acc5: {:.2f}%'
          '\n[adversarial]\n'
          'loss: {:.4f} | acc1: {:.2f}% | acc5: {:.2f}%'.format(
              val_losses['val'].avg, val_acc1s['val'].avg,
              val_acc5s['val'].avg, aval_losses['aval'].avg,
              aval_acc1s['aval'].avg, aval_acc5s['aval'].avg))
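
Under the hood, Trainer.adv_validate presumably crafts each adversarial batch with the attacker before scoring it. A minimal sketch of that per-batch step (the loop itself is an assumption, but attacker.perturb is advertorch's standard entry point):

for x, y in adv_val_loader:
    x, y = x.to(opt.device), y.to(opt.device)
    x_adv = attacker.perturb(x, y)         # run PGD from a random start
    with torch.no_grad():
        loss = criterion(model(x_adv), y)  # robust loss on the perturbed batch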
Example #28
train_loader = torch.utils.data.DataLoader(trainset,
                                           batch_size=batch_size,
                                           shuffle=True,
                                           num_workers=4,
                                           pin_memory=True)
testset = datasets.MNIST(root=test_dir,
                         train=False,
                         download=True,
                         transform=test_transform)
test_loader = torch.utils.data.DataLoader(testset,
                                          batch_size=batch_size,
                                          shuffle=False,
                                          num_workers=4,
                                          pin_memory=True)

net = LeNet()
criterion = nn.CrossEntropyLoss()

if use_cuda:
    print('start move to cuda')
    torch.cuda.manual_seed_all(seed)
    cudnn.benchmark = True

    device = torch.device("cuda:0")
    net.to(device=device)
    criterion.to(device=device, dtype=dtype)

optimizer = optim.Adam(
    net.parameters(),
    lr=0.001,
)
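
The snippet sets up net, criterion, and optimizer but stops before the loop that uses them. A minimal epoch sketch over those objects (the loop body is an assumption, not part of the original):

net.train()
for x, y in train_loader:
    if use_cuda:
        x, y = x.to(device), y.to(device)
    optimizer.zero_grad()
    loss = criterion(net(x), y)  # cross-entropy on the batch
    loss.backward()
    optimizer.step()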
Example #29
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size',
                        type=int,
                        default=64,
                        metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=1000,
                        metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs',
                        type=int,
                        default=10,
                        metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.01,
                        metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.5,
                        metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(datasets.MNIST(
        './data',
        train=True,
        download=True,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               **kwargs)
    test_loader = torch.utils.data.DataLoader(datasets.MNIST(
        './data',
        train=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
                                              batch_size=args.test_batch_size,
                                              shuffle=True,
                                              **kwargs)

    # feature_blob must be the module-level global that the hook below (and
    # presumably cam()) write to and read from; without this declaration the
    # array created here would be a new local shadowing it
    global feature_blob
    feature_blob = np.zeros([1, 16, 8, 8])

    model = LeNet()

    def hook(module, input, output):
        global feature_blob
        feature_blob = output

    model._modules.get('conv2').register_forward_hook(hook)
    model = torch.nn.DataParallel(model).cuda()
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum)

    for epoch in range(1, args.epochs + 1):
        train(args, model, train_loader, optimizer, epoch)
        cam(model, epoch)
        test(args, model, test_loader)

    generate_gif()
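
The cam() helper called each epoch is not shown, but with conv2 activations of shape (N, 16, 8, 8) captured in feature_blob, a class-activation-style heatmap can be sketched roughly as follows (a simplified channel average; the original may weight channels by classifier weights instead):

def simple_activation_map(feature_blob):
    fmap = feature_blob[0].detach().cpu().numpy()  # (16, 8, 8) maps for one image
    heat = fmap.mean(axis=0)                       # collapse channels into one map
    heat -= heat.min()
    return heat / (heat.max() + 1e-8)              # normalize to [0, 1]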
Example #30
        args.dataset,
        batch_size=args.batch_size,
        conv_net=True,
        data_augmentation=args.data_augmentation,
        num_workers=num_workers)
    if args.data_augmentation:
        print('    data augmentation')

    window_size = train_loader.dataset.data[0].shape[0]
    if len(train_loader.dataset.data[0].shape) == 3:
        num_input_channels = train_loader.dataset.data[0].shape[2]
    else:
        num_input_channels = 1

    model = LeNet(num_input_channels=num_input_channels,
                  window_size=window_size,
                  bias=True).to(device)

# Multi-GPU
if num_workers > 1:
    model = nn.DataParallel(model)
criterion = nn.CrossEntropyLoss()

if __name__ == "__main__":

    # Save everything in a `ddict`
    SAV = ddict(args=args.__dict__)

    # Store training and test performance after each training epoch
    SAV.perf = ddict(tr=[], te=[])
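
The fragment ends before the loop that fills these lists; presumably each epoch appends one training and one test figure, roughly like this (train_one_epoch and evaluate are assumed helpers, not part of the original):

    for epoch in range(args.epochs):
        SAV.perf.tr.append(train_one_epoch(model, train_loader, optimizer, criterion))
        SAV.perf.te.append(evaluate(model, test_loader))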