# Example no. 1
# 0
def main(args):
    """Build a Ladder trainer on split MNIST, then run supervised training on the same model."""
    import sys
    sys.path.append('/home/kp600168/semi')
    from allinone import SEMI_DATASET_REGISTRY, SEMI_TRAINER_REGISTRY
    print(args)

    # Shared MNIST preprocessing: resize to LeNet's 32x32 input, then normalize.
    transforms = [
        tf.Resize((32, 32)),
        tf.ToTensor(),
        tf.Normalize((0.1307, ), (0.3081, )),
    ]
    mnist = SEMI_DATASET_REGISTRY('split')(MNIST,
                                           args.dataset,
                                           10,
                                           transforms,
                                           semi_size=args.semi_size)
    train_loader, test_loader, _, num_classes = mnist(
        args.batch_size, num_workers=0, return_num_classes=True)

    lenet = MODEL_REGISTRY(args.model)(num_classes=num_classes)
    # Linear LR scaling: lr_256 is the reference rate at batch size 256.
    scaled_lr = args.lr_256 * args.batch_size / 256
    ladder_kwargs = {
        'bn_list': [lenet.bn1, lenet.bn2, lenet.bn3],
        'sigma_list': [0.3, 0.3, 0.3],
        'v_list': [ConvTranspose2d(16, 6, 10, 2),
                   ConvTranspose2d(120, 16, 10)],
        'lam_list': [0.1, 0.01, 0.01],
    }
    # NOTE(review): the Ladder trainer is constructed but its loops below are
    # disabled; it is kept because constructing it may attach state to `lenet`.
    trainer = SEMI_TRAINER_REGISTRY('Ladder')(lenet,
                                              SGD(lr=scaled_lr, momentum=0.9),
                                              F.cross_entropy,
                                              **ladder_kwargs,
                                              reporters=[TQDMReporter()])

    # for _ in trainer.epoch_range(args.epochs):
    #     trainer.train(train_loader)
    #     trainer.test(test_loader)

    trainer = SupervisedTrainer(lenet,
                                SGD(lr=scaled_lr, momentum=0.9),
                                F.cross_entropy,
                                reporters=[TQDMReporter()])
    for _ in trainer.epoch_range(args.epochs):
        trainer.train(train_loader)
        trainer.test(test_loader)

    print(f"Max Accuracy={max(trainer.history['accuracy/test'])}")
def main(args):
    """Train Adversarially Learned Inference (v2) on CIFAR-10 with 40000 unlabeled samples."""
    normalize = tf.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010))
    cifar = SemiVisionSet(CIFAR10,
                          args.dataset,
                          10,
                          [tf.ToTensor(), normalize],
                          semi_size=40000)
    train_loader, test_loader, num_classes = cifar(args.batch_size,
                                                   num_workers=4,
                                                   return_num_classes=True,
                                                   use_prefetcher=True)
    # The five networks of the ALI game: two generators, three discriminators.
    model_dict = {
        'generator_x': Generator_x(),
        'generator_z': Generator_z(),
        'discriminator_x': Discriminator_x(),
        'discriminator_z': Discriminator_z(),
        'discriminator_x_z': Discriminator_x_z(num_classes),
    }
    # LR scaled linearly from the reference batch size of 100.
    optimizer = Adam(lr=args.lr_100 * args.batch_size / 100,
                     betas=(0.5, 1 - 1e-3))
    trainer = AdversariallyLearnedInferenceTrainerV2(
        model_dict,
        optimizer,
        F.cross_entropy,
        1,
        reporters=[TQDMReporter(), TensorboardReporter('.')])

    for _ in trainer.epoch_range(args.epochs):
        trainer.train(train_loader)
        trainer.test(test_loader)

    print(f"Max Accuracy={max(trainer.history['accuracy/test'])}")
# Example no. 3
# 0
def main(args):
    """Train MixMatch on MNIST, treating the 59900-sample validation split as unlabeled."""
    change_val_to_unlabel()
    mnist = VisionSet(MNIST,
                      args.dataset,
                      10,
                      [tf.ToTensor(), tf.Normalize((0.1307,), (0.3081,))],
                      [tf.RandomResizedCrop((32, 32), (0.9, 1.0), (0.9, 1.1))],
                      [tf.Resize((32, 32))])
    # Each unlabeled sample gets the random crop applied, producing 2 views
    # (TransformManyTimes with count 2).
    mnist.unlabel_transform = TransformManyTimes(
        [tf.RandomResizedCrop((32, 32), (0.9, 1.0), (0.9, 1.1))],
        [tf.ToTensor(), tf.Normalize((0.1307,), (0.3081,))],
        2)
    train_loader, test_loader, unlabel_loader, num_classes = mnist(
        args.batch_size, num_workers=0, return_num_classes=True, val_size=59900)

    model = LeNet5(num_classes=num_classes)

    trainer = MixMatchTrainer(model,
                              SGD(lr=args.lr_256 * args.batch_size / 256,
                                  momentum=0.9),
                              F.cross_entropy,
                              0.5,
                              0.2,
                              1,
                              reporters=[TQDMReporter()])
    # cycle() keeps the (smaller) labeled loader paired with every unlabeled batch.
    for _ in trainer.epoch_range(args.epochs):
        trainer.train(zip(train_loader, cycle(unlabel_loader)))
        trainer.test(test_loader)
def main(args):
    """Train Interpolation Consistency (v2) on MNIST with 59900 unlabeled samples."""
    mnist = SemiVisionSet(MNIST,
                          args.dataset,
                          10,
                          [tf.Resize((32, 32)),
                           tf.ToTensor(),
                           tf.Normalize((0.1307, ), (0.3081, ))],
                          semi_size=59900)
    train_loader, test_loader, num_classes = mnist(args.batch_size,
                                                   num_workers=4,
                                                   return_num_classes=True)
    lenet = LeNet5(num_classes=num_classes)
    # Positional hyper-parameters 1e-5 / 0.99 / 0.02 — presumably consistency
    # weight, EMA decay, and beta; confirm against the trainer's signature.
    trainer = InterpolationConsistencyTrainerV2(
        lenet,
        SGD(lr=args.lr_256 * args.batch_size / 256, momentum=0.9),
        F.cross_entropy,
        1e-5,
        0.99,
        0.02,
        reporters=[TQDMReporter()])
    for _ in trainer.epoch_range(args.epochs):
        trainer.train(train_loader)
        trainer.test(test_loader)

    # trainer = SupervisedTrainer(lenet, SGD(lr=args.lr_256 * args.batch_size /
    #                                        256, momentum=0.9), F.cross_entropy, reporters=[TQDMReporter()])
    # for _ in trainer.epoch_range(args.epochs):
    #     trainer.train(train_loader)
    #     trainer.test(test_loader)

    print(f"Max Accuracy={max(trainer.history['accuracy/test'])}")
def main(args):
    """Train Adversarially Learned Inference (via registries) on mixed CIFAR-10."""
    import sys
    sys.path.append('/home/kp600168/semi')
    from allinone import SEMI_DATASET_REGISTRY, SEMI_TRAINER_REGISTRY, SCHEDULER_REGISTRY
    print(args)
    transforms = [
        tf.RandomResizedCrop((32, 32)),
        tf.ToTensor(),
        tf.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ]
    cifar = SEMI_DATASET_REGISTRY('mix')(CIFAR10,
                                         args.dataset,
                                         10,
                                         transforms,
                                         semi_size=args.semi_size)
    train_loader, test_loader, num_classes = cifar(
        args.batch_size, num_workers=args.num_workers, return_num_classes=True)
    # The five ALI networks, all resolved through the model registry.
    model_dict = {
        'generator_x': MODEL_REGISTRY('Generator_x')(),
        'generator_z': MODEL_REGISTRY('Generator_z')(),
        'discriminator_x': MODEL_REGISTRY('Discriminator_x')(),
        'discriminator_z': MODEL_REGISTRY('Discriminator_z')(),
        'discriminator_x_z': MODEL_REGISTRY('Discriminator_x_z')(num_classes),
    }
    trainer_kwargs = {
        'model_dict': model_dict,
        # LR scaled linearly from the reference batch size of 100.
        'optimizer': Adam(lr=args.lr_100 * args.batch_size / 100,
                          betas=(0.5, 1 - 1e-3)),
        'loss_f': F.cross_entropy,
        # Constant consistency weight of 1.
        'consistency_weight': SCHEDULER_REGISTRY('identity')(1.),
    }
    trainer = SEMI_TRAINER_REGISTRY('AdversariallyLearnedInference')(
        **trainer_kwargs,
        reporters=[TQDMReporter(), TensorboardReporter('.')])

    for _ in trainer.epoch_range(args.epochs):
        trainer.train(train_loader)
        trainer.test(test_loader)

    # trainer = SupervisedTrainer(lenet, SGD(lr=args.lr_256 * args.batch_size /
    #                                        256, momentum=0.9), F.cross_entropy, reporters=[TQDMReporter()])
    # for _ in trainer.epoch_range(args.epochs):
    #     trainer.train(train_loader)
    #     trainer.test(test_loader)

    print(f"Max Accuracy={max(trainer.history['accuracy/test'])}")
# Example no. 6
# 0
def main(args):
    """Train an Interpolation Consistency model (via registries) on mixed MNIST.

    Reads from ``args``: dataset path, semi_size, batch_size, num_workers,
    model name, lr_256 (reference LR at batch size 256), and epochs.
    """
    import sys
    sys.path.append('/home/kp600168/semi')
    from allinone import SEMI_DATASET_REGISTRY, SEMI_TRAINER_REGISTRY, SCHEDULER_REGISTRY
    print(args)
    mnist = SEMI_DATASET_REGISTRY('mix')(MNIST,
                                         args.dataset,
                                         10, [
                                             tf.Resize((32, 32)),
                                             tf.ToTensor(),
                                             tf.Normalize((0.1307, ),
                                                          (0.3081, ))
                                         ],
                                         semi_size=args.semi_size)
    train_loader, test_loader, num_classes = mnist(
        args.batch_size, num_workers=args.num_workers, return_num_classes=True)
    # NOTE: a second, never-used model instance ("lenet") was created here in
    # the original; that dead assignment has been removed — only the model
    # built inside `kwargs` below is trained.
    kwargs = {
        'model':
        MODEL_REGISTRY(args.model)(num_classes=num_classes),
        # Linear LR scaling from the reference batch size of 256.
        'optimizer':
        SGD(lr=args.lr_256 * args.batch_size / 256, momentum=0.9),
        'loss_f':
        F.cross_entropy,
        # Constant consistency weight of 1e-5.
        'consistency_weight':
        SCHEDULER_REGISTRY('identity')(1e-5),
        # Ramps epoch-wise toward the 0.99 cap: 0, 1/2, 2/3, ...
        'alpha':
        SCHEDULER_REGISTRY('lambda')(lambda epoch: min(1 - 1 /
                                                       (1 + epoch), 0.99)),
        'beta':
        0.02
    }
    trainer = SEMI_TRAINER_REGISTRY('InterpolationConsistency')(
        **kwargs, reporters=[TQDMReporter()])

    for _ in trainer.epoch_range(args.epochs):
        trainer.train(train_loader)
        trainer.test(test_loader)

    print(f"Max Accuracy={max(trainer.history['accuracy/test'])}")
# Example no. 7
# 0
def main(args):
    """Train MixMatch (via registries) on split MNIST."""
    import sys
    sys.path.append('/home/kp600168/semi')
    from allinone import SEMI_DATASET_REGISTRY, SEMI_TRAINER_REGISTRY, SCHEDULER_REGISTRY, TRANSFORM_REGISTRY
    print(args)
    # Augmentation applied to each unlabeled image, repeated 4 times
    # ('ManyTimes' wrapper) to produce multiple views.
    unlabel_transform = TRANSFORM_REGISTRY('ManyTimes')(tf.Compose([
        tf.RandomResizedCrop((32, 32), (0.9, 1.0), (0.9, 1.1)),
        tf.ToTensor(),
        tf.Normalize((0.1307, ), (0.3081, ))
    ]), 4)
    test_transforms = [
        tf.Resize((32, 32)),
        tf.ToTensor(),
        tf.Normalize((0.1307, ), (0.3081, )),
    ]
    mnist = SEMI_DATASET_REGISTRY('split')(MNIST,
                                           args.dataset,
                                           10,
                                           [],
                                           [unlabel_transform.transform],
                                           test_transforms,
                                           semi_size=args.semi_size,
                                           unlabel_transform=unlabel_transform)

    train_loader, test_loader, unlabel_loader, num_classes = mnist(
        args.batch_size, num_workers=0, return_num_classes=True)
    trainer_kwargs = {
        'model': MODEL_REGISTRY(args.model)(num_classes=num_classes),
        'optimizer': SGD(lr=args.lr_256 * args.batch_size / 256, momentum=0.9),
        'loss_f': F.cross_entropy,
        'temperature': 0.5,
        'beta': 0.2,
        'consistency_weight': SCHEDULER_REGISTRY('identity')(1.),
        'dataset_type': 'split',
    }
    trainer = SEMI_TRAINER_REGISTRY('MixMatch')(**trainer_kwargs,
                                                reporters=[TQDMReporter()])

    # 'split' datasets feed labeled and unlabeled batches zipped together.
    for _ in trainer.epoch_range(args.epochs):
        trainer.train(zip(train_loader, unlabel_loader))
        trainer.test(test_loader)
    print(f"Max Accuracy={max(trainer.history['accuracy/test'])}")
# Example no. 8
# 0
def main(args):
    """Train a Ladder network (v2) on MNIST with 59900 unlabeled samples."""
    mnist = SemiVisionSet(MNIST,
                          args.dataset,
                          10,
                          [tf.Resize((32, 32)),
                           tf.ToTensor(),
                           tf.Normalize((0.1307, ), (0.3081, ))],
                          semi_size=59900)
    train_loader, test_loader, num_classes = mnist(args.batch_size,
                                                   num_workers=4,
                                                   return_num_classes=True)
    lenet = LeNet5(num_classes=num_classes)
    trainer = LadderTrainerV2(
        lenet,
        SGD(lr=args.lr_256 * args.batch_size / 256, momentum=0.9),
        F.cross_entropy,
        [lenet.bn1, lenet.bn2, lenet.bn3],  # bn_list
        [0.3, 0.3, 0.3],                    # sigma_list
        [ConvTranspose2d(16, 6, 10, 2),
         ConvTranspose2d(120, 16, 10)],     # v_list
        [0.1, 0.01, 0.01],                  # lam_list
        reporters=[TQDMReporter()])

    for _ in trainer.epoch_range(args.epochs):
        trainer.train(train_loader)
        trainer.test(test_loader)

    # trainer = SupervisedTrainer(lenet, SGD(lr=args.lr_256 * args.batch_size /
    #                                        256, momentum=0.9), F.cross_entropy, reporters=[TQDMReporter()])
    # for _ in trainer.epoch_range(args.epochs):
    #     trainer.train(train_loader)
    #     trainer.test(test_loader)

    print(f"Max Accuracy={max(trainer.history['accuracy/test'])}")
def main(args):
    """Train a Mean-Teacher model (via registries) on mixed MNIST."""
    import sys
    sys.path.append('/home/kp600168/semi')
    from allinone import SEMI_DATASET_REGISTRY, SEMI_TRAINER_REGISTRY, TRANSFORM_REGISTRY, SCHEDULER_REGISTRY
    print(args)
    # Every unlabeled image is augmented twice ('twice' wrapper), giving the
    # two views consumed by mean-teacher consistency.
    unlabel_transform = TRANSFORM_REGISTRY('twice')(tf.Compose([
        tf.RandomResizedCrop((32, 32), (0.9, 1.0), (0.9, 1.1)),
        tf.ToTensor(),
        tf.Normalize((0.1307, ), (0.3081, ))
    ]))
    test_transforms = [
        tf.Resize((32, 32)),
        tf.ToTensor(),
        tf.Normalize((0.1307, ), (0.3081, )),
    ]
    mnist = SEMI_DATASET_REGISTRY('mix')(MNIST,
                                         args.dataset,
                                         10,
                                         [],
                                         [unlabel_transform],
                                         test_transforms,
                                         semi_size=args.semi_size)
    train_loader, test_loader, num_classes = mnist(args.batch_size,
                                                   num_workers=4,
                                                   return_num_classes=True)
    trainer_kwargs = {
        'model': MODEL_REGISTRY(args.model)(num_classes=num_classes),
        'optimizer': SGD(lr=args.lr_256 * args.batch_size / 256, momentum=0.9),
        'loss_f': F.cross_entropy,
        'consistency_weight': SCHEDULER_REGISTRY('identity')(0.01),
        # Ramps epoch-wise toward the 0.99 cap: 0, 1/2, 2/3, ...
        'alpha': SCHEDULER_REGISTRY('lambda')(
            lambda epoch: min(1 - 1 / (1 + epoch), 0.99)),
        'dataset_type': 'mix',
    }
    trainer = SEMI_TRAINER_REGISTRY('meanteacher')(**trainer_kwargs,
                                                   reporters=[TQDMReporter()])
    for _ in trainer.epoch_range(args.epochs):
        trainer.train(train_loader)
        trainer.test(test_loader)

    # Alternative using the 'split' dataset with zipped loaders:
    # mnist = SEMI_DATASET_REGISTRY('split')(MNIST, args.dataset, 10, [], [unlabel_transform.transform], [tf.Resize(
    #     (32, 32)), tf.ToTensor(), tf.Normalize((0.1307,), (0.3081,))], semi_size=args.semi_size, unlabel_transform=unlabel_transform)
    # train_loader, test_loader, unlabel_loader, num_classes = mnist(
    #     args.batch_size, num_workers=0, return_num_classes=True)
    # kwargs = {
    #     'model': MODEL_REGISTRY(args.model)(num_classes=num_classes),
    #     'optimizer': SGD(lr=args.lr_256 * args.batch_size / 256, momentum=0.9),
    #     'loss_f': F.cross_entropy,
    #     'consistency_weight': SCHEDULER_REGISTRY('identity')(0.01),
    #     'alpha': SCHEDULER_REGISTRY('lambda')(lambda epoch: min(1 - 1 / (1 + epoch), 0.99)),
    #     'dataset_type': 'split',
    # }
    # trainer = SEMI_TRAINER_REGISTRY('meanteacher')(
    #     **kwargs, reporters=[TQDMReporter()])
    # for _ in trainer.epoch_range(args.epochs):
    #     trainer.train(zip(train_loader, unlabel_loader))
    #     trainer.test(test_loader)

    print(f"Max Accuracy={max(trainer.history['accuracy/test'])}")
# Example no. 10
# 0
def main(args):
    """Train Mean-Teacher (v2) on MNIST with 59900 unlabeled samples."""
    # Unlabeled images are transformed twice so the two trainer branches see
    # different random crops of the same sample.
    unlabel_transform = TransformTwice(tf.Compose([
        tf.RandomResizedCrop((32, 32), (0.9, 1.0), (0.9, 1.1)),
        tf.ToTensor(),
        tf.Normalize((0.1307,), (0.3081,)),
    ]))
    mnist = SemiVisionSet(MNIST,
                          args.dataset,
                          10,
                          [],
                          [unlabel_transform],
                          [tf.Resize((32, 32)),
                           tf.ToTensor(),
                           tf.Normalize((0.1307,), (0.3081,))],
                          semi_size=59900)
    train_loader, test_loader, num_classes = mnist(
        args.batch_size, num_workers=4, return_num_classes=True)
    lenet = LeNet5(num_classes=num_classes)
    # Positional hyper-parameters 0.01 / 0.99 — presumably consistency weight
    # and EMA decay; confirm against the trainer's signature.
    trainer = MeanTeacherTrainerV2(
        lenet,
        SGD(lr=args.lr_256 * args.batch_size / 256, momentum=0.9),
        F.cross_entropy,
        0.01,
        0.99,
        reporters=[TQDMReporter()])
    for _ in trainer.epoch_range(args.epochs):
        trainer.train(train_loader)
        trainer.test(test_loader)

    print(f"Max Accuracy={max(trainer.history['accuracy/test'])}")