def main():
    args = parser.parse_args()
    model = None
    dtype = torch.cuda.FloatTensor

    if args.dataset == 'tiny_imagenet':
        dataloader = TinyImagenetDataLoader()
        num_classes = 200
    elif args.dataset == 'dogs':
        dataloader = DogsDataLoader()
        num_classes = 121
    else:
        dataloader = Cifar10DataLoader()
        num_classes = 10

    if args.model == 'resnet34':
        model = ResNet34(num_classes).type(dtype)
    elif args.model == 'resnet50':
        model = ResNet50(num_classes).type(dtype)
    elif args.model == 'resnet50-preact':
        model = ResNet50(num_classes, pre_activation=True).type(dtype)
    elif args.model == 'resnet50-imported':
        model = ResNetImported(num_classes).type(dtype)

    agent = Agent(model, dataloader)
    agent.load_checkpoint(args.checkpoint)
    # agent.check_accuracy()
    agent.print_accuracy()
Example #2
def get_model(arch, num_classes, channels=3):
    """
    Args:
        arch: string, Network architecture
        num_classes: int, Number of classes
        channels: int, Number of input channels
    Returns:
        model, nn.Module, generated model
    """
    if arch.lower() == "resnet18":
        model = ResNet18(channels, num_classes)
    elif arch.lower() == "resnet34":
        model = ResNet34(channels, num_classes)
    elif arch.lower() == "resnet50":
        model = ResNet50(channels, num_classes)
    elif arch.lower() == "resnet101":
        model = ResNet101(channels, num_classes)
    elif arch.lower() == "resnet152":
        model = ResNet152(channels, num_classes)
    elif arch.lower() == "mobilenet_v1":
        model = MobileNetV1(num_classes, channels)
    elif arch.lower() == "mobilenet_v2":
        model = MobileNetV2(num_classes, channels)
    else:
        raise NotImplementedError(
            f"{arch} not implemented. "
            f"For supported architectures see documentation")
    return model
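A minimal usage sketch for the factory above; the CLI wiring and the 10-class/3-channel values are illustrative assumptions, not part of the original snippet:

# Hypothetical driver for get_model(); argument values are placeholders.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--arch", default="resnet34")
parser.add_argument("--num_classes", type=int, default=10)
parser.add_argument("--channels", type=int, default=3)
cli_args = parser.parse_args()

net = get_model(cli_args.arch, cli_args.num_classes, channels=cli_args.channels)
print(type(net).__name__)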
Example #3
def get_model_for_training(model_name, device):
    if model_name == 'ResNet18':
        print("Model: ResNet18", file=sys.stderr)
        return ResNet18().to(device), ResNet18().to(device)
    elif model_name == 'ResNet34':
        print("Model: ResNet34", file=sys.stderr)
        return ResNet34().to(device), ResNet34().to(device)
    elif model_name == 'PreActResNet18':
        print("Model: PreActivate ResNet18", file=sys.stderr)
        return PreActResNet18().to(device), PreActResNet18().to(device)
    elif model_name == 'PreActResNet34':
        print("Model: PreActivate ResNet 34")
        return PreActResNet34().to(device), PreActResNet34().to(device)
    elif model_name == 'WideResNet28':
        print("Model: Wide ResNet28", file=sys.stderr)
        return WideResNet28().to(device), WideResNet28().to(device)
    elif model_name == 'WideResNet34':
        print("Model: Wide ResNet34", file=sys.stderr)
        return WideResNet34().to(device), WideResNet34().to(device)
Example #4
    def instantiate_model(hyper_params):
        # modify this function if you want to change the model

        return ModularSVNHClassifier(
            cfg.MODEL,
            feature_transformation=ResNet34(
                hyper_params["FEATURES_OUTPUT_SIZE"]
            ),
            length_classifier=LengthClassifier(
                cfg.MODEL, hyper_params["FEATURES_OUTPUT_SIZE"]
            ),
            number_classifier=NumberClassifier,
            hyper_params=hyper_params,
        )
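A hedged sketch of how instantiate_model might be called; only the FEATURES_OUTPUT_SIZE key appears in the original code, the remaining entries and their values are placeholders:

# Hypothetical hyper-parameter dictionary; 512 and LR are illustrative values only.
hyper_params = {
    "FEATURES_OUTPUT_SIZE": 512,
    "LR": 1e-3,
}
model = instantiate_model(hyper_params)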
def get_encoder(config):
    if config['encoder_type'] == "vgg19":
        encoder = VGG19()
    elif config['encoder_type'] == "vgg19_bn":
        encoder = VGG19_bn()
    elif config['encoder_type'] == "resnet152":
        encoder = ResNet152()
    elif config['encoder_type'] == "resnet34":
        encoder = ResNet34()
    elif config['encoder_type'] == "resnet50":
        encoder = ResNet50()
    else:
        raise RuntimeError("invalid encoder type")

    return encoder
Example #6
def get_net(network: str, num_classes) -> torch.nn.Module:
    return VGG('VGG16', num_classes=num_classes) if network == 'VGG16' else \
        ResNet34(num_classes=num_classes) if network == 'ResNet34' else \
        PreActResNet18(num_classes=num_classes) if network == 'PreActResNet18' else \
        GoogLeNet(num_classes=num_classes) if network == 'GoogLeNet' else \
        densenet_cifar(num_classes=num_classes) if network == 'densenet_cifar' else \
        ResNeXt29_2x64d(num_classes=num_classes) if network == 'ResNeXt29_2x64d' else \
        MobileNet(num_classes=num_classes) if network == 'MobileNet' else \
        MobileNetV2(num_classes=num_classes) if network == 'MobileNetV2' else \
        DPN92(num_classes=num_classes) if network == 'DPN92' else \
        ShuffleNetG2(num_classes=num_classes) if network == 'ShuffleNetG2' else \
        SENet18(num_classes=num_classes) if network == 'SENet18' else \
        ShuffleNetV2(1, num_classes=num_classes) if network == 'ShuffleNetV2' else \
        EfficientNetB0(
            num_classes=num_classes) if network == 'EfficientNetB0' else None
Example #7
def get_classifier(mode, n_classes=10):
    if mode == 'resnet18':
        classifier = ResNet18(num_classes=n_classes)
    elif mode == 'resnet34':
        classifier = ResNet34(num_classes=n_classes)
    elif mode == 'resnet50':
        classifier = ResNet50(num_classes=n_classes)
    elif mode == 'resnet18_imagenet':
        classifier = resnet18(num_classes=n_classes)
    elif mode == 'resnet50_imagenet':
        classifier = resnet50(num_classes=n_classes)
    else:
        raise NotImplementedError()

    return classifier
Example #8
def get_classifier(mode, n_classes=10):
    if mode == 'resnet18':
        classifier = ResNet18(num_classes=n_classes)
    elif mode == 'resnet34':
        classifier = ResNet34(num_classes=n_classes)
    elif mode == 'resnet50':
        classifier = ResNet50(num_classes=n_classes)
    elif mode == 'resnet18_imagenet':
        classifier = resnet18(num_classes=n_classes)
    elif mode == 'resnet50_imagenet':
        classifier = resnet50(num_classes=n_classes)
    elif mode == 'live':
        classifier = FeatherNet(input_size=128, se=True, avgdown=True)
#        classifier = ResNet18(num_classes=n_classes)
#        classifier = LiveModel()
    else:
        raise NotImplementedError()

    return classifier
def main():
    args = parser.parse_args()
    model = None
    scheduler = None

    if args.dataset == 'tiny_imagenet':
        dataloader = TinyImagenetDataLoader()
        num_classes = 200
    elif args.dataset == 'dogs':
        dataloader = DogsDataLoader()
        num_classes = 121
    else:
        dataloader = Cifar10DataLoader()
        num_classes = 10

    if args.model == 'resnet34':
        model = ResNet34(num_classes).cuda()
    elif args.model == 'resnet50':
        model = ResNet50(num_classes).cuda()
    elif args.model == 'resnet50-preact':
        model = ResNet50(num_classes, pre_activation=True).cuda()
    elif args.model == 'resnet50-imported':
        model = ResNetImported(num_classes).cuda()
    elif args.model == 'resnext29':
        # model = ResNext29(num_classes).cuda()
        model = resnext29_8x64d(num_classes).cuda()

    if args.optimizer == 'momentum':
        optimizer = optim.SGD(model.parameters(),
                              lr=args.lr,
                              momentum=args.momentum,
                              weight_decay=args.weight_decay)
        scheduler = optim.lr_scheduler.StepLR(optimizer,
                                              step_size=5,
                                              gamma=0.1)
    else:
        optimizer = optim.Adam(model.parameters(), lr=args.lr)

    loss_fn = nn.CrossEntropyLoss().cuda()
    agent = Agent(model, dataloader)
    agent.train(loss_fn, args.epochs, optimizer, scheduler, args.log_and_save)
Example #10
def get_model(model):
    model_path = '../saved'
    if model == 'LeNet-5':
        net = LeNet()
        model_name = 'lenet.pth'
    elif model == 'VGG-16':
        net = Vgg16_Net()
        model_name = 'vgg16.pth'
    elif model == 'ResNet18':
        net = ResNet18()
        model_name = 'resnet18.pth'
    elif model == 'ResNet34':
        net = ResNet34()
        model_name = 'resnet34.pth'
    elif model == 'ResNet50':
        net = ResNet50()
        model_name = 'resnet50.pth'
    else:
        net = ResNet101()
        model_name = 'resnet101.pth'
    return net, os.path.join(model_path, model_name)
Example #11
 def model_init(self, args):
     # Network
     if args.dataset == 'MNIST':
         print("MNIST")
         self.net = cuda(ToyNet_MNIST(y_dim=self.y_dim), self.cuda)
     elif args.dataset == 'CIFAR10':
         print("Dataset used CIFAR10")
         if args.network_choice == 'ToyNet':
             self.net = cuda(ToyNet_CIFAR10(y_dim=self.y_dim), self.cuda)
         elif args.network_choice == 'ResNet18':
             self.net = cuda(ResNet18(), self.cuda)
         elif args.network_choice == 'ResNet34':
             self.net = cuda(ResNet34(), self.cuda)
         elif args.network_choice == 'ResNet50':
             self.net = cuda(ResNet50(), self.cuda)
     self.net.weight_init(_type='kaiming')
     # setup optimizer
     self.optim = optim.Adam([{
         'params': self.net.parameters(),
         'lr': self.lr
     }],
                             betas=(0.5, 0.999))
Example #12
def main():
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # models
    backbone = ResNet34(10, True)
    num_ftrs = backbone.fc_in
    rot_cls = nn.Sequential(nn.Flatten(), nn.Linear(num_ftrs, K))
    rot_cls.apply(weights_init_normal)

    model = nn.Sequential(backbone, rot_cls).to(device)
    CE = nn.CrossEntropyLoss().to(device)

    # loaders
    trainset = torchvision.datasets.CIFAR10(root='/disk1/CIFAR10',
                                            train=True,
                                            download=True,
                                            transform=transform_strongaug)
    testset = torchvision.datasets.CIFAR10(root='/disk1/CIFAR10',
                                           train=False,
                                           download=True,
                                           transform=transform_test)
    trainloader = DataLoader(trainset,
                             batch_size=batch_size,
                             shuffle=True,
                             collate_fn=collate,
                             num_workers=2)
    testloader = DataLoader(testset,
                            batch_size=batch_size,
                            shuffle=False,
                            collate_fn=collate,
                            num_workers=2)

    train(model,
          CE,
          device,
          trainloader,
          testloader,
          epoch=epoch,
          save_best=True)
Example #13
def fetch_specified_model(model_name, activation):
    """
    Inits and returns the specified model
    """

    # Specific hard-coding for CIFAR100
    in_ch, num_classes = 3, 100
    act_fact = kdm.get_activation_factory(activation)

    if model_name == "basenet":
        model = BaseNet(in_ch, num_classes, act_fact)
    elif model_name == "resnet18":
        model = ResNet18(in_ch, num_classes, act_fact)
    elif model_name == "resnet34":
        model = ResNet34(in_ch, num_classes, act_fact)
    elif model_name == "mobnet2":
        model = MobileNetV2(in_ch, num_classes, act_fact)
    elif model_name == "sqnet":
        model = SqueezeNet(in_ch, num_classes, act_fact)
    else:
        assert False, "Unsupported base model: {}".format(model_name)

    return model
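Because the factory above hard-codes the CIFAR-100 input/output shapes, a usage sketch reduces to picking a model name and an activation; "relu" below is an assumed key for kdm.get_activation_factory:

# Hypothetical call; "relu" is assumed to be a valid activation name.
model = fetch_specified_model("resnet34", activation="relu")
n_params = sum(p.numel() for p in model.parameters())
print("resnet34 parameters:", n_params)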
Example #14
    def get_classifier(self, is_multi_class):
        if self.params.model == 'resnet18':
            from models.resnet import ResNet18
            classifier = ResNet18(num_classes=self.params.n_classes)
        elif self.params.model == 'resnet34':
            from models.resnet import ResNet34
            classifier = ResNet34(num_classes=self.params.n_classes)
        elif self.params.model == 'resnet50':
            from models.resnet import ResNet50
            classifier = ResNet50(num_classes=self.params.n_classes)
        elif self.params.model == 'resnet18_imagenet':
            if is_multi_class:
                from models.resnet_imagenet_multiclass_infer import resnet18
            else:
                from models.resnet_imagenet import resnet18
            classifier = resnet18(num_classes=self.params.n_classes)
        elif self.params.model == 'resnet50_imagenet':
            from models.resnet_imagenet import resnet50
            classifier = resnet50(num_classes=self.params.n_classes)
        else:
            raise NotImplementedError()

        return classifier
def calc_mi():
    parser = argparse.ArgumentParser(description='MINE robust model')
    parser.add_argument('--seed', default=9527, type=int)
    parser.add_argument('--epochs', default=21, type=int)
    parser.add_argument('--learning_rate', default=1e-3, type=float)
    parser.add_argument('--momentum', default=0.9, type=float)
    parser.add_argument('--weight_decay', default=1e-3, type=float)
    parser.add_argument('--h', default=True, type=bool)  # True: estimate I(h, y); False: estimate I(x, y)
    parser.add_argument('--batch_size', default=5, type=int)
    args = parser.parse_args()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    robust_model = 'checkpoint/adv_ckpt_41.pt'
    robust_net = wide_resnet_34_10()
    checkpoint = torch.load(robust_model)
    robust_net.load_state_dict(checkpoint['net'])
    robust_net.to(device)

    if args.h:
        in_channels = 1280  # 6 = 3 * 2 for MI(x, y), 1280 = 640 * 2 for MI(h, y)
    else:
        in_channels = 6

    resnet34 = ResNet34(in_channels, args.h).cuda()
    trainloader, testloader = get_cifar10(args.batch_size)
    optimizer = optim.Adam(resnet34.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
    mi_lb_list = train(trainloader, testloader, robust_net, resnet34, optimizer, device, args.epochs, args.h)
    result_cor_ma = ma(mi_lb_list)
    print('h: {}, last MI: {}'.format(args.h, result_cor_ma[-1]))
    if args.h:
        with open('mi_hy_sung.txt', 'w') as filehandle:
            for listitem in result_cor_ma:
                filehandle.write('%s\n' % listitem)
    else:
        with open('mi_xy_sung.txt', 'w') as filehandle:
            for listitem in result_cor_ma:
                filehandle.write('%s\n' % listitem)
Example #16
    # set up subsampled cifar10 train loader
    ##########################################
    trainsubset = get_subsample_dataset_label_noise(trainset,
                                                    permute_index[trial],
                                                    noise_size=args.noise_size)
    trainloader = torch.utils.data.DataLoader(trainsubset,
                                              batch_size=128,
                                              shuffle=True)

    ##########################################
    # set up model and optimizer
    ##########################################
    if args.arch == 'resnet18':
        net = ResNet18(width=args.width).cuda()
    elif args.arch == 'resnet34':
        net = ResNet34(width=args.width).cuda()
    elif args.arch == 'resnet50':
        net = ResNet50(width=args.width).cuda()
    elif args.arch == 'resnext':
        net = ResNeXt29(width=args.width).cuda()
    elif args.arch == 'resnext_1d':
        net = ResNeXt29_1d(width=args.width).cuda()
    elif args.arch == 'vgg':
        net = VGG11(width=args.width).cuda()
    elif args.arch == 'resnet26_bottle':
        net = ResNet26_bottle(width=args.width).cuda()
    elif args.arch == 'resnet38_bottle':
        net = ResNet38_bottle(width=args.width).cuda()
    elif args.arch == 'resnet50_bottle':
        net = ResNet50_bottle(width=args.width).cuda()
    else:
        # the original snippet is truncated here; raise to keep the unsupported case explicit
        raise NotImplementedError('unsupported architecture: {}'.format(args.arch))
Example #17
import tensorflow as tf

from dataset import Dataset
from models.simple_cnn import SimpleCNN
from models.alexnet import AlexNet
from models.vgg import VGG16, VGG19
from models.inception import InceptionV1, InceptionV3
from models.resnet import ResNet50, ResNet101, ResNet34
from trainer import Trainer

physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], enable=True)

if __name__ == "__main__":
    image_shape = (224, 224, 3)
    num_classes = 1000

    mydata = Dataset()
    mynet = ResNet34(image_shape, num_classes)

    mynet.summary()  # summary() already prints the model and returns None
    tf.keras.utils.plot_model(mynet,
                              to_file="plots/ResNet34.png",
                              show_shapes=True)

    # mytrainer = Trainer(mynet)
    # mytrainer.train(mydata,epoches=5)
Example #18
def eval_model(
    dataset_dir,
    metadata_filename,
    model_filename,
    model_cfg,
    batch_size=32,
    sample_size=-1,
):
    """
    Validation loop.

    Parameters
    ----------
    dataset_dir : str
        Directory with all the images.
    metadata_filename : str
        Absolute path to the metadata pickle file.
    model_filename : str
        Path/filename of the saved model to load.
    model_cfg : str
        Path to the model configuration file (read via cfg_from_file).
    batch_size : int
        Mini-batch size.
    sample_size : int
        Number of elements to use as sample size,
        for debugging purposes only. If -1, use all samples.

    Returns
    -------
    y_pred : ndarray
        Prediction of the model.

    """

    seed = 1234

    print("pytorch/random seed: {}".format(seed))
    np.random.seed(seed)
    random.seed(seed)
    torch.manual_seed(seed)
    torch.backends.cudnn.deterministic = True

    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

    dataset_split = "test"
    # dataset_split = 'train'

    test_loader = prepare_dataloaders(
        dataset_split=dataset_split,
        dataset_path=dataset_dir,
        metadata_filename=metadata_filename,
        batch_size=batch_size,
        sample_size=sample_size,
        num_worker=0,
    )
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("Device used: ", device)

    cfg_from_file(model_cfg)

    # Load best model
    model_dict = torch.load(model_filename, map_location=device)

    current_hyper_params_dict = model_dict["hyper_params"]
    model = ModularSVNHClassifier(
        cfg.MODEL,
        feature_transformation=ResNet34(
            current_hyper_params_dict["FEATURES_OUTPUT_SIZE"]
        ),
        length_classifier=LengthClassifier(
            cfg.MODEL, current_hyper_params_dict["FEATURES_OUTPUT_SIZE"]
        ),
        number_classifier=NumberClassifier,
        hyper_params=current_hyper_params_dict,
    )

    model.load_state_dict(model_dict["model_state_dict"])

    since = time.time()
    model = model.to(device)

    print("# Testing Model ... #")

    stats = StatsRecorder()

    performance_evaluator = PerformanceEvaluator(test_loader)

    y_pred, y_true = performance_evaluator.evaluate(
        model, device, stats, mode="test"
    )

    test_accuracy = stats.test_best_accuracy

    print("===============================")
    print("\n\nTest Set Accuracy: {}".format(test_accuracy))

    time_elapsed = time.time() - since

    print(
        "\n\nTesting complete in {:.0f}m {:.0f}s".format(
            time_elapsed // 60, time_elapsed % 60
        )
    )

    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)

    return y_pred
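For completeness, a hedged sketch of an eval_model call; every path below is a placeholder rather than a value from the original project:

# Hypothetical invocation; all file paths are placeholders.
y_pred = eval_model(
    dataset_dir="data/SVHN/test",
    metadata_filename="data/SVHN/test_metadata.pkl",
    model_filename="results/best_model.pth",
    model_cfg="config/base_config.yml",
    batch_size=32,
    sample_size=-1,
)
print(y_pred.shape)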
Example #19
if args.method == 'ODEFree':
    ODEBlock = None

if args.network == 'sqnxt':
    net = SqNxt_23_1x(10, ODEBlock)
elif args.network == 'resnet18':
    net = ResNet18(ODEBlock, norm_layers, param_norm_layers, act_layers, args.inplanes)
elif args.network == 'resnet10':
    net = ResNet10(ODEBlock, norm_layers, param_norm_layers, act_layers, args.inplanes)
elif args.network == 'resnet4':
    net = ResNet4(ODEBlock, norm_layers, param_norm_layers, act_layers, args.inplanes)
elif args.network == 'resnet6':
    net = ResNet6(ODEBlock, norm_layers, param_norm_layers, act_layers, args.inplanes)
elif args.network == 'resnet34':
    net = ResNet34(ODEBlock, norm_layers, param_norm_layers, act_layers, args.inplanes)
elif args.network == 'preresnet18':
    net = PreResNet18(ODEBlock, norm_layers, param_norm_layers, act_layers, args.inplanes)
elif args.network == 'preresnet10':
    net = PreResNet10(ODEBlock, norm_layers, param_norm_layers, act_layers, args.inplanes)
elif args.network == 'preresnet4':
    net = PreResNet4(ODEBlock, norm_layers, param_norm_layers, act_layers, args.inplanes)
elif args.network == 'preresnet6':
    net = PreResNet6(ODEBlock, norm_layers, param_norm_layers, act_layers, args.inplanes)
elif args.network == 'preresnet34':
    net = PreResNet34(ODEBlock, norm_layers, param_norm_layers, act_layers, args.inplanes)

net.apply(conv_init)
# logger.info(args)
# logger.info(net)
Example #20
def main(args):

    check_path(args)

    # All 10 CIFAR-10 classes
    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
               'ship', 'truck')

    # Build the dataset
    data_builder = DataBuilder(args)
    dataSet = DataSet(data_builder.train_builder(),
                      data_builder.test_builder(), classes)

    # Select the model
    if args.lenet:
        net = LeNet()
        model_name = args.name_le
    elif args.vgg:
        net = Vgg16_Net()
        model_name = args.name_vgg
    elif args.resnet18:
        net = ResNet18()
        model_name = args.name_res18
    elif args.resnet34:
        net = ResNet34()
        model_name = args.name_res34
    elif args.resnet50:
        net = ResNet50()
        model_name = args.name_res50
    elif args.resnet101:
        net = ResNet101()
        model_name = args.name_res101
    elif args.resnet152:
        net = ResNet152()
        model_name = args.name_res152

    # Cross-entropy loss
    criterion = nn.CrossEntropyLoss()

    # SGD optimizer
    optimizer = optim.SGD(net.parameters(),
                          lr=args.learning_rate,
                          momentum=args.sgd_momentum,
                          weight_decay=args.weight_decay)

    # Cosine-annealing learning-rate schedule
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=150)

    # Path where the model parameters are saved
    model_path = os.path.join(args.model_path, model_name)

    # Start training
    if args.do_train:
        print("Training...")

        trainer = Trainer(net, criterion, optimizer, scheduler,
                          dataSet.train_loader, dataSet.test_loader,
                          model_path, args)

        trainer.train(epochs=args.epoch)
        # t.save(net.state_dict(), model_path)

    # Start evaluation; if --do_train is also given, test with the model just trained,
    # otherwise test with the saved model.
    if args.do_eval:
        if not args.do_train and not os.path.exists(model_path):
            print(
                "Sorry, there's no saved model yet, you need to train first.")
            return
        # --do_eval
        if not args.do_train:
            checkpoint = t.load(model_path)
            net.load_state_dict(checkpoint['net'])
            accuracy = checkpoint['acc']
            epoch = checkpoint['epoch']
            print("Using saved model, accuracy : %f  epoch: %d" %
                  (accuracy, epoch))
        tester = Tester(dataSet.test_loader, net, args)
        tester.test()

    if args.show_model:
        if not os.path.exists(model_path):
            print(
                "Sorry, there's no saved model yet, you need to train first.")
            return
        show_model(args)

    if args.do_predict:
        device = t.device("cuda" if t.cuda.is_available() else "cpu")
        checkpoint = t.load(model_path, map_location=device)
        net.load_state_dict(checkpoint['net'])
        predictor = Predictor(net, classes)
        img_path = 'test'
        img_name = [os.path.join(img_path, x) for x in os.listdir(img_path)]
        for img in img_name:
            predictor.predict(img)
Example #21
    num_classes = 100

train_loader = torch.utils.data.DataLoader(train_data,
                                           batch_size=args.test_bs,
                                           shuffle=True,
                                           num_workers=args.prefetch,
                                           pin_memory=True)
test_loader = torch.utils.data.DataLoader(test_data,
                                          batch_size=args.test_bs,
                                          shuffle=False,
                                          num_workers=args.prefetch,
                                          pin_memory=True)

# Create model
if 'resnet' in args.method_name:
    net = ResNet34(num_c=num_classes)
else:
    net = DenseNet3(depth=100, num_classes=num_classes)

# Restore model
assert args.load != ''
model_name = os.path.join(args.load, args.method_name + '.pth')
net.load_state_dict(torch.load(model_name))
print('Model restored! File:', model_name)

net.eval()

if args.ngpu > 1:
    net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))

if args.ngpu > 0:
    net.cuda()  # the original snippet is truncated here; moving the model to the GPU is the likely intent
Example #22
def build_model(net='MobileNet',
                input_shape=(224, 224, 3),
                siamese_weights=None,
                share=True):
    if net == 'MobileNet':
        base_model = MobileNet(include_top=False, input_shape=input_shape)
    elif net == 'MobileNetV2':
        base_model = MobileNetV2(include_top=False, input_shape=input_shape)
    elif net == 'NASNetMobile':
        base_model = NASNetMobile(include_top=False, input_shape=input_shape)
    elif net == 'ResNet18':
        base_model = ResNet18(include_top=False, input_shape=input_shape)
    elif net == 'ResNet18V2':
        base_model = ResNet18V2(include_top=False, input_shape=input_shape)
    elif net == 'ResNet34':
        base_model = ResNet34(include_top=False, input_shape=input_shape)
    elif net == 'ResNet34V2':
        base_model = ResNet34V2(include_top=False, input_shape=input_shape)
    elif net == 'DenseNet21':
        base_model = DenseNet(include_top=False,
                              blocks=[2, 2, 2, 2],
                              input_shape=input_shape,
                              name='a')
        if share == False:
            base_model_b = DenseNet(include_top=False,
                                    blocks=[2, 2, 2, 2],
                                    input_shape=input_shape,
                                    name='b')
    elif net == 'DenseNet69':
        base_model = DenseNet(include_top=False,
                              blocks=[6, 8, 10, 8],
                              input_shape=input_shape)
        if share == False:
            base_model_b = DenseNet(include_top=False,
                                    blocks=[6, 8, 10, 8],
                                    input_shape=input_shape,
                                    name='b')
    elif net == 'DenseNet109':
        base_model = DenseNet(include_top=False,
                              blocks=[6, 12, 18, 16],
                              input_shape=input_shape)
        if share == False:
            base_model_b = DenseNet(include_top=False,
                                    blocks=[6, 12, 18, 16],
                                    input_shape=input_shape,
                                    name='b')
    elif net == 'DenseShuffleV1_57_373':
        base_model = DenseShuffleV1(include_top=False,
                                    blocks=[6, 8, 12],
                                    input_shape=input_shape,
                                    num_shuffle_units=[3, 7, 3],
                                    scale_factor=1.0,
                                    bottleneck_ratio=1,
                                    dropout_rate=0.5)
    elif net == 'DenseShuffleV2_57_373':
        base_model = DenseShuffleV2(include_top=False,
                                    blocks=[6, 8, 12],
                                    input_shape=input_shape,
                                    num_shuffle_units=[3, 7, 3],
                                    scale_factor=1.0,
                                    bottleneck_ratio=1,
                                    dropout_rate=0.5)
    elif net == 'DenseShuffleV2_49_353':
        base_model = DenseShuffleV2(include_top=False,
                                    blocks=[6, 8, 8],
                                    input_shape=input_shape,
                                    num_shuffle_units=[3, 5, 3],
                                    scale_factor=1.0,
                                    bottleneck_ratio=1,
                                    dropout_rate=0.5)
    elif net == 'DenseShuffleV2_17_232':
        base_model = DenseShuffleV2(include_top=False,
                                    blocks=[2, 2, 2],
                                    input_shape=input_shape,
                                    num_shuffle_units=[2, 3, 2],
                                    scale_factor=1.0,
                                    bottleneck_ratio=1,
                                    dropout_rate=0.5)
    elif net == 'ShuffleNetV2':
        base_model = ShuffleNetV2(include_top=False,
                                  scale_factor=1.0,
                                  pooling='avg',
                                  input_shape=input_shape,
                                  num_shuffle_units=[3, 7, 3],
                                  bottleneck_ratio=1)
    elif net == 'ShuffleNet':
        base_model = ShuffleNet(include_top=False,
                                scale_factor=1.0,
                                pooling='avg',
                                input_shape=input_shape,
                                num_shuffle_units=[3, 7, 3],
                                bottleneck_ratio=1)
    elif net == 'MobileNetV3Small':
        base_model = MobileNetV3Small(include_top=False,
                                      input_shape=input_shape)
    elif net == 'SqueezeNet':
        base_model = SqueezeNet(include_top=False, input_shape=input_shape)
    else:
        print('the network name you have entered is not supported yet')
        sys.exit()

    input_a = keras.layers.Input(shape=input_shape, name='input_a')
    input_b = keras.layers.Input(shape=input_shape, name='input_b')
    processed_a = base_model(input_a)

    if share:
        processed_b = base_model(input_b)
    else:
        processed_b = base_model_b(input_b)

    #processed_a = keras.layers.Activation('sigmoid', name='sigmoid_a')(processed_a)
    #processed_b = keras.layers.Activation('sigmoid', name='sigmoid_b')(processed_b)
    normalize = keras.layers.Lambda(lambda x: K.l2_normalize(x, axis=-1),
                                    name='normalize')
    processed_a = normalize(processed_a)
    processed_b = normalize(processed_b)
    distance = keras.layers.Lambda(euclidean_distance,
                                   output_shape=eucl_dist_output_shape,
                                   name='dist')([processed_a, processed_b])
    model = keras.models.Model([input_a, input_b], distance)
    if siamese_weights is not None:
        print('loading siamese weights ...')
        model.load_weights(siamese_weights)
    return model
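A minimal sketch of driving the siamese builder above; the compile step and the loss on the distance output are assumptions, since the original snippet only constructs the model:

# Hypothetical usage; optimizer and loss are illustrative placeholders.
siamese = build_model(net='ResNet34', input_shape=(224, 224, 3), share=True)
siamese.compile(optimizer='adam', loss='mse')  # a contrastive loss would normally be used here
siamese.summary()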
Example #23
def main():
    torch.manual_seed(options['seed'])
    os.environ['CUDA_VISIBLE_DEVICES'] = options['gpu']
    use_gpu = torch.cuda.is_available()
    if options['use_cpu']: use_gpu = False

    feat_dim = 2 if 'cnn' in options['model'] else 512

    options.update(
        {
            'feat_dim': feat_dim,
            'use_gpu': use_gpu
        }
    )

    if use_gpu:
        print("Currently using GPU: {}".format(options['gpu']))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(options['seed'])
    else:
        print("Currently using CPU")

    dataset = datasets.create(options['dataset'], **options)
    out_dataset = datasets.create(options['out_dataset'], **options)

    trainloader, testloader = dataset.trainloader, dataset.testloader
    outloader = out_dataset.testloader

    options.update(
        {
            'num_classes': dataset.num_classes
        }
    )

    print("Creating model: {}".format(options['model']))
    if 'cnn' in options['model']:
        net = ConvNet(num_classes=dataset.num_classes)
    else:
        if options['cs']:
            net = resnet34ABN(num_classes=dataset.num_classes, num_bns=2)
        else:
            net = ResNet34(dataset.num_classes)


    if options['cs']:
        print("Creating GAN")
        nz = options['nz']
        netG = gan.Generator32(1, nz, 64, 3) # ngpu, nz, ngf, nc
        netD = gan.Discriminator32(1, 3, 64) # ngpu, nc, ndf
        fixed_noise = torch.FloatTensor(64, nz, 1, 1).normal_(0, 1)
        criterionD = nn.BCELoss()

    Loss = importlib.import_module('loss.'+options['loss'])
    criterion = getattr(Loss, options['loss'])(**options)

    if use_gpu:
        net = nn.DataParallel(net, device_ids=[i for i in range(len(options['gpu'].split(',')))]).cuda()
        criterion = criterion.cuda()
        if options['cs']:
            netG = nn.DataParallel(netG, device_ids=[i for i in range(len(options['gpu'].split(',')))]).cuda()
            netD = nn.DataParallel(netD, device_ids=[i for i in range(len(options['gpu'].split(',')))]).cuda()
            fixed_noise = fixed_noise.cuda()  # .cuda() is not in-place; rebind the tensor
    
    model_path = os.path.join(options['outf'], 'models', options['dataset'])
    file_name = '{}_{}_{}_{}_{}'.format(options['model'], options['dataset'], options['loss'], str(options['weight_pl']), str(options['cs']))
    if options['eval']:
        net, criterion = load_networks(net, model_path, file_name, criterion=criterion)
        results = test(net, criterion, testloader, outloader, epoch=0, **options)
        print("Acc (%): {:.3f}\t AUROC (%): {:.3f}\t OSCR (%): {:.3f}\t".format(results['ACC'], results['AUROC'], results['OSCR']))
        return

    params_list = [{'params': net.parameters()},
                {'params': criterion.parameters()}]
    optimizer = torch.optim.Adam(params_list, lr=options['lr'])
    if options['cs']:
        optimizerD = torch.optim.Adam(netD.parameters(), lr=options['gan_lr'], betas=(0.5, 0.999))
        optimizerG = torch.optim.Adam(netG.parameters(), lr=options['gan_lr'], betas=(0.5, 0.999))
 
    if options['stepsize'] > 0:
        scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[30, 60, 90, 120])

    start_time = time.time()

    score_now = 0.0
    for epoch in range(options['max_epoch']):
        print("==> Epoch {}/{}".format(epoch+1, options['max_epoch']))

        if options['cs']:
            train_cs(net, netD, netG, criterion, criterionD,
                optimizer, optimizerD, optimizerG,
                trainloader, epoch=epoch, **options)

        train(net, criterion, optimizer, trainloader, epoch=epoch, **options)

        if options['eval_freq'] > 0 and (epoch+1) % options['eval_freq'] == 0 or (epoch+1) == options['max_epoch']:
            print("==> Test")
            results = test(net, criterion, testloader, outloader, epoch=epoch, **options)
            print("Acc (%): {:.3f}\t AUROC (%): {:.3f}\t OSCR (%): {:.3f}\t".format(results['ACC'], results['AUROC'], results['OSCR']))

            save_networks(net, model_path, file_name, criterion=criterion)
            if options['cs']: 
                save_GAN(netG, netD, model_path, file_name)
                fake = netG(fixed_noise)
                GAN_path = os.path.join(model_path, 'samples')
                mkdir_if_missing(GAN_path)
                vutils.save_image(fake.data, '%s/gan_samples_epoch_%03d.png'%(GAN_path, epoch), normalize=True)

        if options['stepsize'] > 0: scheduler.step()

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
Example #24
def get_network(name: str, num_classes: int) -> torch.nn.Module:
    return \
        AlexNet(
            num_classes=num_classes) if name == 'AlexNet' else\
        DenseNet201(
            num_classes=num_classes) if name == 'DenseNet201' else\
        DenseNet169(
            num_classes=num_classes) if name == 'DenseNet169' else\
        DenseNet161(
            num_classes=num_classes) if name == 'DenseNet161' else\
        DenseNet121(
            num_classes=num_classes) if name == 'DenseNet121' else\
        DenseNet121CIFAR(
            num_classes=num_classes) if name == 'DenseNet121CIFAR' else\
        GoogLeNet(
            num_classes=num_classes) if name == 'GoogLeNet' else\
        InceptionV3(
            num_classes=num_classes) if name == 'InceptionV3' else\
        MNASNet_0_5(
            num_classes=num_classes) if name == 'MNASNet_0_5' else\
        MNASNet_0_75(
            num_classes=num_classes) if name == 'MNASNet_0_75' else\
        MNASNet_1(
            num_classes=num_classes) if name == 'MNASNet_1' else\
        MNASNet_1_3(
            num_classes=num_classes) if name == 'MNASNet_1_3' else\
        MobileNetV2(
            num_classes=num_classes) if name == 'MobileNetV2' else\
        ResNet18(
            num_classes=num_classes) if name == 'ResNet18' else\
        ResNet34(
            num_classes=num_classes) if name == 'ResNet34' else\
        ResNet34CIFAR(
            num_classes=num_classes) if name == 'ResNet34CIFAR' else\
        ResNet50CIFAR(
            num_classes=num_classes) if name == 'ResNet50CIFAR' else\
        ResNet101CIFAR(
            num_classes=num_classes) if name == 'ResNet101CIFAR' else\
        ResNet18CIFAR(
            num_classes=num_classes) if name == 'ResNet18CIFAR' else\
        ResNet50(
            num_classes=num_classes) if name == 'ResNet50' else\
        ResNet101(
            num_classes=num_classes) if name == 'ResNet101' else\
        ResNet152(
            num_classes=num_classes) if name == 'ResNet152' else\
        ResNeXt50(
            num_classes=num_classes) if name == 'ResNext50' else\
        ResNeXtCIFAR(
            num_classes=num_classes) if name == 'ResNeXtCIFAR' else\
        ResNeXt101(
            num_classes=num_classes) if name == 'ResNext101' else\
        WideResNet50(
            num_classes=num_classes) if name == 'WideResNet50' else\
        WideResNet101(
            num_classes=num_classes) if name == 'WideResNet101' else\
        ShuffleNetV2_0_5(
            num_classes=num_classes) if name == 'ShuffleNetV2_0_5' else\
        ShuffleNetV2_1(
            num_classes=num_classes) if name == 'ShuffleNetV2_1' else\
        ShuffleNetV2_1_5(
            num_classes=num_classes) if name == 'ShuffleNetV2_1_5' else\
        ShuffleNetV2_2(
            num_classes=num_classes) if name == 'ShuffleNetV2_2' else\
        SqueezeNet_1(
            num_classes=num_classes) if name == 'SqueezeNet_1' else\
        SqueezeNet_1_1(
            num_classes=num_classes) if name == 'SqueezeNet_1_1' else\
        VGG11(
            num_classes=num_classes) if name == 'VGG11' else\
        VGG11_BN(
            num_classes=num_classes) if name == 'VGG11_BN' else\
        VGG13(
            num_classes=num_classes) if name == 'VGG13' else\
        VGG13_BN(
            num_classes=num_classes) if name == 'VGG13_BN' else\
        VGG16(
            num_classes=num_classes) if name == 'VGG16' else\
        VGG16_BN(
            num_classes=num_classes) if name == 'VGG16_BN' else\
        VGG19(
            num_classes=num_classes) if name == 'VGG19' else\
        VGG19_BN(
            num_classes=num_classes) if name == 'VGG19_BN' else \
        VGGCIFAR('VGG16',
                 num_classes=num_classes) if name == 'VGG16CIFAR' else \
        EfficientNetB4(
            num_classes=num_classes) if name == 'EfficientNetB4' else \
        EfficientNetB0CIFAR(
            num_classes=num_classes) if name == 'EfficientNetB0CIFAR' else\
        None
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
           'ship', 'truck')

# Model
if args.resume:
    # Load checkpoint.
    print('==> Resuming from checkpoint..')
    assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
    checkpoint = torch.load('./checkpoint/ckpt.t7')
    net = checkpoint['net']
    best_acc = checkpoint['acc']
    start_epoch = checkpoint['epoch']
else:
    print('==> Building model..')
    # net = VGG('VGG19')
    net = ResNet34(10)
    #net = PreActResNet18()
    # net = GoogLeNet()
    # net = DenseNet121()
    # net = ResNeXt29_2x64d()
    # net = MobileNet()
    # net = DPN92()
    # net = ShuffleNetG2()
    # net = SENet18()

if use_cuda:
    net.cuda()
    net = torch.nn.DataParallel(net,
                                device_ids=range(torch.cuda.device_count()))
    cudnn.benchmark = True
Example #26
    def select(self, model, path_fc=False, upsample='pixel'):
        if model == 'cnn':
            net = SimpleModel(
                in_shape=self.in_shape,
                activation=self.activation,
                num_classes=self.num_classes,
                filters=self.filters,
            )
        else:
            assert (self.dataset != 'MNIST' and self.dataset != 'Fashion-MNIST'
                    ), "Cannot use resnet or densenet for mnist style data"
            if model == 'resnet':
                assert self.resdepth in [
                    18, 34, 50, 101, 152
                ], "Non-standard and unsupported resnet depth ({})".format(
                    self.resdepth)
                if self.resdepth == 18:
                    net = ResNet18()
                elif self.resdepth == 34:
                    net = ResNet34()
                elif self.resdepth == 50:
                    net = ResNet50()
                elif self.resdepth == 101:
                    net = ResNet101()
                else:
                    net = ResNet152()
            elif model == 'densenet':
                assert self.resdepth in [
                    121, 161, 169, 201
                ], "Non-standard and unsupported densenet depth ({})".format(
                    self.resdepth)
                if self.resdepth == 121:
                    net = DenseNet121()
                elif self.resdepth == 161:
                    net = DenseNet161()
                elif self.resdepth == 169:
                    net = DenseNet169()
                else:
                    net = DenseNet201()
            elif model == 'preact_resnet':
                assert self.resdepth in [
                    10, 18, 34, 50, 101, 152
                ], "Non-standard and unsupported preact resnet depth ({})".format(
                    self.resdepth)
                if self.resdepth == 10:
                    net = PreActResNet10(path_fc=path_fc,
                                         num_classes=self.num_classes,
                                         upsample=upsample)
                elif self.resdepth == 18:
                    net = PreActResNet18()
                elif self.resdepth == 34:
                    net = PreActResNet34()
                elif self.resdepth == 50:
                    net = PreActResNet50()
                elif self.resdepth == 101:
                    net = PreActResNet101()
                else:
                    net = PreActResNet152()
            elif model == 'wresnet':
                assert (
                    (self.resdepth - 4) % 6 == 0
                ), "Wideresnet depth of {} not supported, must fulfill: (depth - 4) % 6 = 0".format(
                    self.resdepth)
                net = WideResNet(depth=self.resdepth,
                                 num_classes=self.num_classes,
                                 widen_factor=self.widen_factor)

        return net
Example #27
def main():

    parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
    parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
    parser.add_argument('--resume',
                        '-r',
                        action='store_true',
                        help='resume from checkpoint')
    args = parser.parse_args()

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    best_acc = 0  # best test accuracy
    start_epoch = 0  # start from epoch 0 or last checkpoint epoch

    # Data
    print('==> Preparing data..')
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])

    trainset = torchvision.datasets.CIFAR10(root='./data',
                                            train=True,
                                            download=True,
                                            transform=transform_train)
    train_loader = torch.utils.data.DataLoader(trainset,
                                               batch_size=128,
                                               shuffle=True,
                                               num_workers=2)

    testset = torchvision.datasets.CIFAR10(root='./data',
                                           train=False,
                                           download=True,
                                           transform=transform_test)
    test_loader = torch.utils.data.DataLoader(testset,
                                              batch_size=128,
                                              shuffle=False,
                                              num_workers=2)

    net = lsuv_init(ResNet34(),
                    train_loader,
                    needed_std=1.0,
                    std_tol=0.1,
                    max_attempts=10,
                    do_orthonorm=True,
                    device=device)

    if device == 'cuda':
        net = torch.nn.DataParallel(net)
        cudnn.benchmark = True

    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isdir(
            'checkpoint'), 'Error: no checkpoint directory found!'
        checkpoint = torch.load('./checkpoint/ckpt.t7')
        net.load_state_dict(checkpoint['net'])
        best_acc = checkpoint['acc']
        start_epoch = checkpoint['epoch']

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(),
                          lr=args.lr,
                          momentum=0.9,
                          weight_decay=5e-4)

    for epoch in range(start_epoch, start_epoch + 200):
        train(net, train_loader, criterion, optimizer, epoch, device)
        test(net, test_loader, criterion, epoch, device)
Example #28
 def select(self, model, args):
     """
     Selector utility to create models from model directory
     :param model: which model to select. Currently choices are: (cnn | resnet | preact_resnet | densenet | wresnet)
     :return: neural network to be trained
     """
     if model == 'cnn':
         net = SimpleModel(in_shape=self.in_shape,
                           activation=args.activation,
                           num_classes=self.num_classes,
                           filters=args.filters,
                           strides=args.strides,
                           kernel_sizes=args.kernel_sizes,
                           linear_widths=args.linear_widths,
                           use_batch_norm=args.use_batch_norm)
     else:
         assert (args.dataset != 'MNIST' and args.dataset != 'Fashion-MNIST'), \
             "Cannot use resnet or densenet for mnist style data"
         if model == 'resnet':
             assert args.resdepth in [18, 34, 50, 101, 152], \
                 "Non-standard and unsupported resnet depth ({})".format(args.resdepth)
             if args.resdepth == 18:
                 net = ResNet18(self.num_classes)
             elif args.resdepth == 34:
                 net = ResNet34(self.num_classes)
             elif args.resdepth == 50:
                 net = ResNet50(self.num_classes)
             elif args.resdepth == 101:
                 net = ResNet101(self.num_classes)
             else:
                 net = ResNet152(self.num_classes)
         elif model == 'densenet':
             assert args.resdepth in [121, 161, 169, 201], \
                 "Non-standard and unsupported densenet depth ({})".format(args.resdepth)
             if args.resdepth == 121:
                 net = DenseNet121(
                     growth_rate=12, num_classes=self.num_classes
                 )  # NB NOTE: growth rate controls cifar implementation
             elif args.resdepth == 161:
                 net = DenseNet161(growth_rate=12,
                                   num_classes=self.num_classes)
             elif args.resdepth == 169:
                 net = DenseNet169(growth_rate=12,
                                   num_classes=self.num_classes)
             else:
                 net = DenseNet201(growth_rate=12,
                                   num_classes=self.num_classes)
         elif model == 'preact_resnet':
             assert args.resdepth in [18, 34, 50, 101, 152], \
                 "Non-standard and unsupported preact resnet depth ({})".format(args.resdepth)
             if args.resdepth == 18:
                 net = PreActResNet18(self.num_classes)
             elif args.resdepth == 34:
                 net = PreActResNet34(self.num_classes)
             elif args.resdepth == 50:
                 net = PreActResNet50(self.num_classes)
             elif args.resdepth == 101:
                 net = PreActResNet101(self.num_classes)
             else:
                 net = PreActResNet152(self.num_classes)
         elif model == 'wresnet':
             assert ((args.resdepth - 4) % 6 == 0), \
                 "Wideresnet depth of {} not supported, must fulfill: (depth - 4) % 6 = 0".format(args.resdepth)
             net = WideResNet(depth=args.resdepth,
                              num_classes=self.num_classes,
                              widen_factor=args.widen_factor)
         else:
             raise NotImplementedError(
                 'Model {} not supported'.format(model))
     return net
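Given the docstring above, a hedged usage sketch might look like the following; selector stands for an instance of the enclosing class, and the Namespace fields simply mirror the attributes read inside select(), with placeholder values:

# Hypothetical call; `selector` is an instance of the enclosing selector class.
from argparse import Namespace

args = Namespace(dataset='CIFAR10', resdepth=34, widen_factor=10,
                 activation='relu', filters=None, strides=None,
                 kernel_sizes=None, linear_widths=None, use_batch_norm=True)
net = selector.select('resnet', args)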
Example #29
                                           transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=100,
                                             shuffle=False,
                                             num_workers=2)

else:
    raise NotImplementedError('Invalid dataset')

# Model
if opt.model == 'vgg':
    from models.vgg import VGG
    net = nn.DataParallel(VGG('VGG16', nclass, img_width=img_width).cuda())
elif opt.model == 'resnet':
    from models.resnet import ResNet34
    net = nn.DataParallel(ResNet34().cuda())
else:
    raise NotImplementedError('Invalid model')

#checkpoint = torch.load('./checkpoint/cifar10_vgg16_teacher.pth')
#net.load_state_dict(checkpoint)

# Loss function
criterion = nn.CrossEntropyLoss()


def cross_entropy(pred, soft_targets):
    logsoftmax = nn.LogSoftmax(dim=1)
    return torch.mean(torch.sum(-soft_targets * logsoftmax(pred), 1))