Example #1
def get_unet_densenet121(input_shape,
                         inputs,
                         retrain=True,
                         with_bottleneck=False,
                         renorm=False):
    base_model = densenet.DenseNet121(input_shape=input_shape,
                                      input_tensor=inputs,
                                      include_top=False,
                                      weights='imagenet')

    if renorm:
        raise NotImplementedError()

    # freeze or fine-tune the DenseNet encoder depending on `retrain`
    for layer in base_model.layers:
        layer.trainable = retrain

    conv1 = base_model.get_layer("conv1/relu").output
    conv2 = base_model.get_layer("pool2_conv").output
    conv3 = base_model.get_layer("pool3_conv").output
    conv4 = base_model.get_layer("pool4_conv").output
    conv5 = base_model.get_layer("bn").output

    up6 = concatenate([UpSampling2D()(conv5), conv4], axis=-1)
    conv6 = conv_block_simple(up6, 256, "conv6_1")
    conv6 = conv_block_simple(conv6, 256, "conv6_2")

    up7 = concatenate([UpSampling2D()(conv6), conv3], axis=-1)
    conv7 = conv_block_simple(up7, 192, "conv7_1")
    conv7 = conv_block_simple(conv7, 192, "conv7_2")

    up8 = concatenate([UpSampling2D()(conv7), conv2], axis=-1)
    conv8 = conv_block_simple(up8, 128, "conv8_1")
    conv8 = conv_block_simple(conv8, 128, "conv8_2")

    up9 = concatenate([UpSampling2D()(conv8), conv1], axis=-1)
    conv9 = conv_block_simple(up9, 64, "conv9_1")
    conv9 = conv_block_simple(conv9, 64, "conv9_2")

    up10 = concatenate([UpSampling2D()(conv9), base_model.input], axis=-1)
    conv10 = conv_block_simple(up10, 32, "conv10_1")
    conv10 = conv_block_simple(conv10, 32, "conv10_2")

    if not with_bottleneck:
        return conv10
    else:
        return conv10, conv5
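A minimal usage sketch for the function above, assuming standard Keras imports and that conv_block_simple is available in the project; the 256x256 input shape and the sigmoid mask head are hypothetical choices:

from keras.layers import Input, Conv2D
from keras.models import Model

# build the encoder/decoder and attach a 1x1 sigmoid head for binary masks
inputs = Input(shape=(256, 256, 3))
decoder_out = get_unet_densenet121((256, 256, 3), inputs, retrain=False)
mask = Conv2D(1, (1, 1), activation='sigmoid', name='mask')(decoder_out)
model = Model(inputs=inputs, outputs=mask)
model.compile(optimizer='adam', loss='binary_crossentropy')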
Example #2
def main():
    global args, best_prec1
    args = parser.parse_args()

    # create the save directory if it does not exist
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    # take unknown classes
    unk_classes = set(map(int, args.unk))
    print('-- Unknown classes are: ', unk_classes)

    # define the classifier
    model = densenet.DenseNet121()
    model = torch.nn.DataParallel(model)

    if args.cpu:
        model.cpu()
    else:
        model.cuda()

    # load model
    try:
        model.load_state_dict(torch.load(args.model_dir)['state_dict'])
        print("=> loaded model '{}'".format(args.model_dir))
    except Exception as exc:
        raise RuntimeError(
            'Failed to load the model "{}"'.format(args.model_dir)) from exc

    cudnn.benchmark = True

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    # load training data
    transformed_dataset = datasets.CIFAR10(
        root='./data',
        train=True,
        transform=transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(32, 4),
            transforms.ToTensor(),
            normalize,
        ]),
        download=True)

    # iterate over the full training set (unknown classes are relabeled below)
    train_loader = torch.utils.data.DataLoader(transformed_dataset,
                                               batch_size=1,
                                               shuffle=False)
    print('-- #samples in training set: ', len(train_loader.dataset))

    # load validation data
    transformed_dataset = datasets.CIFAR10(
        root='./data',
        train=False,
        transform=transforms.Compose([transforms.ToTensor(), normalize]))

    # full validation set (no class filtering here)
    val_loader = torch.utils.data.DataLoader(transformed_dataset,
                                             batch_size=1,
                                             shuffle=False)
    print('-- #samples in validation set: ', len(val_loader.dataset))

    all_train_zf = []
    all_train_label = []

    # testing
    model.eval()
    for input, target in train_loader:
        if not args.cpu:
            input = input.cuda(non_blocking=True)

        if args.half:
            input = input.half()

        # compute output
        with torch.no_grad():
            zf = model(input)

        all_train_zf.append(zf.cpu().numpy())
        target = target.item()
        if target in unk_classes:
            all_train_label.append(-1)
        else:
            all_train_label.append(target)

    sio.savemat(os.path.join(args.save_dir, 'F_results.mat'), {
        'train_zf': all_train_zf,
        'train_label': all_train_label
    })

    # reshape the results into the format expected by the EDC block
    zfs = np.array(all_train_zf)
    n, _, c = np.shape(zfs)
    zfs = zfs.reshape((n, c))
    labels = np.array(all_train_label).reshape((n, 1))

    indices_kwn = np.where(labels != -1)
    indices_unk = np.where(labels == -1)

    gt_kwn = labels[indices_kwn[0], :] + 1
    gt_unk = labels[indices_unk[0], :] + 2
    zf_kwn = zfs[indices_kwn[0], :]
    zf_unk = zfs[indices_unk[0], :]

    sio.savemat(os.path.join(args.save_dir, args.dataset + '_kwn.mat'), {
        'x': zf_kwn,
        'y': gt_kwn
    })
    sio.savemat(os.path.join(args.save_dir, args.dataset + '_unk.mat'), {
        'x': zf_unk,
        'y': gt_unk
    })
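A short follow-up sketch for reading the saved features back, assuming scipy is installed; the directory and dataset names are placeholders:

import scipy.io as sio

# 'x' has shape (n, c) and 'y' has shape (n, 1), matching what main() writes
kwn = sio.loadmat('save_dir/cifar10_kwn.mat')
unk = sio.loadmat('save_dir/cifar10_unk.mat')
print(kwn['x'].shape, kwn['y'].shape, unk['x'].shape)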
Example #3
import lenet
import simplenet
import resnet
import resnet_with_compression
import mobilenet
import mobilenetv2
import densenet
import dpn
import preact_resnet

cifar10_networks = {
    'lenet': lenet.LeNet(),
    'simplenet9': simplenet.SimpleNet9(),
    'simplenet9_thin': simplenet.SimpleNet9_thin(),
    'simplenet9_mobile': simplenet.SimpleNet9_mobile(),
    'simplenet7': simplenet.SimpleNet7(),
    'simplenet7_thin': simplenet.SimpleNet7_thin(),
    'resnet18NNFC1': resnet_with_compression.ResNet18NNFC1(),
    'resnet18EH0': resnet_with_compression.ResNet18EH(layer=0, quantizer=20),
    'resnet18EH1': resnet_with_compression.ResNet18EH(layer=1, quantizer=6),
    'resnet18EH2': resnet_with_compression.ResNet18EH(layer=2, quantizer=5),
    'resnet18EH3': resnet_with_compression.ResNet18EH(layer=3, quantizer=3),
    'resnet18EH4': resnet_with_compression.ResNet18EH(layer=4, quantizer=10),
    'resnet18JPEG90': resnet_with_compression.ResNet18JPEG(quantizer=90),
    'resnet18JPEG87': resnet_with_compression.ResNet18JPEG(quantizer=87),
    'resnet18AVC': resnet_with_compression.ResNet18AVC(layer=2, quantizer=24),
    'resnet18': resnet.ResNet18(),
    'resnet101': resnet.ResNet101(),
    'mobilenetslimplus': mobilenet.MobileNetSlimPlus(),
    'mobilenetslim': mobilenet.MobileNetSlim(),
    'mobilenet': mobilenet.MobileNet(),
    'mobilenetv2': mobilenetv2.MobileNetV2(),
    'densenet121': densenet.DenseNet121(),
    'dpn92': dpn.DPN92(),
    'preact_resnet18': preact_resnet.PreActResNet18(),
}
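A brief usage sketch; the network name would normally come from a CLI flag, so the literal below is only an illustration:

net_name = 'densenet121'
model = cifar10_networks[net_name]
print(sum(p.numel() for p in model.parameters()), 'parameters')

Note that every entry is instantiated when the dict is built; storing constructors instead (e.g. 'densenet121': densenet.DenseNet121) and calling only the selected one would avoid constructing all networks up front.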
Example #4
    # Load data.
    batch_size = args.batch_size
    trainloader, testloader = helper.load_imagenet200(args.datadir, \
            batch_size, kwargs)
    nr_classes = 200

    # Load the polars and update the trainy labels.
    classpolars = torch.from_numpy(np.load(args.hpnfile)).float()
    args.output_dims = int(args.hpnfile.split("/")[-1].split("-")[1][:-1])

    # Load the model.
    if args.network == "resnet32":
        model = resnet.ResNet(32, args.output_dims, 1, classpolars)
    elif args.network == "densenet121":
        model = densenet.DenseNet121(args.output_dims, classpolars)
    model = model.to(device)

    # Load the optimizer.
    optimizer = helper.get_optimizer(args.optimizer, model.parameters(), \
            args.learning_rate, args.momentum, args.decay)

    # Initialize the loss functions.
    f_loss = nn.CosineSimilarity(eps=1e-9).cuda()

    # Main loop.
    testscores = []
    learning_rate = args.learning_rate
    for i in range(args.epochs):
        print("---")
        # Learning rate decay.
Example #5
def main():
    global args, best_prec1
    args = parser.parse_args()

    # create the save directory if it does not exist
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    # take unknown classes
    unk_classes = set(map(int, args.unk))
    print('-- Unknown classes are: ', unk_classes)

    # define the classifier
    model = densenet.DenseNet121()
    model = torch.nn.DataParallel(model)

    if args.cpu:
        model.cpu()
    else:
        model.cuda()

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.evaluate, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    # load training data
    transformed_dataset = datasets.CIFAR10(
        root='./data',
        train=True,
        transform=transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(32, 4),
            transforms.ToTensor(),
            normalize,
        ]),
        download=True)

    # extract known classes in training set
    transformed_dataset_kwn = []
    for data, target in transformed_dataset:
        if target not in unk_classes:
            transformed_dataset_kwn.append((data, target))
    train_loader = torch.utils.data.DataLoader(transformed_dataset_kwn,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)
    print('-- #samples in training set: ', len(train_loader.dataset))

    # load validation data
    transformed_dataset = datasets.CIFAR10(
        root='./data',
        train=False,
        transform=transforms.Compose([transforms.ToTensor(), normalize]))

    # extract known classes in validation set
    transformed_dataset_kwn = []
    for data, target in transformed_dataset:
        if target not in unk_classes:
            transformed_dataset_kwn.append((data, target))
    val_loader = torch.utils.data.DataLoader(transformed_dataset_kwn,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)
    print('-- #samples in validation set: ', len(val_loader.dataset))

    # define loss function and optimizer
    criterion = nn.CrossEntropyLoss()
    if args.cpu:
        criterion = criterion.cpu()
    else:
        criterion = criterion.cuda()

    if args.half:
        model.half()
        criterion.half()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    val_acc_list = []
    for epoch in range(args.start_epoch, args.epochs):

        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)
        val_acc_list.append(prec1)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
            },
            is_best,
            filename=os.path.join(args.save_dir, 'modelF_CIFAR10.tar'))

    sio.savemat(os.path.join(args.save_dir, 'F_val_acc.mat'),
                {'val_acc': val_acc_list})
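The script relies on helpers such as adjust_learning_rate, train, validate and save_checkpoint that are defined elsewhere. As a minimal sketch only, one common step-decay form of adjust_learning_rate could look like this (the base rate, step and gamma are assumptions, not the author's values):

def adjust_learning_rate(optimizer, epoch, base_lr=0.1, step=30, gamma=0.1):
    # decay the learning rate by `gamma` every `step` epochs (hypothetical schedule)
    lr = base_lr * (gamma ** (epoch // step))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr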