import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import torch

# DenseNet is assumed to be the model class defined elsewhere in this project,
# e.g. `from models import DenseNet`.


def demo(depth=58, growth_rate=12, efficient=False):
    # Get the DenseNet configuration: three dense blocks of (depth - 4) // 6 layers each
    if (depth - 4) % 3:
        raise Exception('Invalid depth')
    block_config = [(depth - 4) // 6 for _ in range(3)]
    model = DenseNet(
        growth_rate=growth_rate,
        block_config=block_config,
        num_classes=4,
        small_inputs=True,
        efficient=efficient,
    )
    model.load_state_dict(torch.load('./ckpt2/model.dat'))
    # Connection-strength grid: one 9x9 matrix per dense block
    # (9 layers per block when depth=58).
    w = np.zeros((3, 9, 9))
    for name in model.state_dict():
        # Heuristic parse of the parameter names: keys of a fixed length whose
        # character at index 37 is 'c' are the connection conv weights; the block
        # index and the two layer indices are read from fixed character positions.
        if len(name) == 49 and name[37] == 'c':
            x, i, j = int(name[19]), int(name[32]), int(name[34])
            a = abs(model.state_dict()[name])
            w[x - 1][j][i - 1] = a.sum().item()  # total absolute weight of this connection
    for x in range(3):
        for i in range(9):
            mx = 0
            for j in range(i, 9):
                mx = max(mx, w[x][i][j])
            for j in range(i, 9):
                w[x][i][j] = w[x][i][j] / mx
    # Mask the cells above the diagonal (j > i) so they are not drawn.
    mask = np.triu(np.ones((9, 9), dtype=bool), k=1)
    # Draw one heatmap per dense block, each on its own figure.
    sns.set()
    for x in range(3):
        plt.figure()
        ax = sns.heatmap(w[x], vmin=0, vmax=1, cmap='jet', square=True, mask=mask)
        ax.set_title('Dense Block %s' % (x + 1))
        ax.set_xlabel('Target layer (l)', fontsize=15)
        ax.set_ylabel('Source layer (s)', fontsize=15)
        plt.show()
Example 2
class LineFilter:
    def __init__(self):
        self.model = DenseNet(growth_rate=8,
                              block_config=(2, 2, 2),
                              bn_size=4,
                              drop_rate=0,
                              num_init_features=8 * 2,
                              small_inputs=True,
                              efficient=True)
        self.model.eval()
        self.model.load_state_dict(
            torch.load("save/param_best.pth",
                       map_location=lambda storage, loc: storage))
        summary(self.model, input_size=(3, 480, 640))

    def predict(self, input_data):
        # Run inference without tracking gradients, clamp activations to at most
        # 255, zero out everything below the detection threshold of 150, and
        # return the result as a uint8 image.
        with torch.no_grad():
            output = self.model(input_data).squeeze()
        output[output > 255] = 255
        output[output < 150] = 0
        return output.numpy().astype(dtype=np.uint8)
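

# A minimal usage sketch for LineFilter (not part of the original snippet; it
# assumes `torch` is imported and the checkpoint at save/param_best.pth exists).
# The random tensor stands in for a normalized 480x640 RGB frame.
if __name__ == '__main__':
    line_filter = LineFilter()
    frame = torch.rand(1, 3, 480, 640)
    mask = line_filter.predict(frame)  # uint8 array; values below 150 are zeroed
    print(mask.shape, mask.dtype)

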
def demo(data, save, depth=40, growth_rate=12, batch_size=256):
    """
    Applies temperature scaling to a trained model.

    Takes a pretrained DenseNet-CIFAR100 model, and a validation set
    (parameterized by indices on train set).
    Applies temperature scaling, and saves a temperature scaled version.

    NB: the "save" parameter references a DIRECTORY, not a file.
    In that directory, there should be two files:
    - model.pth (model state dict)
    - valid_indices.pth (a list of indices corresponding to the validation set).

    data (str) - path to directory where data should be loaded from/downloaded
    save (str) - directory with necessary files (see above)
    """
    # Load model state dict
    model_filename = os.path.join(save, 'model.pth')
    if not os.path.exists(model_filename):
        raise RuntimeError('Cannot find file %s to load' % model_filename)
    state_dict = torch.load(model_filename)

    # Load validation indices
    valid_indices_filename = os.path.join(save, 'valid_indices.pth')
    if not os.path.exists(valid_indices_filename):
        raise RuntimeError('Cannot find file %s to load' %
                           valid_indices_filename)
    valid_indices = torch.load(valid_indices_filename)

    # Regenerate validation set loader
    mean = [0.5071, 0.4867, 0.4408]
    stdv = [0.2675, 0.2565, 0.2761]
    test_transforms = tv.transforms.Compose([
        tv.transforms.ToTensor(),
        tv.transforms.Normalize(mean=mean, std=stdv),
    ])
    valid_set = tv.datasets.CIFAR100(data,
                                     train=True,
                                     transform=test_transforms,
                                     download=True)
    valid_loader = torch.utils.data.DataLoader(
        valid_set,
        pin_memory=True,
        batch_size=batch_size,
        sampler=SubsetRandomSampler(valid_indices))

    # Load original model
    if (depth - 4) % 3:
        raise Exception('Invalid depth')
    block_config = [(depth - 4) // 6 for _ in range(3)]
    orig_model = DenseNet(growth_rate=growth_rate,
                          block_config=block_config,
                          num_classes=100).cuda()
    orig_model.load_state_dict(state_dict)

    # Now we're going to wrap the model in a module that adds temperature scaling
    model = ModelWithTemperature(orig_model)

    # Tune the model temperature, and save the results
    model.set_temperature(valid_loader)
    model_filename = os.path.join(save, 'model_with_temperature.pth')
    torch.save(model.state_dict(), model_filename)
    print('Temperature scaled model saved to %s' % model_filename)
    print('Done!')
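

# For context, a minimal sketch of what a temperature-scaling wrapper like the
# ModelWithTemperature used above typically does (the class below is an
# illustrative assumption, not the actual implementation): it learns a single
# scalar T on held-out data and divides the logits by it, which calibrates the
# confidences without changing the predicted classes.
import torch.nn as nn


class TemperatureWrapperSketch(nn.Module):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.temperature = nn.Parameter(torch.ones(1) * 1.5)

    def forward(self, x):
        return self.model(x) / self.temperature

    def set_temperature(self, valid_loader):
        # Collect the validation logits once, then fit T by minimizing the NLL.
        self.model.eval()
        logits_list, labels_list = [], []
        with torch.no_grad():
            for inputs, labels in valid_loader:
                logits_list.append(self.model(inputs))
                labels_list.append(labels)
        logits, labels = torch.cat(logits_list), torch.cat(labels_list)
        nll = nn.CrossEntropyLoss()
        optimizer = torch.optim.LBFGS([self.temperature], lr=0.01, max_iter=50)

        def closure():
            optimizer.zero_grad()
            loss = nll(logits / self.temperature, labels)
            loss.backward()
            return loss

        optimizer.step(closure)
        return self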


if __name__ == '__main__':
    """
    Train a 40-layer DenseNet-BC on CIFAR-100

    Args:
        --data (str) - path to directory where data should be loaded from/downloaded
            (default $DATA_DIR)
        --save (str) - path to save the model to (default /tmp)

        --valid_size (int) - size of validation set
        --seed (int) - manually set the random seed (default None)
    """

    data = path + '\\data\\'
    save = path + '\\model\\'

    block_config = [(40 - 4) // 6 for _ in range(3)]
    model = DenseNet(growth_rate=12,
                     block_config=block_config,
                     num_classes=100)
    params = torch.load(save + 'model.pth')
    model.load_state_dict(params)
    # fire.Fire(train)
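
    # A sketch of using the restored model for inference (the random tensor is a
    # stand-in; with the default small_inputs=True the network expects
    # CIFAR-sized 3x32x32 images):
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 32, 32))
    print('predicted class:', logits.argmax(dim=1).item())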
Example 5
print(model)

if not os.path.isdir(args.checkpoints):
    os.mkdir(args.checkpoints)

# optionally resume from a checkpoint
if args.resume:
    if os.path.isfile(args.resume):
        print("=> loading checkpoint '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        if 'epoch' in checkpoint:
            args.start_epoch = checkpoint['epoch'] + 1
            state_dict = checkpoint['state_dict']
        else:
            state_dict = checkpoint
        model.load_state_dict(state_dict=state_dict, strict=False)
        print("=> loaded checkpoint '{}' (epoch {})".format(
            args.resume, args.start_epoch - 1))
    else:
        print("=> no checkpoint found at '{}'".format(args.resume))
print(args)

if len(args.gpus) > 0:
    model.cuda()
    cudnn.benchmark = True
    if len(args.gpus) > 1:
        model = nn.DataParallel(model, device_ids=args.gpus).cuda()

engine = Engine()
meter_loss = tnt.meter.AverageValueMeter()
topk = [1, 5]
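
# A sketch of how these meters are typically wired into the torchnet Engine
# (an assumption about the surrounding training loop, which is not shown here:
# `network` is a closure mapping a sample to (loss, output), and `train_loader`
# and `optimizer` are defined elsewhere):
classacc = tnt.meter.ClassErrorMeter(topk=topk, accuracy=True)


def on_forward(state):
    # Accumulate the running loss and top-1/top-5 accuracy for each batch.
    meter_loss.add(state['loss'].item())
    classacc.add(state['output'].data, state['sample'][1])


def on_start_epoch(state):
    meter_loss.reset()
    classacc.reset()


engine.hooks['on_forward'] = on_forward
engine.hooks['on_start_epoch'] = on_start_epoch
# engine.train(network, train_loader, maxepoch=args.epochs, optimizer=optimizer)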
Example 6
use_cuda = True
bn_size = None
multigpus = False
is_eval = False
model = DenseNet(input_size=32, bn_size=bn_size, efficient=False)
model_effi = DenseNet(input_size=32, bn_size=bn_size, efficient=True)
# Perturb the batch-norm running statistics so the test also exercises buffers,
# not just learned parameters.
model.features.denseblock2.denselayer12._modules['norm1'].running_mean.fill_(1)
model.features.denseblock2.denselayer12._modules['norm1'].running_var.fill_(2)
# The efficient implementation names its bottleneck sub-modules differently,
# so remap the state-dict keys before loading them into model_effi.
state = model.state_dict()
state = OrderedDict(
    (k.replace('.norm1.', '.bottleneck.norm_'), v) for k, v in state.items())
state = OrderedDict(
    (k.replace('.conv1.', '.bottleneck.conv_'), v) for k, v in state.items())

model_effi.load_state_dict(state)
if use_cuda:
    model.cuda()
    model_effi.cuda()
    cudnn.deterministic = True
    if multigpus:
        model = nn.DataParallel(model, device_ids=[0, 1])
        model_effi = nn.DataParallel(model_effi, device_ids=[0, 1])
if is_eval:
    model.eval()
    model_effi.eval()
# create the model inputs
input_var = torch.randn(8, 3, 32, 32)
if use_cuda:
    input_var = input_var.cuda()
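
# The natural follow-up, sketched here (an assumption about what the original
# test does next): with identical weights, the standard and memory-efficient
# models should produce numerically matching outputs.
out = model(input_var)
out_effi = model_effi(input_var)
print('max |standard - efficient| difference: %.3e'
      % (out - out_effi).abs().max().item())
assert torch.allclose(out, out_effi, atol=1e-5), 'outputs diverge'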
Example 7
def demo(data='./',
         save='./output201',
         depth=100,
         growth_rate=12,
         efficient=True,
         valid_size=None,
         n_epochs=5,
         batch_size=64,
         seed=None):
    """
    A demo to show off training of efficient DenseNets.
    Trains and evaluates a DenseNet-BC on CIFAR-10.

    Args:
        data (str) - path to directory where data should be loaded from/downloaded
            (default './')
        save (str) - path to save the model to (default './output201')

        depth (int) - depth of the network (number of convolution layers) (default 100)
        growth_rate (int) - number of features added per DenseNet layer (default 12)
        efficient (bool) - use the memory efficient implementation? (default True)

        valid_size (int) - size of validation set (default None)
        n_epochs (int) - number of epochs for training (default 5)
        batch_size (int) - size of minibatch (default 64)
        seed (int) - manually set the random seed (default None)
    """

    # Get densenet configuration
    if (depth - 4) % 3:
        raise Exception('Invalid depth')
    block_config = [(depth - 4) // 6 for _ in range(4)]

    # Data transforms
    mean = [0.5071, 0.4867, 0.4408]
    stdv = [0.2675, 0.2565, 0.2761]
    train_transforms = transforms.Compose([
        #transforms.RandomCrop(224, padding=4),
        #transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=stdv),
    ])
    test_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=stdv),
    ])

    # Datasets
    train_set = faceDataset(data,
                            train=True,
                            transform=train_transforms,
                            download=False)
    test_set = facetestDataset(data,
                               train=False,
                               transform=test_transforms,
                               download=False)

    if valid_size:
        valid_set = faceDataset(data,
                                train=False,
                                transform=test_transforms,
                                download=False)
        indices = torch.randperm(len(train_set))
        train_indices = indices[:len(indices) - valid_size]
        valid_indices = indices[len(indices) - valid_size:]
        train_set = torch.utils.data.Subset(train_set, train_indices)
        valid_set = torch.utils.data.Subset(valid_set, valid_indices)
    else:
        valid_set = None

    # Models
    model = DenseNet(
        growth_rate=growth_rate,
        block_config=block_config,
        num_classes=200,
        small_inputs=False,
        efficient=efficient,
        #drop_rate=0.5
    )
    print(model)

    # Make save directory
    if not os.path.exists(save):
        os.makedirs(save)
    if not os.path.isdir(save):
        raise Exception('%s is not a dir' % save)
    model.load_state_dict(
        torch.load(
            'D:\\Model-Training\\efficient_densenet_pytorch\\output201\\model.dat'
        ))
    # Train the model
    train(model=model,
          train_set=train_set,
          valid_set=valid_set,
          test_set=test_set,
          save=save,
          n_epochs=n_epochs,
          batch_size=batch_size,
          seed=seed)
    print('Done!')
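

# A sketch of launching this demo from the command line (assuming python-fire,
# which the commented-out fire call in an earlier example suggests; not part of
# the original snippet):
if __name__ == '__main__':
    import fire
    fire.Fire(demo)
    # e.g. python demo.py --data=./ --save=./output201 --n_epochs=5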
Example 8
def load_pretrained_models(directory,
                           depths,
                           growth_rates,
                           num_trials=2,
                           num_classes=10,
                           ensemble_size=4,
                           dataset="cifar10",
                           vary="d"):
    """
        Return pretrained model stored in specified directory
        OUTPUT -- OrderedDict, KEYS: gr|d|e, value: array of size num_trials; each 
                    position stores the pretrained model
    """
    result = {}
    result['single'] = OrderedDict()
    result['vertical'] = OrderedDict()
    result['horizontal'] = OrderedDict()

    for growth_rate, depth in list(itertools.product(growth_rates, depths)):

        d = "gr_" + str(growth_rate) + "_d_" + str(depth) + "_e_" + str(
            ensemble_size) + "_" + str(dataset)

        k = "|".join([str(growth_rate), str(depth), str(ensemble_size)])

        result['single'][k] = []
        result['vertical'][k] = []
        result['horizontal'][k] = []

        for i in range(num_trials):

            # load single network
            os.chdir(os.path.join(directory, d + "_vd"))
            model = DenseNet(
                growth_rate=growth_rate,
                block_config=[(depth - 4) // 6 for _ in range(3)],
                num_classes=num_classes,
                small_inputs=True,
                efficient=False,
            )
            state_dict = torch.load(str(i) + "single_model.dat",
                                    map_location=torch.device('cpu'))
            model.load_state_dict(state_dict)
            result['single'][k].append(model)

            # vertical ensembles
            os.chdir(os.path.join(directory, d + "_vd"))
            ensemble_depth, _ = setup.get_ensemble_depth(
                depth=depth,
                growth_rate=growth_rate,
                ensemble_size=ensemble_size)

            vertical_ensembles = []
            for e in range(ensemble_size):
                vertical_ensembles.append(
                    DenseNet(
                        growth_rate=growth_rate,
                        block_config=[(ensemble_depth - 4) // 6
                                      for _ in range(3)],
                        num_classes=num_classes,
                        small_inputs=True,
                        efficient=False,
                    ))
                state_dict = torch.load(str(e) + str(i) + "_model.dat",
                                        map_location=torch.device('cpu'))
                vertical_ensembles[e].load_state_dict(state_dict)
            result['vertical'][k].append(vertical_ensembles)

            # horizontal ensembles
            os.chdir(os.path.join(directory, d + "_vgr"))
            ensemble_growth_rate, _ = setup.get_ensemble_growth_rate(
                depth=depth,
                growth_rate=growth_rate,
                ensemble_size=ensemble_size)
            horizontal_ensembles = []

            for e in range(ensemble_size):
                horizontal_ensembles.append(
                    DenseNet(
                        growth_rate=ensemble_growth_rate,
                        block_config=[(depth - 4) // 6 for _ in range(3)],
                        num_classes=num_classes,
                        small_inputs=True,
                        efficient=False,
                    ))
                state_dict = torch.load(str(e) + str(i) + "_model.dat",
                                        map_location=torch.device('cpu'))
                horizontal_ensembles[e].load_state_dict(state_dict)
            result['horizontal'][k].append(horizontal_ensembles)

        os.chdir(directory)
    return result
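

# A usage sketch (the directory, depths and growth rates below are placeholders,
# not values from the original code):
if __name__ == '__main__':
    models = load_pretrained_models(directory='/path/to/checkpoints',
                                    depths=[40, 100],
                                    growth_rates=[12],
                                    num_trials=2,
                                    ensemble_size=4)
    key = '12|40|4'                                 # "growth_rate|depth|ensemble_size"
    single_net = models['single'][key][0]           # first trial, single network
    vertical_nets = models['vertical'][key][0]      # list of ensemble_size shallower nets
    horizontal_nets = models['horizontal'][key][0]  # list of nets with reduced growth rate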