Example #1
def apply_second_layer_corr_no_reshape(model, configuration):
    # Count the parameters this scheme leaves trainable: all first-layer conv
    # weights, plus whatever do_correlation_init_no_reshape reports as the
    # parameter cost of the correlation-based second-layer init.
    idx = 0
    counter = 0
    for name, m in model.named_modules():
        if isinstance(m, nn.Conv2d):
            weights = m.weight.data.cpu().numpy()
            if idx == 0:
                counter += weights.size
            if idx == 1:
                return counter + do_correlation_init_no_reshape(weights, None)
            idx += 1
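# A minimal, hypothetical driver for the counting helper above; `cornet` is
# the dicarlolab/CORnet package, and the empty configuration dict is an
# assumption (the argument is unused in the body).
import cornet

model = cornet.cornet_s(pretrained=False)  # randomly initialized CORnet-S
n_params = apply_second_layer_corr_no_reshape(model, configuration={})
print('trainable parameters under this scheme:', n_params)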
Example #2
def apply_kernel_convolution_second_layer(model, configuration):
    # Copy the trained V1.conv1 weights into the first conv layer, then derive
    # the second layer's kernels from them via do_kernel_convolution_init.
    trained = cornet.cornet_s(pretrained=True)
    idx = 0
    previous_weights = None
    for name, m in model.named_modules():
        if isinstance(m, nn.Conv2d):
            weights = m.weight.data.cpu().numpy()
            if idx == 0:
                mod = trained.module.V1.conv1
                m.weight.data = mod.weight.data
                previous_weights = m.weight.data.cpu().numpy()
            if idx == 1:
                previous_weights = do_kernel_convolution_init(weights, previous_weights)
                m.weight.data = torch.Tensor(previous_weights)
            idx += 1
    return model
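# do_kernel_convolution_init is project-internal and not shown; the following
# is a minimal, hypothetical sketch of one way such an init could work: build
# each second-layer kernel by correlating a random pair of trained first-layer
# kernels, then center-crop to the target size. Pair sampling, channel
# averaging, cropping, and rescaling are all assumptions, not the project's
# method; it also assumes first-layer kernels are at least as large (k1 >= k).
import numpy as np
from scipy.signal import correlate2d

def do_kernel_convolution_init_sketch(weights, previous_weights):
    out_c, in_c, k, _ = weights.shape
    prev = previous_weights.mean(axis=1)  # collapse input channels: (n_prev, k1, k1)
    rng = np.random.default_rng(0)
    new = np.empty_like(weights)
    for o in range(out_c):
        for i in range(in_c):
            a, b = rng.integers(0, prev.shape[0], size=2)
            full = correlate2d(prev[a], prev[b], mode='same')  # (k1, k1)
            c = (full.shape[0] - k) // 2
            new[o, i] = full[c:c + k, c:c + k]  # center crop to (k, k)
    return new * (weights.std() / new.std())  # rescale to the untrained scale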
Example #3
def apply_second_layer_only(model, configuration):
    # Assumes `model` starts from random weights: copy trained V1.conv1 into
    # the first conv layer, then initialize the second via do_correlation_init.
    trained = cornet.cornet_s(pretrained=True)
    idx = 0
    previous_weights = None
    for name, m in model.named_modules():
        if isinstance(m, nn.Conv2d):
            weights = m.weight.data.cpu().numpy()
            if idx == 0:
                mod = trained.module.V1.conv1
                m.weight.data = mod.weight.data
                previous_weights = m.weight.data.cpu().numpy()
            if idx == 1:
                previous_weights = do_correlation_init(weights, previous_weights)
                m.weight.data = torch.Tensor(previous_weights)
            idx += 1
    return model
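# do_correlation_init is likewise project-internal; a minimal, hypothetical
# sketch of a correlation-based init: estimate the mean and covariance of the
# flattened trained first-layer kernels and sample new kernels from that
# Gaussian, center-cropping to the second-layer kernel size. Everything below
# is an assumption for illustration, not the project's implementation.
import numpy as np

def do_correlation_init_sketch(weights, previous_weights):
    flat = previous_weights.reshape(previous_weights.shape[0], -1)  # (n_prev, d)
    mean, cov = flat.mean(axis=0), np.cov(flat, rowvar=False)
    rng = np.random.default_rng(0)
    n = weights.shape[0] * weights.shape[1]
    k1, k = previous_weights.shape[-1], weights.shape[-1]
    # One d-dimensional sample per new kernel slice; singular covariances are
    # handled by the generator's default SVD factorization.
    samples = rng.multivariate_normal(mean, cov, size=n)
    samples = samples.reshape(n, previous_weights.shape[1], k1, k1).mean(axis=1)
    c = (k1 - k) // 2  # assumes k1 >= k
    return samples[:, c:c + k, c:c + k].reshape(weights.shape)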
Example #4
def do_distribution_weight_init(weights, config, index, **kwargs):
    # dim == 0: fit the mixture at the kernel level; dim == 1: at the channel
    # level. The incoming (untrained) weights are discarded and replaced by
    # the trained weights of the layer at `index` before fitting.
    trained = cornet.cornet_s(pretrained=True, map_location=torch.device('cpu'))
    for sub in ['module'] + layers[index].split('.'):
        trained = trained._modules.get(sub)
    weights = trained.weight.data.cpu().numpy()
    dim = config['dim'] if 'dim' in config else config[f'dim_{index}']
    components = config[f'comp_{index}'] if f'comp_{index}' in config else 0
    if dim == 0:
        # one sample per kernel: (out_channels, in_channels * k * k)
        params = weights.reshape(weights.shape[0], -1)
    else:
        # one sample per channel slice: (out_channels * in_channels, k * k)
        params = weights.reshape(-1, weights.shape[2] * weights.shape[3])
    if components != 0:
        # closed-form parameter count of a full-covariance GMM with that many
        # components: mixture weights + means + covariance matrices
        return components + components * params.shape[1] + components * params.shape[1] * params.shape[1]

    best_gmm = mixture_gaussian(params, params.shape[0], components, f'weight_dim{dim}_{index}', analyze=True)
    return len(best_gmm.weights_.flatten()) + len(best_gmm.means_.flatten()) + len(best_gmm.covariances_.flatten())
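# mixture_gaussian is defined elsewhere in the project; judging from the call
# site, it fits a Gaussian mixture to `params` and returns the fitted model.
# A minimal stand-in using scikit-learn, selecting the component count by BIC
# when `components == 0` (n_samples, name, and analyze are accepted but unused
# here); the body is an assumption, not the project's implementation.
import numpy as np
from sklearn.mixture import GaussianMixture

def mixture_gaussian_sketch(params, n_samples, components, name, analyze=False):
    candidates = range(1, 11) if components == 0 else [components]
    best_gmm, best_bic = None, np.inf
    for k in candidates:
        gmm = GaussianMixture(n_components=k, covariance_type='full',
                              random_state=0).fit(params)
        bic = gmm.bic(params)
        if bic < best_bic:
            best_gmm, best_bic = gmm, bic
    return best_gmm  # exposes weights_, means_, covariances_ as used above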
Example #5
    with open(output_path + 'results_CORnet-S_train_IT_random_2_gpus.pkl',
              'rb') as f:
        data = pickle.load(f)
        # validation = data['val']
        item = data[-1]
        print(item)
    # np.random.seed(0)
    # torch.manual_seed(0)
    # layer_based.random_state = RandomState(0)
    identifier = 'CORnet-S_train_IT_seed_0'
    mod = importlib.import_module('cornet.cornet_s')
    model_ctr = getattr(mod, 'CORnet_S')
    model = model_ctr()
    # model = cornet.cornet_s(True)
    model3 = cornet.cornet_s(False)
    model2 = cornet.cornet_s(False)
    if os.path.exists(output_path + f'{identifier}_epoch_20.pth.tar'):
        logger.info('Restore weights from stored results')
        checkpoint = torch.load(output_path + f'{identifier}_epoch_20.pth.tar',
                                map_location=lambda storage, loc: storage)
        model2.load_state_dict(checkpoint['state_dict'])
        checkpoint2 = torch.load(output_path + 'CORnet-S_random.pth.tar',
                                 map_location=lambda storage, loc: storage)

        class Wrapper(Module):
            # Adds a `module` attribute so state dicts saved from
            # nn.DataParallel (keys prefixed with 'module.') load cleanly.
            def __init__(self, model):
                super(Wrapper, self).__init__()
                self.module = model

        model = Wrapper(model)
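        # Hypothetical continuation: with the wrapper in place, the
        # 'module.'-prefixed keys line up with the wrapper's `module`
        # attribute; assumes checkpoint2 has the same {'state_dict': ...}
        # layout as the first checkpoint.
        model.load_state_dict(checkpoint2['state_dict'])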
Example #6
def apply_generic_other(model, configuration):
    # Generic, configuration-driven initializer: for each conv layer named in
    # the configuration, fetch the corresponding trained layer and apply the
    # configured init function; optionally set biases and batch-norm
    # parameters as well.
    trained = cornet.cornet_s(pretrained=True,
                              map_location=torch.device('cpu')).module
    idx = 0
    previous_weights = None
    for name, m in model.named_modules():
        if isinstance(m, nn.Conv2d):
            if name in configuration:
                trained_weights = trained
                for part in configuration[name].split('.'):
                    trained_weights = getattr(trained_weights, part)
                trained_weights = trained_weights.weight.data.cpu().numpy()
                previous_weights = configuration[configuration[name]](
                    trained_weights,
                    config=configuration,
                    shape=m.weight.data.cpu().shape,
                    previous=previous_weights,
                    index=layers.index(configuration[name]))
                assert m.weight.data.shape == previous_weights.shape
                m.weight.data = torch.Tensor(previous_weights)
                if m.bias is not None and 'bn_init' in configuration:
                    # Initialize the conv bias from the matching batch norm's bias.
                    conv = configuration[name]
                    batchnorm = [key for key, value in conv_to_norm.items()
                                 if value == conv][0]
                    trained_weights = trained
                    for part in batchnorm.split('.'):
                        trained_weights = getattr(trained_weights, part)
                    bias = trained_weights.bias.data.cpu().numpy()
                    bias = configuration['bn_init'](bias,
                                                    config=configuration,
                                                    previous=previous_weights,
                                                    index=idx,
                                                    shape=m.bias.data.cpu().shape)
                    m.bias.data = torch.Tensor(bias)
                elif m.bias is not None and 'no_bn' in configuration:
                    m.bias.data.fill_(0)
            idx += 1
        if isinstance(m, nn.BatchNorm2d) and name in configuration:
            trained_weights = trained
            for part in configuration[name].split('.'):
                trained_weights = getattr(trained_weights, part)
            if 'bn_init' in configuration:
                bias = trained_weights.bias.data.cpu().numpy()
                bn_weight = trained_weights.weight.data.cpu().numpy()
                bn_weight = configuration['bn_init'](bn_weight,
                                                     config=configuration,
                                                     previous=previous_weights,
                                                     index=idx,
                                                     shape=m.weight.data.cpu().shape)
                bias = configuration['bn_init'](bias,
                                                config=configuration,
                                                previous=previous_weights,
                                                index=idx,
                                                shape=m.bias.data.cpu().shape)
                m.weight.data = torch.Tensor(bn_weight)
                m.bias.data = torch.Tensor(bias)
            if 'batchnorm' in configuration:
                # Copy (or tile) the trained running statistics.
                m.track_running_stats = False
                length = m.running_var.shape[0]
                if length <= trained_weights.running_var.shape[0]:
                    m.running_var = trained_weights.running_var[0:length]
                    m.running_mean = trained_weights.running_mean[0:length]
                else:
                    tr_length = trained_weights.running_var.shape[0]
                    for i in range(int(math.ceil(length / tr_length))):
                        end = min((i + 1) * tr_length, length)
                        # Slice the source too, so the last (possibly shorter)
                        # chunk does not raise a shape mismatch.
                        m.running_var[i * tr_length:end] = \
                            trained_weights.running_var[:end - i * tr_length]
                        m.running_mean[i * tr_length:end] = \
                            trained_weights.running_mean[:end - i * tr_length]

    return model
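# The shape of `configuration` can be inferred from the lookups above: each
# conv name in `model` maps to a layer path inside the trained network, and
# that path maps to the init function applied to its weights. A hypothetical
# example; the names and function choices are illustrative only.
configuration = {
    'module.V2.conv_input': 'V2.conv_input',  # conv in `model` -> path in `trained`
    'V2.conv_input': do_correlation_init,     # path -> init function for its weights
    'bn_init': do_distribution_weight_init,   # optional: derive biases/BN parameters
    'batchnorm': True,                        # optional: copy trained running stats
}
model = apply_generic_other(model, configuration)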
Example #7
def apply_generic(model, configuration):
    # Like apply_generic_other, but conv layers are looked up by their index
    # in the global `layers` list rather than by name, and batch norms not
    # covered by configuration['layers'] are refilled from the trained model.
    trained = cornet.cornet_s(pretrained=True,
                              map_location=torch.device('cpu')).module
    idx = 0
    previous_weights = None
    previous_module = None
    previous_name = None
    for name, m in model.named_modules():
        if isinstance(m, nn.Conv2d):
            if layers[idx] in configuration:
                trained_weights = trained
                for part in name.split('.'):
                    trained_weights = getattr(trained_weights, part)
                trained_weights = trained_weights.weight.data.cpu().numpy()
                previous_weights = configuration[layers[idx]](
                    trained_weights,
                    config=configuration,
                    previous=previous_weights,
                    shape=trained_weights.shape,
                    index=idx,
                    model=model)
                m.weight.data = torch.Tensor(previous_weights)
            idx += 1
        if isinstance(m, nn.BatchNorm2d) and not (
                any(value in name for value in configuration['layers'])
                or conv_to_norm[name] in configuration['layers']):
            trained_weights = trained
            for part in name.split('.'):
                trained_weights = getattr(trained_weights, part)
            if 'bn_init' in configuration:
                bias = trained_weights.bias.data.cpu().numpy()
                bn_weight = trained_weights.weight.data.cpu().numpy()
                if configuration['bn_init'] == do_batch_from_image_init:
                    # This initializer returns ready-made tensors.
                    bn_weight, bn_bias = configuration['bn_init'](
                        m,
                        config=configuration,
                        previous=previous_weights,
                        index=idx,
                        shape=bn_weight.shape,
                        model=model,
                        previous_module=previous_module,
                        previous_name=previous_name)
                    m.weight.data = bn_weight
                    m.bias.data = bn_bias
                else:
                    bn_weight = configuration['bn_init'](bn_weight,
                                                         config=configuration,
                                                         previous=previous_weights,
                                                         index=idx,
                                                         shape=bn_weight.shape,
                                                         model=model)
                    bias = configuration['bn_init'](bias,
                                                    config=configuration,
                                                    previous=previous_weights,
                                                    index=idx,
                                                    shape=bn_weight.shape,
                                                    model=model)
                    # Assign only in this branch; the branch above already set
                    # the tensors, and assigning here as well would clobber
                    # bn_bias with the untransformed bias.
                    m.weight.data = torch.Tensor(bn_weight)
                    m.bias.data = torch.Tensor(bias)
                if 'momentum' in configuration:
                    m.momentum = 0
            if 'batchnorm' in configuration:
                m.track_running_stats = False
                m.running_var = trained_weights.running_var
                m.running_mean = trained_weights.running_mean
        previous_module = m
        previous_name = name

    return model
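# apply_generic keys off the global `layers` list instead of module names, and
# configuration['layers'] marks layers whose batch norms are left untouched.
# A hypothetical configuration; the entries are illustrative only.
configuration = {
    layers[1]: do_correlation_init,  # init function for the second conv layer
    'layers': [layers[1]],           # batch norms tied to these layers are skipped
    'bn_init': do_distribution_weight_init,
    'batchnorm': True,               # refill the remaining BNs' running stats
}
model = apply_generic(model, configuration)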
Example #8
from cornet import cornet_s, cornet_z, cornet_rt
import torch
Cnet = cornet_s(pretrained=True)  # 408Mb
# cornet_z(True)  # 15.8Mb
# cornet_rt(True)  # 39.8Mb
#%%
from layer_hook_utils import get_module_names, register_hook_by_module_names
module_names, module_types, module_spec = get_module_names(
    Cnet.module, input_size=(3, 227, 227), device="cuda")
#%%
from grad_RF_estim import gradmap2RF_square, grad_RF_estimate
Cnet_m = Cnet.module
#%%
Cnet_m.cuda().eval()
for param in Cnet_m.parameters():
    param.requires_grad_(False)
unit_list = [
    ("Cornet_s", ".V1.ReLUnonlin1", 5, 57, 57),
    ("Cornet_s", ".V1.ReLUnonlin2", 5, 28, 28),
    ("Cornet_s", ".V2.Conv2dconv_input", 5, 28, 28),
    ("Cornet_s", ".CORblock_SV2", 5, 14, 14),
    ("Cornet_s", ".V4.Conv2dconv_input", 5, 14, 14),
    ("Cornet_s", ".CORblock_SV4", 5, 7, 7),
    ("Cornet_s", ".IT.Conv2dconv_input", 5, 7, 7),
    ("Cornet_s", ".CORblock_SIT", 5, 3, 3),
    ("Cornet_s", ".decoder.Linearlinear", 5,
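# grad_RF_estimate and register_hook_by_module_names are project utilities; as
# a rough, self-contained stand-in for reading out one of the listed units, a
# plain forward hook works. The `.V1.nonlin1` attribute path is an assumption
# about CORnet-S's module layout.
acts = {}

def save_to(key):
    def hook(module, inputs, output):
        acts[key] = output.detach()
    return hook

handle = Cnet_m.V1.nonlin1.register_forward_hook(save_to(".V1.ReLUnonlin1"))
with torch.no_grad():
    Cnet_m(torch.randn(1, 3, 227, 227, device="cuda"))
handle.remove()
print(acts[".V1.ReLUnonlin1"].shape)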
Example #9
    def __init__(self, model_name):
        if model_name == "vgg16":
            self.model = models.vgg16(pretrained=True)
            self.layers = list(self.model.features) + list(
                self.model.classifier)
            self.layername = layername_dict[model_name]
            self.model.cuda().eval()
            self.inputsize = (3, 227, 227)
        elif model_name == "vgg16-face":
            self.model = models.vgg16(pretrained=False, num_classes=2622)
            self.model.load_state_dict(
                torch.load(join(torchhome, "vgg16_face.pt")))
            self.layers = list(self.model.features) + list(
                self.model.classifier)
            self.layername = layername_dict["vgg16"]
            self.model.cuda().eval()
            self.inputsize = (3, 227, 227)
        elif model_name == "alexnet":
            self.model = models.alexnet(pretrained=True)
            self.layers = list(self.model.features) + list(
                self.model.classifier)
            self.layername = layername_dict[model_name]
            self.model.cuda().eval()
            self.inputsize = (3, 227, 227)
        elif model_name == "densenet121":
            self.model = models.densenet121(pretrained=True)
            self.layers = list(self.model.features) + [self.model.classifier]
            self.layername = layername_dict[model_name]
            self.model.cuda().eval()
            self.inputsize = (3, 227, 227)
        elif model_name == "densenet169":
            self.model = models.densenet169(pretrained=True)
            self.layername = None
            self.model.cuda().eval()
            self.inputsize = (3, 227, 227)
        elif model_name == "resnet101":
            self.model = models.resnet101(pretrained=True)
            self.inputsize = (3, 227, 227)
            self.layername = None
            self.model.cuda().eval()
        elif "resnet50" in model_name:
            if "resnet50-face" in model_name:  # resnet trained on vgg-face dataset.
                self.model = models.resnet50(pretrained=False,
                                             num_classes=8631)
                if model_name == "resnet50-face_ft":
                    self.model.load_state_dict(
                        torch.load(join(torchhome, "resnet50_ft_weight.pt")))
                elif model_name == "resnet50-face_scratch":
                    self.model.load_state_dict(
                        torch.load(
                            join(torchhome, "resnet50_scratch_weight.pt")))
                else:
                    raise NotImplementedError(
                        "Feasible names are resnet50-face_scratch, resnet50-face_ft"
                    )
            else:
                self.model = models.resnet50(pretrained=True)
                if model_name == "resnet50_linf_8":  # robust version of resnet50.
                    self.model.load_state_dict(
                        torch.load(join(torchhome, "imagenet_linf_8_pure.pt")))
                elif model_name == "resnet50_linf_4":
                    self.model.load_state_dict(
                        torch.load(join(torchhome, "imagenet_linf_4_pure.pt")))
                elif model_name == "resnet50_l2_3_0":
                    self.model.load_state_dict(
                        torch.load(join(torchhome, "imagenet_l2_3_0_pure.pt")))
            self.model.cuda().eval()
            self.inputsize = (3, 227, 227)
            self.layername = None
        elif model_name == "cornet_s":
            from cornet import cornet_s
            Cnet = cornet_s(pretrained=True)
            self.model = Cnet.module
            self.model.cuda().eval()
            self.inputsize = (3, 227, 227)
            self.layername = None
        else:
            raise NotImplementedError("Cannot find the specified model %s" %
                                      model_name)

        for param in self.model.parameters():
            param.requires_grad_(False)
        # self.preprocess = transforms.Compose([transforms.ToPILImage(),
        #                                       transforms.Resize(size=(224, 224)),
        #                                       transforms.ToTensor(),
        self.normalize = transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225])  # Imagenet normalization RGB
        self.RGBmean = torch.tensor([0.485, 0.456, 0.406]).view([1, 3, 1, 1]).cuda()
        self.RGBstd = torch.tensor([0.229, 0.224, 0.225]).view([1, 3, 1, 1]).cuda()
        self.artiphys = False
        self.hooks = []
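# Hypothetical usage sketch; `TorchScorer` stands in for the enclosing class,
# whose name is not visible in this snippet.
import torch

scorer = TorchScorer("vgg16")
img = torch.rand(1, *scorer.inputsize).cuda()  # dummy RGB batch in [0, 1]
img = (img - scorer.RGBmean) / scorer.RGBstd   # same stats as self.normalize
out = scorer.model(img)
print(out.shape)  # (1, 1000) for the ImageNet-trained models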