def __restore(self, file_name, sess):
    variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="network/mobilenet_encoder")
    weights_dict = load_obj(file_name)
    for variable in variables:
        for key, value in weights_dict.items():
            if key in variable.name:
                print('Layer Loaded ', key)
                sess.run(tf.assign(variable, value))
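# The snippets on this page rely on load_obj / save_obj helpers that are not shown.
# A minimal sketch, assuming they are the usual pickle-based pair (names and behaviour
# here are an assumption, not taken from the original project):
import pickle

def load_obj(file_name):
    # Load a pickled dict of weights, e.g. {'conv_1/weights': ndarray, ...}
    with open(file_name, 'rb') as f:
        return pickle.load(f)

def save_obj(obj, file_name):
    # Pickle a dict of weights to disk
    with open(file_name, 'wb') as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)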
Example n. 2
def __restore(file_name, sess):
    variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                  scope="network/mobilenet_encoder")
    net_dict = load_obj(file_name)
    for variable in variables:
        # Strip the leading "network/" prefix (8 characters) so the name matches the pickle keys
        name_in_pkl = variable.name[8:]
        if name_in_pkl in net_dict:
            print('Loaded:', variable.name)
            sess.run(tf.assign(variable, net_dict[name_in_pkl]))
    def __convert_graph_names(self, path):
        """
        Convert the original MobileNet pretrained-weights structure into the structure used by
        our model.
        :param path: (string) path to the original pretrained weights .pkl file
        :return: None
        """
        src_dict = load_obj(path)
        variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='mobilenet_encoder')
        dict_output = {}

        for key, value in src_dict.items():
            for variable in variables:
                for i in range(len(src_dict)):
                    for j in range(len(variables)):
                        # Block Conv2d_{i}_* of the original model corresponds to conv_ds_{i+1} in ours.
                        if key.find("Conv2d_" + str(i) + "_") != -1 and variable.name.find(
                                "conv_ds_" + str(j) + "/") != -1 and i + 1 == j:
                            # Batch-norm parameters of the depthwise / pointwise convolutions.
                            depthwise_bn = ("depthwise" in key and "depthwise" in variable.name
                                            and ("gamma" in key and "gamma" in variable.name
                                                 or "beta" in key and "beta" in variable.name))
                            pointwise_bn = ("pointwise" in key and "pointwise" in variable.name
                                            and ("gamma" in key and "gamma" in variable.name
                                                 or "beta" in key and "beta" in variable.name))
                            # Convolution weights and biases.
                            weights_or_biases = (
                                "pointwise/weights" in key and "pointwise/weights" in variable.name
                                or "depthwise_weights" in key and "depthwise/weights" in variable.name
                                or "pointwise/biases" in key and "pointwise/biases" in variable.name
                                or "depthwise/biases" in key and "depthwise/biases" in variable.name
                                or "1x1/weights" in key and "1x1/weights" in variable.name
                                or "1x1/biases" in key and "1x1/biases" in variable.name)
                            if depthwise_bn or pointwise_bn or weights_or_biases:
                                dict_output[variable.name] = value
                        # The stem convolution: Conv2d_0 in the original model, conv_1 in ours.
                        elif key.find("Conv2d_0/") != -1 and variable.name.find("conv_1/") != -1:
                            if ("weights" in key and "weights" in variable.name
                                    or "biases" in key and "biases" in variable.name
                                    or "beta" in key and "beta" in variable.name
                                    or "gamma" in key and "gamma" in variable.name):
                                dict_output[variable.name] = value

        save_obj(dict_output, self.pretrained_path)
        print("Pretrained weights converted to the new structure. The filename is mobilenet_v1.pkl.")
    def __restore(self, file_name, sess):
        variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        try:
            print("Loading ImageNet pretrained weights...")
            weights_dict = load_obj(file_name)
            run_list = []
            for variable in variables:
                for key, value in weights_dict.items():
                    # Appending ':' matches the variable itself and excludes the extra slot
                    # variables that adaptive optimizers create for it.
                    if key + ":" in variable.name:
                        run_list.append(tf.assign(variable, value))

            sess.run(run_list)
            print("ImageNet Pretrained Weights Loaded Initially\n\n")
        except IOError:
            print("No pretrained ImageNet weights exist. Skipping...\n\n")
    def __save(self, sess):
        file_name = self.pretrained_path
        variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        try:
            print("Saving the current values of the pretrained weights...")
            weights_dict = load_obj(file_name)
            saved_dict = {}
            for variable in variables:
                for key, value in weights_dict.items():
                    if key + ":" in variable.name:
                        saved_dict[key] = sess.run(variable)

            save_obj(saved_dict, 'pretrained_weights/shufflenet_cityscapes.pkl')
            # Sanity check: every saved key should also exist in the source pickle.
            for k, v in saved_dict.items():
                if k not in weights_dict:
                    import pdb; pdb.set_trace()

            print("Pretrained weights saved to pretrained_weights/shufflenet_cityscapes.pkl\n\n")
        except IOError:
            print("No pretrained ImageNet weights exist. Skipping...\n\n")
Example n. 6
def prepare_dataloaders(dataset_split,
                        dataset_path,
                        metadata_filename,
                        batch_size=32,
                        sample_size=-1,
                        valid_split=0.8):
    '''
    Utility function to prepare dataloaders for training.

    Parameters
    ----------
    dataset_split : str
        Any of 'train', 'extra', 'test'.
    dataset_path : str
        Absolute path to the dataset (e.g. '.../data/SVHN/train').
    metadata_filename : str
        Absolute path to the metadata pickle file.
    batch_size : int
        Mini-batch size.
    sample_size : int
        Number of elements to use as sample size,
        for debugging purposes only. If -1, use all samples.
    valid_split : float
        Fraction of the samples assigned to the training split; the remaining
        (1 - valid_split) samples form the validation split. Should be in [0, 1].

    Returns
    -------
    if dataset_split in ['train', 'extra']:
        train_loader: torch.utils.data.DataLoader
            Dataloader containing training data.
        valid_loader: torch.utils.data.DataLoader
            Dataloader containing validation data.

    if dataset_split in ['test']:
        test_loader: torch.utils.data.DataLoader
            Dataloader containing test data.

    '''

    assert dataset_split in ['train', 'test', 'extra'], "check dataset_split"

    metadata = load_obj(metadata_filename)

    #  dataset_path = datadir / dataset_split

    firstcrop = FirstCrop(0.3)
    rescale = Rescale((64, 64))
    random_crop = RandomCrop((54, 54))
    to_tensor = ToTensor()

    # Declare transformations

    transform = transforms.Compose(
        [firstcrop, rescale, random_crop, to_tensor])

    dataset = SVHNDataset(metadata, data_dir=dataset_path, transform=transform)

    indices = np.arange(len(metadata))
    #  indices = np.random.permutation(indices)

    # Only use a sample amount of data
    if sample_size != -1:
        indices = indices[:sample_size]

    if dataset_split in ['train', 'extra']:

        train_idx = indices[:round(valid_split * len(indices))]
        valid_idx = indices[round(valid_split * len(indices)):]

        train_sampler = torch.utils.data.SubsetRandomSampler(train_idx)
        valid_sampler = torch.utils.data.SubsetRandomSampler(valid_idx)

        # Prepare a train and validation dataloader
        train_loader = DataLoader(dataset,
                                  batch_size=batch_size,
                                  shuffle=False,
                                  num_workers=4,
                                  sampler=train_sampler)

        valid_loader = DataLoader(dataset,
                                  batch_size=batch_size,
                                  shuffle=False,
                                  num_workers=4,
                                  sampler=valid_sampler)

        return train_loader, valid_loader

    elif dataset_split in ['test']:

        test_sampler = torch.utils.data.SequentialSampler(indices)

        # Prepare a test dataloader
        test_loader = DataLoader(dataset,
                                 batch_size=batch_size,
                                 num_workers=4,
                                 shuffle=False,
                                 sampler=test_sampler)

        return test_loader
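# A hypothetical call for illustration only; the paths are placeholders and FirstCrop,
# Rescale, RandomCrop, ToTensor and SVHNDataset must be importable from the project.
# With valid_split=0.8 the first 80% of the indices become the training split and the
# remaining 20% the validation split.
train_loader, valid_loader = prepare_dataloaders(
    dataset_split='train',
    dataset_path='/path/to/data/SVHN/train',
    metadata_filename='/path/to/data/SVHN/train_metadata.pkl',
    batch_size=32,
    sample_size=-1,
    valid_split=0.8)
print(len(train_loader), len(valid_loader))  # number of mini-batches in each split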
Example n. 7
def prepare_dataloaders(
    dataset_split,
    dataset_path,
    metadata_filename,
    batch_size=32,
    sample_size=-1,
    valid_split=0.1,
    test_split=0.1,
    num_worker=0,
    valid_metadata_filename=None,
    valid_dataset_dir=None,
):
    """
    Utility function to prepare dataloaders for training.

    Parameters
    ----------
    dataset_split : str
        Any of 'train', 'extra', 'test'.
    dataset_path : str
        Absolute path to the dataset (e.g. '.../data/SVHN/train').
    metadata_filename : str
        Absolute path to the metadata pickle file.
    batch_size : int
        Mini-batch size.
    sample_size : int
        Number of elements to use as sample size,
        for debugging purposes only. If -1, use all samples.
    valid_split : float
    test_split : float
        Not used directly by this variant; when valid_metadata_filename and
        valid_dataset_dir are provided, the validation loader is built from them.
    num_worker : int
        Number of worker processes used by the dataloaders.
    valid_metadata_filename : str
        Absolute path to the metadata pickle file of the validation set.
    valid_dataset_dir : str
        Absolute path to the directory containing the validation set.

    Returns
    -------
    if dataset_split in ['train', 'extra']:
        train_loader: torch.utils.data.DataLoader
            Dataloader containing training data.
        valid_loader: torch.utils.data.DataLoader
            Dataloader containing validation data (None when no validation
            set directory is provided).

    if dataset_split in ['test']:
        test_loader: torch.utils.data.DataLoader
            Dataloader containing test data.

    """

    assert dataset_split in ["train", "test", "extra"], "check dataset_split"

    metadata = load_obj(metadata_filename)

    #  dataset_path = datadir / dataset_split

    firstcrop = FirstCrop(0.3)
    downscale = Rescale((64, 64))
    random_crop = RandomCrop((54, 54))
    to_tensor = ToTensor()
    normalize = None
    # normalize = Normalize((0.434, 0.442, 0.473), (0.2, 0.202, 0.198))

    # Declare transformations

    transform = transforms.Compose(
        [firstcrop, downscale, random_crop, to_tensor])
    test_transform = transforms.Compose(
        [FirstCrop(0.1), Rescale((54, 54)), to_tensor])

    dataset = SVHNDataset(
        metadata,
        data_dir=dataset_path,
        transform=transform,
        normalize_transform=normalize,
    )

    dataset_length = len(metadata)

    indices = np.arange(dataset_length)
    # Only use a sample amount of data
    if sample_size != -1:
        indices = indices[:sample_size]
        dataset_length = sample_size

    if dataset_split in ["train", "extra"]:

        # Prepare a train and validation dataloader
        valid_loader = None
        if valid_dataset_dir is not None:
            valid_metadata = load_obj(valid_metadata_filename)
            valid_dataset = SVHNDataset(
                valid_metadata,
                data_dir=valid_dataset_dir,
                transform=test_transform,
                normalize_transform=normalize,
            )
            valid_loader = DataLoader(
                valid_dataset,
                batch_size=batch_size,
                shuffle=False,
                num_workers=num_worker,
            )

        train_sampler = torch.utils.data.SubsetRandomSampler(indices)
        train_loader = DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=False,
            sampler=train_sampler,
            num_workers=num_worker,
        )

        return train_loader, valid_loader

    elif dataset_split in ["test"]:

        test_sampler = torch.utils.data.SequentialSampler(indices)

        # change the transformer pipeline
        dataset.transform = test_transform

        # Prepare a test dataloader
        test_loader = DataLoader(
            dataset,
            batch_size=batch_size,
            num_workers=num_worker,
            shuffle=False,
            sampler=test_sampler,
        )

        return test_loader
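# A hypothetical call that supplies a separate validation set, for illustration only;
# all paths are placeholders. When valid_dataset_dir is omitted, valid_loader is None.
train_loader, valid_loader = prepare_dataloaders(
    dataset_split='train',
    dataset_path='/path/to/data/SVHN/train',
    metadata_filename='/path/to/data/SVHN/train_metadata.pkl',
    batch_size=32,
    num_worker=2,
    valid_metadata_filename='/path/to/data/SVHN/valid_metadata.pkl',
    valid_dataset_dir='/path/to/data/SVHN/valid')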
Example n. 8
def prepare_test_dataloader(dataset_path, metadata_filename, batch_size,
                            sample_size):
    """
    Utility function to prepare dataloaders for testing.

    Parameters
    ----------
    dataset_path : str
        Absolute path to the test dataset (e.g. '.../data/SVHN/test').
    metadata_filename : str
        Absolute path to the metadata pickle file.
    batch_size : int
        Mini-batch size.
    sample_size : int
        Number of elements to use as sample size,
        for debugging purposes only. If -1, use all samples.

    Returns
    -------
        test_loader: torch.utils.data.DataLoader
            Dataloader containing test data.

    """

    firstcrop = FirstCrop(0.3)
    rescale = Rescale((64, 64))
    random_crop = RandomCrop((54, 54))
    to_tensor = ToTensor()

    # Declare transformations

    transform = transforms.Compose([
        firstcrop,
        rescale,
        random_crop,
        to_tensor,
    ])

    # Load metadata file
    metadata = load_obj(metadata_filename)

    # Initialize Dataset
    dataset = SVHNDataset(metadata, data_dir=dataset_path, transform=transform)

    indices = np.arange(len(metadata))

    # Only use a sample amount of data
    if sample_size != -1:
        indices = indices[:sample_size]

    test_sampler = torch.utils.data.SequentialSampler(indices)

    # Prepare a test dataloader
    test_loader = DataLoader(dataset,
                             batch_size=batch_size,
                             num_workers=4,
                             shuffle=False,
                             sampler=test_sampler)
    return test_loader
Example n. 9
def prepare_dataloaders(cfg):
    """
    Utility function to prepare dataloaders for training.

    Parameters of the configuration (cfg)
    -------------------------------------
    dataset_path : list of str
        Absolute paths to the 'train' and 'extra' subsets of the dataset
        (e.g. '.../data/SVHN/train').
    metadata_filename : list of str
        Absolute paths to the metadata pickle files of the 'train' and 'extra' subsets.
    batch_size : int
        Mini-batch size.
    sample_size : list of int
        Number of elements to use as sample size for each subset,
        for debugging purposes only. If -1, use all samples.
    valid_split : list of float
        Fraction of each subset assigned to the training split; the remaining
        samples form the validation split. Each value should be in [0, 1].

    Returns
    -------
        train_loader: torch.utils.data.DataLoader
            Dataloader containing training data.
        valid_loader: torch.utils.data.DataLoader
            Dataloader containing validation data.

    """

    dataset_path = cfg.INPUT_DIR
    metadata_filename = cfg.METADATA_FILENAME
    batch_size = cfg.TRAIN.BATCH_SIZE
    sample_size = cfg.TRAIN.SAMPLE_SIZE
    valid_split = cfg.TRAIN.VALID_SPLIT

    firstcrop = FirstCrop(0.3)
    rescale = Rescale((64, 64))
    random_crop = RandomCrop((54, 54))
    to_tensor = ToTensor()

    # Declare transformations

    transform = transforms.Compose([
        firstcrop,
        rescale,
        random_crop,
        to_tensor,
    ])

    # index 0 for train subset, 1 for extra subset
    metadata_train = load_obj(metadata_filename[0])
    metadata_extra = load_obj(metadata_filename[1])

    train_data_dir = dataset_path[0]
    extra_data_dir = dataset_path[1]

    valid_split_train = valid_split[0]
    valid_split_extra = valid_split[1]

    # Initialize the combined Dataset
    dataset = FullSVHNDataset(metadata_train,
                              metadata_extra,
                              train_data_dir,
                              extra_data_dir,
                              transform=transform)

    indices_train = np.arange(len(metadata_train))
    indices_extra = np.arange(len(metadata_train),
                              len(metadata_extra) + len(metadata_train))

    # Only use a sample amount of data
    if sample_size[0] != -1:
        indices_train = indices_train[:sample_size[0]]

    if sample_size[1] != -1:
        indices_extra = indices_extra[:sample_size[1]]

    # Select the indices to use for the train/valid split from the 'train' subset
    train_idx_train = indices_train[:round(valid_split_train *
                                           len(indices_train))]
    valid_idx_train = indices_train[round(valid_split_train *
                                          len(indices_train)):]

    # Select the indices to use for the train/valid split from the 'extra' subset
    train_idx_extra = indices_extra[:round(valid_split_extra *
                                           len(indices_extra))]
    valid_idx_extra = indices_extra[round(valid_split_extra *
                                          len(indices_extra)):]

    # Combine indices from 'train' and 'extra' as one single train/validation split
    train_idx = np.concatenate((train_idx_train, train_idx_extra))
    valid_idx = np.concatenate((valid_idx_train, valid_idx_extra))

    # Define the data samplers
    train_sampler = torch.utils.data.SubsetRandomSampler(train_idx)
    valid_sampler = torch.utils.data.SubsetRandomSampler(valid_idx)

    # Prepare a train and validation dataloader
    train_loader = DataLoader(dataset,
                              batch_size=batch_size,
                              shuffle=False,
                              num_workers=4,
                              sampler=train_sampler)

    valid_loader = DataLoader(dataset,
                              batch_size=batch_size,
                              shuffle=False,
                              num_workers=4,
                              sampler=valid_sampler)

    return train_loader, valid_loader
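# A hypothetical configuration object, for illustration only. Projects in this style often
# use a yacs CfgNode, but any object exposing these attributes works; all values below are
# placeholders.
from types import SimpleNamespace

cfg = SimpleNamespace(
    INPUT_DIR=['/path/to/data/SVHN/train', '/path/to/data/SVHN/extra'],
    METADATA_FILENAME=['/path/to/train_metadata.pkl', '/path/to/extra_metadata.pkl'],
    TRAIN=SimpleNamespace(BATCH_SIZE=32, SAMPLE_SIZE=[-1, -1], VALID_SPLIT=[0.8, 0.95]))

train_loader, valid_loader = prepare_dataloaders(cfg)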