Example #1
    def __init__(self, param: Parameter, dataset_names: List[str],
                 dataset_dict: Dict[str, InfoStorage],
                 indices: List[Tuple[str, int]], train: bool):
        DatasetBase.__init__(self, param, dataset_names, dataset_dict, indices,
                             train)
        self.single_label = param.single_label
        self.use_stop = param.model_level == 'low' and not param.is_control
        if self.use_stop:
            self.stop_probabilities = [
                self.get_stop_probability(i, self.max_data_length)
                for i in range(len(indices))
            ]

        self.use_low_level_segment = param.use_low_level_segment
        self.model_level = param.model_level
        self.output_dim = 1 if self.single_label else fetch_onehot_vector_dim(
            self.use_low_level_segment)
        self.onehot_func = partial(fetch_onehot_vector_from_road_option,
                                   low_level=self.use_low_level_segment)
        self.index_func = partial(fetch_index_from_road_option,
                                  low_level=self.use_low_level_segment)

        self.balance_label = param.balance_label if train else False
        self.balance_counts = [0 for _ in range(self.output_dim)]
        self.balance_weights = [1.0 for _ in range(self.output_dim)]
        if not param.grouped_batch and (self.balance_label
                                        or self.single_label):
            self.balance_index()
        logger.info('loaded {} trajectories'.format(len(self)))
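
For context outside the repository: the snippets on this page lean on a small family of road-option helpers. Below is a minimal sketch of two of them, assuming CARLA-style road options; the option lists, their ordering, and their sizes are assumptions, not taken from the source.

from typing import List

# Assumption: plausible road-option lists; the real project defines these
# elsewhere and may use different names, ordering, or cardinality.
LOW_LEVEL_OPTIONS: List[str] = ['left', 'right', 'straight', 'lanefollow']
HIGH_LEVEL_OPTIONS: List[str] = LOW_LEVEL_OPTIONS + [
    'changelaneleft', 'changelaneright']

def fetch_onehot_vector_dim(use_low_level_segment: bool) -> int:
    # The command one-hot has one slot per road option.
    options = LOW_LEVEL_OPTIONS if use_low_level_segment else HIGH_LEVEL_OPTIONS
    return len(options)

def fetch_index_from_road_option(road_option: str, low_level: bool = False) -> int:
    # Maps a road-option string to its index in the (assumed) option list.
    options = LOW_LEVEL_OPTIONS if low_level else HIGH_LEVEL_OPTIONS
    return options.index(road_option.lower())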
Example #2
    def __init__(self, param: Parameter, num_words: int):
        torch.nn.Module.__init__(self)
        self.num_words = num_words
        self.encoder_type = param.encoder_type
        self.encoder_embedding_size = param.encoder_embedding_size
        self.encoder_hidden_size = param.encoder_hidden_size
        self.use_glove_embedding = param.use_glove_embedding
        self.use_low_level_segment = param.use_low_level_segment
        if param.model_level == 'low':
            self.onehot_dim = fetch_onehot_vector_dim(
                param.use_low_level_segment)
        else:
            self.onehot_dim = fetch_num_sentence_commands()

        # encoder
        if self.encoder_type == 'gru':
            if self.use_glove_embedding:
                self.embedding = glove_word_embedding_layer()
            else:
                self.embedding = torch.nn.Embedding(
                    self.num_words, self.encoder_embedding_size, padding_idx=0)
            self.encoder_gru = torch.nn.GRU(
                input_size=self.encoder_embedding_size,
                hidden_size=self.encoder_hidden_size,
                batch_first=True)
        elif self.encoder_type == 'onehot':
            self.encoder_linear = torch.nn.Linear(self.onehot_dim,
                                                  self.encoder_hidden_size)
        else:
            raise TypeError('invalid encoder_type {}'.format(
                self.encoder_type))
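
A hedged run-time sketch of the two encoder branches above; the shapes, vocabulary size, and variable names are assumptions, not part of the source.

import torch

batch, seq_len, vocab, emb, hidden, onehot_dim = 4, 12, 100, 300, 256, 6

# 'gru' branch: token ids -> embedding -> GRU; h_n summarizes the sentence.
token_ids = torch.randint(1, vocab, (batch, seq_len))
embedding = torch.nn.Embedding(vocab, emb, padding_idx=0)
gru = torch.nn.GRU(input_size=emb, hidden_size=hidden, batch_first=True)
_, h_n = gru(embedding(token_ids))                    # h_n: (1, batch, hidden)

# 'onehot' branch: a one-hot command projected to the same hidden size.
cmds = torch.randint(0, onehot_dim, (batch,))
onehot = torch.nn.functional.one_hot(cmds, onehot_dim).float()
encoded = torch.nn.Linear(onehot_dim, hidden)(onehot)     # (batch, hidden)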
Example #3
    def __init__(self, param: Parameter, cmd: int):
        CheckpointBase.__init__(self, param)
        self.cmd = cmd
        self.param = param
        self.step_elapsed = 0
        self.use_low_level_segment = param.use_low_level_segment
        self.onehot_dim = fetch_onehot_vector_dim(param.use_low_level_segment)
        self.onehot_func = partial(
            onehot_from_index,
            use_low_level_segment=param.use_low_level_segment)
        self.encoder_hidden, self.decoder_hidden, self.images = None, None, []
        self.initialize()
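
The bound onehot_func is presumably invoked later with an integer command (the call site is not shown on this page); a sketch using onehot_from_index from Example #7:

from functools import partial

onehot_func = partial(onehot_from_index, use_low_level_segment=True)
command = onehot_func(2)   # one-hot row tensor of shape (1, onehot_dim)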
Example #4
    def __init__(self, param: Parameter, num_words: int):
        ModelBase.__init__(self, param)

        self.use_low_level_segment: bool = param.use_low_level_segment
        self.onehot_dim: int = fetch_onehot_vector_dim(
            self.use_low_level_segment)

        self.image_processor = ConvLayers(self.channels)
        self.linear = torch.nn.Linear(self.image_vec_dim, self.out_linear)
        self.decoders = ModuleList(
            [Decoder(param, self.in_decoder) for _ in range(self.onehot_dim)])
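
The ModuleList holds one Decoder per road option, so a plausible forward pass routes features to the decoder indexed by the current command. Only the module layout comes from the snippet; the method signature and shapes below are assumptions.

    def forward(self, image: torch.Tensor, cmd: int) -> torch.Tensor:
        # Assumed shapes: image (batch, C, H, W) -> flattened conv features.
        feature = self.image_processor(image).flatten(start_dim=1)
        feature = self.linear(feature)
        # One decoder per road option; route by the integer command index.
        return self.decoders[cmd](feature)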
Example #5
def fetch_dataset_list(param: Parameter,
                       is_control: bool) -> List[LowLevelDataset]:
    split_train = param.split_train
    assert not split_train
    dataset_names, control_dataset_dict, stop_dataset_dict, indices = _fetch_name_index_list(
        param)
    dataset_dict = (control_dataset_dict if param.model_level == 'low'
                    and is_control else stop_dataset_dict)
    indices = list(chain.from_iterable(indices))
    num_labels = fetch_onehot_vector_dim(
        param.use_low_level_segment) if not param.single_label else 1
    label_indices = [[] for _ in range(num_labels)]
    cls = _fetch_dataset_class(param.model_level, param.ablation_type)
    for name, index in indices:
        road_option = dataset_dict[name].get_road_option_from_trajectory(index)
        option_index = fetch_index_from_road_option(
            road_option, param.use_low_level_segment)
        label_indices[option_index].append((name, index))
    return [
        cls(param, dataset_names, dataset_dict, l, True) for l in label_indices
    ]
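
Usage sketch: because the function returns one dataset per road-option label, a caller can draw label-balanced batches from per-label loaders. The batch size and the pre-built param are assumptions.

from torch.utils.data import DataLoader

datasets = fetch_dataset_list(param, is_control=True)
# One loader per road-option label, e.g. for label-balanced sampling.
loaders = [DataLoader(d, batch_size=32, shuffle=True) for d in datasets]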
Example #6
def fetch_dataset_list_pair(param: Parameter, is_control: bool) -> \
        Tuple[List[LowLevelDataset], List[LowLevelDataset]]:
    split_train = param.split_train
    train_ratio = param.train_ratio
    assert split_train
    dataset_names, control_dataset_dict, stop_dataset_dict, indices = _fetch_name_index_list(
        param)
    dataset_dict = (control_dataset_dict if param.model_level == 'low'
                    and is_control else stop_dataset_dict)
    # NOTE: do not flatten indices here; each per-dataset sublist must be
    # split by train_ratio below before the two halves are flattened.

    num_labels = fetch_onehot_vector_dim(
        param.use_low_level_segment) if not param.single_label else 1
    train_indices, valid_indices = [], []
    for subindices in indices:
        num_segments = len(subindices)
        num_train = int(round(num_segments * train_ratio))
        train_indices.append(subindices[:num_train])
        valid_indices.append(subindices[num_train:])
    train_indices = list(chain.from_iterable(train_indices))
    valid_indices = list(chain.from_iterable(valid_indices))

    train_label_indices = [[] for _ in range(num_labels)]
    valid_label_indices = [[] for _ in range(num_labels)]
    for name, index in train_indices:
        road_option = dataset_dict[name].get_road_option_from_trajectory(index)
        option_index = fetch_index_from_road_option(
            road_option, param.use_low_level_segment)
        train_label_indices[option_index].append((name, index))
    for name, index in valid_indices:
        road_option = dataset_dict[name].get_road_option_from_trajectory(index)
        option_index = fetch_index_from_road_option(
            road_option, param.use_low_level_segment)
        valid_label_indices[option_index].append((name, index))
    cls = _fetch_dataset_class(param.model_level, param.ablation_type)
    train_dataset_list = [
        cls(param, dataset_names, dataset_dict, l, True)
        for l in train_label_indices
    ]
    valid_dataset_list = [
        cls(param, dataset_names, dataset_dict, l, False)
        for l in valid_label_indices
    ]
    return train_dataset_list, valid_dataset_list
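
To make the split above concrete, a small worked illustration (the town name is a placeholder): with train_ratio = 0.8 and 10 segments, int(round(10 * 0.8)) = 8, so the first 8 segments go to training and the last 2 to validation.

subindices = [('town01', i) for i in range(10)]                # 10 segments
num_train = int(round(len(subindices) * 0.8))                  # -> 8
train, valid = subindices[:num_train], subindices[num_train:]  # 8 / 2 split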
Example #7
def onehot_from_index(cmd: int, use_low_level_segment: bool) -> torch.Tensor:
    onehot_dim = fetch_onehot_vector_dim(use_low_level_segment)
    return fetch_onehot_vector_from_index(cmd, use_low_level_segment).view(
        1, onehot_dim)
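
Usage sketch for the helper above; the command index is arbitrary.

vec = onehot_from_index(cmd=1, use_low_level_segment=True)
# Row vector shaped (1, onehot_dim), ready to concatenate with batch features.
assert vec.shape == (1, fetch_onehot_vector_dim(True))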