# NOTE: the input-fn snippets below are TF 1.x class-method fragments. They assume
# `import os`, `import tensorflow as tf`, `from tensorflow import Dimension, TensorShape`,
# and a project-specific `print_info` logging helper.
    def _get_test_input_fn(self):

        dataset = tf.data.Dataset.from_generator(
            self._yield_test_samples, (tf.float32, tf.int32),
            output_shapes=(TensorShape(
                [Dimension(32), Dimension(32),
                 Dimension(3)]), TensorShape(Dimension(10))))
        dataset = dataset.map(lambda image, label: ({
            self.FEATURE_NAME: image
        }, label))
        dataset = dataset.batch(batch_size=self._batch_size)
        dataset = dataset.prefetch(self._prefetch_size)
        print_info("Dataset output sizes are: ")
        print_info(dataset.output_shapes)
        return dataset
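
For reference, a minimal standalone sketch of a generator compatible with the `from_generator` call above; the name `yield_test_samples` and the random data are placeholders, not part of the original project:

import numpy as np

def yield_test_samples():
    # Yield (image, label) pairs matching the declared dtypes and shapes:
    # float32 images of shape (32, 32, 3) and int32 one-hot labels of length 10.
    for _ in range(8):
        image = np.random.rand(32, 32, 3).astype(np.float32)
        label = np.eye(10, dtype=np.int32)[np.random.randint(10)]
        yield image, label
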
    def _get_test_input_function(self):
        """
        Inheriting class must implement this
        :return: tf.data.Dataset
        """
        dataset = tf.data.Dataset.from_generator(
            self._yield_test_samples, (tf.float32, tf.bool, tf.bool),
            output_shapes=(TensorShape([
                Dimension(self._hparams.frames_per_sample),
                Dimension(self._hparams.neff)
            ]),
                           TensorShape([
                               Dimension(self._hparams.frames_per_sample),
                               Dimension(self._hparams.neff)
                           ]),
                           TensorShape([
                               Dimension(self._hparams.frames_per_sample),
                               Dimension(self._hparams.neff),
                               Dimension(2)
                           ])))

        dataset = dataset.map(
            self.feature_map_func,
            num_parallel_calls=self._hparams.num_parallel_calls)

        dataset = dataset.batch(batch_size=self._hparams.batch_size,
                                drop_remainder=True)
        dataset = dataset.prefetch(self._hparams.prefetch_size)
        dataset = dataset.cache(
            filename=os.path.join(self.iterator_dir, "test_data_cache"))
        print_info("Dataset output sizes are: ")
        print_info(dataset.output_shapes)
        return dataset
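
The pipeline above only reads a handful of attributes from `self._hparams`; a hedged sketch of a compatible object (attribute names are taken from the code, the values are placeholders):

from types import SimpleNamespace

hparams = SimpleNamespace(
    frames_per_sample=100,    # placeholder: frames per generated sample
    neff=129,                 # placeholder: feature bins per frame
    batch_size=32,
    prefetch_size=1,
    num_parallel_calls=4,
)
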
Example #3
    def _get_test_input_function(self):
        """
        Inheriting class must implement this
        :return: tf.data.Dataset
        """
        dataset = tf.data.Dataset.from_generator(
            self._yield_test_samples, (tf.float32, tf.bool, tf.bool),
            output_shapes=(TensorShape([
                Dimension(self._hparams.frames_per_sample),
                Dimension(self._hparams.neff)
            ]),
                           TensorShape([
                               Dimension(self._hparams.frames_per_sample),
                               Dimension(self._hparams.neff)
                           ]),
                           TensorShape([
                               Dimension(self._hparams.frames_per_sample),
                               Dimension(self._hparams.neff),
                               Dimension(2)
                           ])))
        # Map the generator output to a features dict and a label
        dataset = dataset.map(lambda x, y, z: ({
            self.FEATURE_1_NAME: x,
            self.FEATURE_2_NAME: y
        }, z))

        dataset = dataset.batch(batch_size=self._hparams.batch_size,
                                drop_remainder=True)
        dataset = dataset.prefetch(self._hparams.prefetch_size)
        # dataset = dataset.cache(filename=os.path.join(self.iterator_dir, "test_data_cache"))
        print_info("Dataset output sizes are: ")
        print_info(dataset.output_shapes)
        return dataset
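
A dataset-returning method like this one is typically handed to a TF 1.x Estimator as its input_fn; a hypothetical usage sketch (the `estimator` and `data_iterator` objects are assumed to exist elsewhere):

predictions = estimator.predict(input_fn=data_iterator._get_test_input_function)
for prediction in predictions:
    print(prediction)
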
    def _user_resize_func(self, sample, vad, label):
        """
        Function that sets up the sizes of the tensor, after execution of `tf.py_func` call
        :param data:
        :param label:
        :return:
        """

        sample = tf.reshape(sample,
                            shape=TensorShape([
                                Dimension(self._hparams.dummy_slicing_dim),
                                Dimension(self._hparams.neff)
                            ]))
        vad = tf.reshape(vad,
                         shape=TensorShape([
                             Dimension(self._hparams.dummy_slicing_dim),
                             Dimension(self._hparams.neff)
                         ]))
        label = tf.reshape(label,
                           shape=TensorShape([
                               Dimension(self._hparams.dummy_slicing_dim),
                               Dimension(self._hparams.neff),
                               Dimension(2)
                           ]))
        return ({self.FEATURE_1_NAME: sample, self.FEATURE_2_NAME: vad}, label)
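
For context, a hedged sketch of how a reshape function like this is usually chained after a `tf.py_func` map, whose outputs lose their static shapes; `slice_samples` and the literal shapes are placeholders, not the project's actual code:

import tensorflow as tf

def slice_samples(sample, vad, label):
    # Hypothetical numpy-level processing; tf.py_func erases static shape info.
    return sample, vad, label

dataset = tf.data.Dataset.from_tensor_slices((
    tf.zeros([4, 100, 129], tf.float32),
    tf.zeros([4, 100, 129], tf.bool),
    tf.zeros([4, 100, 129, 2], tf.bool)))
dataset = dataset.map(lambda s, v, l: tuple(
    tf.py_func(slice_samples, [s, v, l], [tf.float32, tf.bool, tf.bool])))
# A second map with a function like _user_resize_func then restores the static shapes.
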
Example #5
    def call(self, inputs, training=None, mask=None):
        # print("sub_layer_name_suffix", tensorflow.executing_eagerly(), "mask", mask)
        # print(type(inputs))
        if isinstance(inputs, list):
            assert len(inputs) == 1
            input = inputs[0]
        else:
            input = inputs
        # input_shape = inputs.shape
        # print("input", input.shape)

        # Masking:
        input = self.masking_layer(input)

        sub_results_for_dense = []
        # Determine the input for main lstm layer:
        if self.architecture_kind == ArchitectureKind.BASIC:
            # Just use the basic feat vec's slice:
            for_lstm_main = self.slice_basic_feat_vec(input)
        else:
            # Connect sub lstm and sub nalu and concatenate them:
            value_slice_results = []
            ness_slice_results = []
            other_slice_results = {}
            for input_matching_info in self.kind_numbers_feat_vec_component_info + self.feat_numbers_feat_vec_component_info:
                slice_name = "slice_" + input_matching_info.name
                sliced_result = getattr(self, slice_name)(input)
                if "arrsize_num" in input_matching_info.name or "value_num" in input_matching_info.name:
                    value_slice_results.append(sliced_result)
                elif "ness_num" in input_matching_info.name:
                    ness_slice_results.append(sliced_result)
                else:
                    other_slice_results[slice_name] = sliced_result

            sub_results_for_main_lstm = []
            for sub_layer_name_suffix in [info.name for info in self.kind_numbers_feat_vec_component_info] \
                                         + ["member_num", "concatenate_values_num", "type_qual_name_num",
                                            "concatenate_ness_num"]:
                if sub_layer_name_suffix == "concatenate_values_num":
                    sliced_or_embed_result = concatenate(
                        value_slice_results, name="concatenate_values_num")
                elif sub_layer_name_suffix == "concatenate_ness_num":
                    sliced_or_embed_result = concatenate(
                        ness_slice_results, name="concatenate_ness_num")
                else:
                    sliced_or_embed_result = other_slice_results[
                        "slice_" + sub_layer_name_suffix]

                if "kind" in sub_layer_name_suffix:
                    sliced_or_embed_result = getattr(
                        self, "embedding_" +
                        sub_layer_name_suffix)(sliced_or_embed_result)
                    # The following leads to memory leaks:
                    if self.embedding_results is not None:
                        self.embedding_results.append(sliced_or_embed_result)
                # print(sliced_or_embed_result.name, sliced_or_embed_result.shape)
                sliced_or_embed_or_nalu_result = sliced_or_embed_result
                # lstm_or_nalu_result = getattr(self, "sub_lstm_" + sub_layer_name_suffix)(sliced_or_embed_result)
                # print(lstm_or_nalu_result.name, lstm_or_nalu_result.shape)
                if "kind" not in sub_layer_name_suffix:
                    sub_lstm_layer = getattr(
                        self, "lstm_sub_" + sub_layer_name_suffix, None)
                    if sub_lstm_layer is not None:
                        # Connect sub lstm:
                        sliced_or_embed_or_nalu_result = sub_lstm_layer(
                            sliced_or_embed_or_nalu_result)

                        # Connect sub nalu:
                        sub_nalu_layer = getattr(
                            self, "nalu_sub_" + sub_layer_name_suffix)
                        sliced_or_embed_or_nalu_result = sub_nalu_layer(
                            sliced_or_embed_or_nalu_result)
                    else:
                        # The sub lstm and nalu may only be absent if this has been
                        # requested through a nalu neuron count value of zero:
                        assert self.nalu_neuron_count == 0, self.nalu_neuron_count

                    # Concatenate depending on architecture. With nalu_neuron_count == 0, both are the same:
                    if self.architecture_kind == ArchitectureKind.EMBED_NALU_REK_ALL_IN_MAIN_LSTM \
                            or self.nalu_neuron_count == 0:
                        # With "rekurrent" nalu concat nalu output and input it to main lstm:
                        sub_results_for_main_lstm.append(
                            sliced_or_embed_or_nalu_result)
                    elif self.architecture_kind == ArchitectureKind.EMBED_NALU_NON_REK:
                        # With a non-recurrent nalu, concatenate the nalu output and feed it into the dense layer:
                        sub_results_for_dense.append(
                            sliced_or_embed_or_nalu_result)
                    else:
                        assert False

                    # print(lstm_or_nalu_result.name, lstm_or_nalu_result.shape)
                else:
                    sub_results_for_main_lstm.append(
                        sliced_or_embed_or_nalu_result)

            for_lstm_main = concatenate(sub_results_for_main_lstm,
                                        name="concatenate_for_lstm_main")

        # Connect main lstm layer:
        lstm_main_layer = getattr(self, "lstm_main_layer", None)
        if lstm_main_layer is not None:
            lstm_main_result = lstm_main_layer(for_lstm_main)
        else:
            assert self.main_lstm_neuron_count <= 0, self.main_lstm_neuron_count
            # Reshape the data to non-sequence format. Build the new shape as a list of ints derived from for_lstm_main's shape:
            new_shape = [
                for_lstm_main.shape[0],
                for_lstm_main.shape[1] * for_lstm_main.shape[2]
            ]
            # The last dimension must be fixed! Therefore, this only works for fixed sequence lengths. In that
            # case the sequence length must be provided as the negative lstm neuron count.
            if str(new_shape[0]) == "?":
                new_shape = [Dimension(None), -self.main_lstm_neuron_count]
            # Replace Dimensions with ints and replace placeholders in the shape (i.e. "?" / Dimension(None)) with -1:
            for shape_part_index in range(len(new_shape)):
                if str(new_shape[shape_part_index]) == "?":
                    new_shape[shape_part_index] = -1
                else:
                    new_shape[shape_part_index] = int(
                        new_shape[shape_part_index])
            lstm_main_result = tensorflow.reshape(for_lstm_main, new_shape)
        # print("Reshaped from", for_lstm_main.shape, "to", lstm_main_result.shape)

        # Determine the input for first dense layer:
        if self.architecture_kind == ArchitectureKind.BASIC:
            # Main nalu
            nalu_main_layer = getattr(self, "nalu_main_layer", None)
            if nalu_main_layer is not None:
                for_dense = nalu_main_layer(lstm_main_result)
            else:
                assert self.nalu_neuron_count == 0, self.nalu_neuron_count
                for_dense = lstm_main_result
        elif self.architecture_kind == ArchitectureKind.EMBED_NALU_REK_ALL_IN_MAIN_LSTM or self.nalu_neuron_count == 0:
            assert len(sub_results_for_dense) == 0
            for_dense = lstm_main_result
        elif self.architecture_kind == ArchitectureKind.EMBED_NALU_NON_REK:
            assert len(sub_results_for_dense) > 0, sub_results_for_dense
            for_dense = concatenate([lstm_main_result] + sub_results_for_dense,
                                    name="concatenate_for_dense")
        else:
            assert False

        # Main Dense
        dense_fc_layer = getattr(self, "dense_fc_layer", None)
        if dense_fc_layer is not None:
            dense_result = dense_fc_layer(for_dense)
        else:
            assert self.dense_neuron_count == 0, self.dense_neuron_count
            dense_result = for_dense

        dense_output_result = self.dense_output_layer(dense_result)

        if not tensorflow.executing_eagerly() and self.model_graph is None:
            # We are currently in non-eager (graph) mode and have a graph that can be saved:
            self.model_graph = dense_output_result.graph
        return dense_output_result
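
The `slice_<name>` attributes used in `call` appear to be layers that cut a column range out of the feature vectors; a minimal hypothetical sketch of how such a layer could be built (the helper name and indices are placeholders):

from tensorflow.keras.layers import Lambda

def make_slice_layer(name, start, end):
    # Build a layer that slices feature columns [start, end) from a
    # (batch, time, features) input, mirroring how slice_<name> is used above.
    return Lambda(lambda x: x[:, :, start:end], name="slice_" + name)
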