Example #1
    def _prepare_component_wise_processing(self,
                                           w_tensor,
                                           latent_indicators_tensor,
                                           *input_tensors,
                                           zero_prob_val=0.0):
        indices, values = self._combine_values_and_indices(input_tensors)
        # Create a 3D tensor with dimensions [batch, sum node, sum input]
        # The last axis will have zeros when the sum size is less than the max sum size
        if all(np.array_equal(indices[0], ind) for ind in indices):
            # In case all sum nodes model the same sum, we can just use broadcasting
            reducible_values = tf.reshape(
                tf.gather(values, indices[0], axis=1),
                (-1, 1, self._max_sum_size))
        elif len(set(self._sum_sizes)) == 1:
            # In case all sum sizes are the same, use gather and reshape accordingly
            indices_flat = list(itertools.chain(*indices))
            reducible_values = tf.reshape(
                tf.gather(values, indices_flat, axis=1),
                (-1, self._num_sums, self._max_sum_size))
        else:
            reducible_values = utils.gather_cols_3d(values,
                                                    indices,
                                                    pad_elem=zero_prob_val,
                                                    name="GatherToReducible")
        w_tensor = tf.expand_dims(w_tensor, axis=self._batch_axis)
        if latent_indicators_tensor is not None:
            latent_indicators_tensor = tf.reshape(latent_indicators_tensor,
                                                  shape=(-1, self._num_sums,
                                                         self._max_sum_size))
        return w_tensor, latent_indicators_tensor, reducible_values
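
All three branches above produce the same reducible tensor of shape [batch, num_sums, max_sum_size]; only the gather strategy differs. Below is a standalone NumPy sketch, not the library's implementation, of the padded gather that utils.gather_cols_3d is described to perform in the final branch (function name and semantics here are inferred from the comments above):

import numpy as np

def gather_cols_3d_sketch(values, indices, pad_elem=0.0):
    # values: [batch, num_inputs]; indices: one column-index list per sum.
    # Ragged index lists are gathered and padded with `pad_elem` up to the
    # maximum sum size, yielding [batch, num_sums, max_sum_size].
    max_size = max(len(ind) for ind in indices)
    out = np.full((values.shape[0], len(indices), max_size), pad_elem,
                  dtype=values.dtype)
    for s, ind in enumerate(indices):
        out[:, s, :len(ind)] = values[:, ind]
    return out

values = np.arange(10.0).reshape(2, 5)         # [batch=2, num_inputs=5]
indices = [[0, 1, 2], [3, 4]]                  # two sums, sizes 3 and 2
print(gather_cols_3d_sketch(values, indices))  # (2, 2, 3); sum 2 zero-padded
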
Example #2
    def _compute_log_mpe_path(self, counts, *value_values, add_random=False,
                              use_unweighted=False, sample=False, sample_prob=None):
        # Path per product node is calculated by permuting backwards to the
        # input nodes, then adding the appropriate counts per input, and then
        # scattering the summed counts to value inputs

        # Check inputs
        if not self._values:
            raise StructureError("%s is missing input values." % self)

        def permute_counts(input_sizes):
            # Permutes count values backwards to the input nodes.
            counts_indices_list = []

            def range_with_blocksize(start, stop, block_size, step):
                # Produces an arithmetic progression (similar to Python's
                # built-in range()), but in blocks of `block_size` consecutive
                # numbers spaced `step` apart.
                # E.g. range_with_blocksize(start=0, stop=20, block_size=3, step=5)
                # = [0, 1, 2, 5, 6, 7, 10, 11, 12, 15, 16, 17]
                counts_indices = []
                it = 0
                low = start
                high = low + block_size
                while low < stop:
                    counts_indices.extend(range(low, high))
                    it += 1
                    low = start + (it * step)
                    high = low + block_size

                return counts_indices

            for inp, inp_size in enumerate(input_sizes):
                block_size = int(self._num_prods / np.prod(input_sizes[:inp+1]))
                step = int(np.prod(input_sizes[inp:]))
                for i in range(inp_size):
                    start = i * block_size
                    stop = self._num_prods - (block_size * (inp_size-i-1))
                    counts_indices_list.append(range_with_blocksize(start, stop,
                                                                    block_size,
                                                                    step))

            return counts_indices_list

        if len(self._input_sizes) > 1:
            permuted_indices = permute_counts(self._input_sizes)
            summed_counts = tf.reduce_sum(utils.gather_cols_3d(counts, permuted_indices),
                                          axis=-1)
            processed_counts_list = tf.split(summed_counts, self._input_sizes, axis=-1)
        else:  # Single-input case, i.e., when _num_prods == 1
            summed_counts = self._input_sizes[0] * [counts]
            processed_counts_list = [tf.concat(values=summed_counts, axis=-1)]

        # Zip lists of processed counts and value_values together for scattering
        value_counts = zip(processed_counts_list, value_values)

        return self._scatter_to_input_tensors(*value_counts)
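
The core of this method is the index pattern built by range_with_blocksize. As a quick sanity check, here is the same helper extracted as a standalone function (my own sketch) together with the example from its comment:

def range_with_blocksize(start, stop, block_size, step):
    # Blocks of `block_size` consecutive numbers, one block every `step`,
    # until `stop` is reached.
    out, it = [], 0
    low, high = start, start + block_size
    while low < stop:
        out.extend(range(low, high))
        it += 1
        low = start + it * step
        high = low + block_size
    return out

assert range_with_blocksize(0, 20, 3, 5) == \
    [0, 1, 2, 5, 6, 7, 10, 11, 12, 15, 16, 17]
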
Example #3
    def _compute_log_mpe_path(self, counts, *value_values,
                              use_unweighted=False, sample=False, sample_prob=None):
        # Check inputs
        if not self._values:
            raise StructureError("%s is missing input values." % self)

        # For each unique (input, index) pair in the values list, collect the
        # indices of all counts for which that pair is a child
        gather_counts_indices, unique_inputs = self._collect_count_indices_per_input()

        if self._num_prods > 1:
            # Gather columns from the counts tensor, per unique (input, index) pair
            reducible_values = utils.gather_cols_3d(counts, gather_counts_indices)

            # Sum gathered counts together per unique (input, index) pair
            summed_counts = tf.reduce_sum(reducible_values, axis=-1)
        else:
            # Calculate total inputs size
            inputs_size = sum(v_input.get_size(v_value) for v_input, v_value in
                              zip(self._values, value_values))

            # Tile counts only if input is larger than 1
            summed_counts = (tf.tile(counts, [1, inputs_size]) if inputs_size > 1
                             else counts)

        # For each unique input in the values list, calculate the number of
        # unique indices
        unique_inp_sizes = [len(v) for v in unique_inputs.values()]

        # Split the summed-counts tensor per unique input, based on input-sizes
        unique_input_counts = tf.split(summed_counts, unique_inp_sizes, axis=-1) \
            if len(unique_inp_sizes) > 1 else [summed_counts]

        # Scatter each unique-counts tensor to the respective input, only once
        # per unique input in the values list
        scattered_counts = [None] * len(self._values)
        for (node, inds), cnts in zip(unique_inputs.items(), unique_input_counts):
            for i, (inp, val) in enumerate(zip(self._values, value_values)):
                if inp.node == node:
                    num_cols = int(val.get_shape()[
                        0 if val.get_shape().ndims == 1 else 1])
                    scattered_counts[i] = utils.scatter_cols(cnts, inds, num_cols)
                    break

        return scattered_counts
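
The last loop writes each accumulated counts tensor back at the column positions it was gathered from. Below is a plain-NumPy approximation of that scatter step (not libspn's utils.scatter_cols, whose exact semantics are only inferred from the call above):

import numpy as np

def scatter_cols_sketch(cnts, inds, out_size):
    # cnts: [batch, len(inds)] -> [batch, out_size], with the columns of
    # `cnts` placed at positions `inds` and zeros everywhere else.
    out = np.zeros((cnts.shape[0], out_size), dtype=cnts.dtype)
    out[:, inds] = cnts
    return out

cnts = np.array([[1.0, 2.0], [3.0, 4.0]])  # counts for two unique indices
print(scatter_cols_sketch(cnts, [0, 3], out_size=5))
# [[1. 0. 0. 2. 0.]
#  [3. 0. 0. 4. 0.]]
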
Example #4
    def _compute_value_common(self, *value_tensors, padding_value=0.0):
        """Common actions when computing value."""
        # Check inputs
        if not self._values:
            raise StructureError("%s is missing input values." % self)
        # Prepare values
        if self._num_prods > 1:
            indices, value_tensor = self._combine_values_and_indices(value_tensors)
            # Create a 3D tensor with dimensions [batch, num-prods, max-prod-input-size]
            # The last axis is padded with zeros (log space) or ones (non-log space)
            # when a product's input size is less than the maximum
            reducible_values = utils.gather_cols_3d(value_tensor, indices,
                                                    pad_elem=padding_value)
            return reducible_values
        else:
            # Gather input tensors
            value_tensors = self._gather_input_tensors(*value_tensors)
            return tf.concat(value_tensors, 1)
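
To see why padding_value matters here, note that for log-space values a pad of 0.0 equals log(1), so reducing the last axis with a sum leaves short products unchanged; in linear space the pad would be 1.0 and the reduction a product. A small NumPy illustration (my own sketch, mirroring the padded layout described in the comment above):

import numpy as np

log_vals = np.log(np.array([[0.5, 0.25, 0.1]]))  # [batch=1, 3 inputs]
indices = [[0, 1], [2]]                          # two products, sizes 2 and 1
padded = np.zeros((1, 2, 2))                     # pad_elem = 0.0 = log(1)
for p, ind in enumerate(indices):
    padded[0, p, :len(ind)] = log_vals[0, ind]
log_prods = padded.sum(axis=-1)                  # log-space product = sum
print(np.exp(log_prods))                         # [[0.125 0.1  ]]
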
Example #5
    def _accumulate_uniq_values_and_split(self,
                                          flat_col_indices,
                                          flat_tensor_offsets,
                                          x,
                                          unique_tensors_offsets_dict,
                                          gather_segments_only=False):
        """Helper method that is used for summing counts within the layer before passing it on
        by means of gathering from the (padded) weighted values and reducing afterwards.

        Args:
            flat_col_indices (numpy.ndarray): An array containing the flattened column indices to
                gather from the concatenation of unique value tensors.
            flat_tensor_offsets (numpy.ndarray): An array containing the flattened tensor offsets
                in the concatenation of the unique value tensors.
            x (Tensor): A ``Tensor`` to gather, accumulate per unique value tensor and finally
                split for scattering.
            unique_tensors_offsets_dict (OrderedDict): A mapping of ``Tensor`` -> offset
                corresponding to the unique value tensors and their offsets in the concatenation.
            gather_segments_only (bool): If ``True``, will transpose and gather on the zeroth
                axis, without 'zero-probability' padding so that the result can be accumulated
                using tf.segment_sum.

        Returns:
            A list of indices to be used for scattering the values of the list in the second
            return value, which is a list of accumulated values corresponding to the unique
            value Inputs of this node.
        """
        # Make a flat list containing the sum index for each of the 'concatenated' inputs
        sum_indices = []
        for i, size in enumerate(self._sum_sizes):
            sum_indices.extend([i] * size)

        # For each unique tensor and index pair, we should have a list of indices to gather from
        # the reducible values tensor
        max_size = max(self._sum_sizes)
        unique_tensor_gather_indices = OrderedDict()
        unique_tensors_offsets_inverse = {
            v: k
            for k, v in unique_tensors_offsets_dict.items()
        }

        old_sum_index = 0
        start_of_current_sum = 0
        for i, (col, tensor_offset, sum_index) in enumerate(
                zip(flat_col_indices, flat_tensor_offsets, sum_indices)):
            # Given the offset of the current flat (axis 1) index, we get the input tensor that
            # feeds its value to it.
            tensor = unique_tensors_offsets_inverse[tensor_offset]
            if tensor not in unique_tensor_gather_indices:
                unique_tensor_gather_indices[tensor] = defaultdict(list)
            # For this tensor-column combination, we register the corresponding index to gather
            # from the padded 2D reducible tensor
            if sum_index != old_sum_index:
                old_sum_index = sum_index
                start_of_current_sum = i

            # Index of the column within the sum
            index_within_sum = i - start_of_current_sum

            # Given the index of the sum and the index of the column within, we can find the index
            # to gather for this particular column of the input tensor
            unique_tensor_gather_indices[tensor][col].append(
                index_within_sum + sum_index * max_size)

        # For each tensor that we have, we compute the scatter indices. Here we construct the
        # nested gather indices needed for gather_cols_3d.
        nested_gather_indices = []
        unique_tensor_lengths = []
        tensor_scatter_indices = OrderedDict()
        for tensor, col_to_gather_col in unique_tensor_gather_indices.items():
            gather_indices_sub = []
            tensor_scatter_indices[tensor] = []
            # Go over all possible indices
            for i in range(tensor.shape[1].value):
                # If this index is registered as one to gather for...
                if i in col_to_gather_col:
                    # ... then we append the gathering columns to the currently considered
                    # tensor column
                    gather_indices_sub.append(col_to_gather_col[i])
                    tensor_scatter_indices[tensor].append(i)
            # Length of the list of columns for each unique input value tensor
            unique_tensor_lengths.append(len(gather_indices_sub))
            # Will contain a list of lists. Inner lists correspond to columns to gather, while
            # outer list corresponds to the individual 'indexed' input nodes
            nested_gather_indices.extend(gather_indices_sub)

        # Gather columns from the counts tensor, per unique (input, index) pair
        if gather_segments_only:
            segment_ids = []
            for i, ind in enumerate(nested_gather_indices):
                segment_ids.extend([i] * len(ind))
            num_sums_to_scatter = len(nested_gather_indices)
            nested_gather_indices = list(
                itertools.chain(*nested_gather_indices))
            transposed = tf.transpose(x)
            gathered = tf.gather(transposed, indices=nested_gather_indices)
            accumulated = tf.reshape(
                tf.segment_sum(gathered, segment_ids=segment_ids),
                (num_sums_to_scatter, -1))
            accumulated = tf.transpose(accumulated)
        else:
            reducible_values = utils.gather_cols_3d(x, nested_gather_indices)
            # Sum gathered counts together per unique (input, index) pair
            accumulated = tf.reduce_sum(reducible_values, axis=-1)

        # Split the summed-counts tensor per unique input, based on input-sizes
        accumulated_unique_tensor_values = tf.split(
            accumulated, unique_tensor_lengths, axis=-1) \
            if len(unique_tensor_lengths) > 1 else [accumulated]
        return tensor_scatter_indices, accumulated_unique_tensor_values
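
The gather_segments_only branch avoids padding altogether: it transposes so columns become rows, gathers the flat indices, and sums runs of rows that share a segment id, which is what tf.segment_sum does for sorted segment ids. A standalone NumPy equivalent of that accumulation (assumptions and example data mine):

import numpy as np

x = np.array([[1.0, 2.0, 3.0, 4.0]])        # [batch=1, 4 columns]
nested = [[0, 2], [1, 3]]                   # columns to accumulate per sum
segment_ids = [i for i, ind in enumerate(nested) for _ in ind]  # [0, 0, 1, 1]
flat = [c for ind in nested for c in ind]   # [0, 2, 1, 3]

gathered = x.T[flat]                        # [4, batch]
accumulated = np.zeros((len(nested), x.shape[0]))
np.add.at(accumulated, segment_ids, gathered)  # segment-wise sum over axis 0
print(accumulated.T)                        # [[4. 6.]]: 1+3 and 2+4
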