Example #1
def invert_permutation():
    # Invert a batch of permutations with a single tf.invert_permutation call.
    x_h = tf.placeholder(tf.float32, [None, 5])
    x = np.array(
        [[3, 4, 0, 2, 1],
         [2, 1, 3, 4, 0]],
        float
    )
    dim = int(x_h.get_shape()[-1])
    size = tf.cast(tf.shape(x_h)[0], tf.float32)
    delta = tf.cast(tf.shape(x_h)[-1], tf.float32)
    # Row offsets [0, dim, 2*dim, ...]: adding them makes the flattened
    # batch one long permutation of 0 .. size*dim - 1.
    rg = tf.range(0, size*delta, delta, dtype=tf.float32)
    rg = tf.reshape(rg, [-1, 1])
    rg = tf.tile(rg, [1, dim])
    x_a = tf.add(x_h, rg)
    flat = tf.reshape(x_a, [-1])
    # Invert once on the flat vector, then subtract the offsets again.
    iperm = tf.invert_permutation(tf.cast(flat, tf.int32))
    rs = tf.reshape(iperm, [-1, dim])
    rs_f = tf.subtract(rs, tf.cast(rg, tf.int32))
    with tf.Session() as sess:
        r_rg = sess.run(rg, feed_dict={x_h: x})
        print("rg:{}".format(r_rg))
        r = sess.run(flat, feed_dict={x_h: x})
        print(r)
        r_rs = sess.run(rs_f, feed_dict={x_h: x})
        print("final:\n{}".format(r_rs))
        check = sess.run(tf.invert_permutation([2, 1, 3, 4, 0]))
        print("check:\n{}".format(check))
Example #2
    def _build_nlpd(self, weights, means, covars, link_covars, inducing_inputs,
                    kernel_chol, kernlink_chol, test_inputs, test_outputs):
        '''
        returns  -lpd_all (tensor for all n,p,s)
        '''
        lpd_all = tf.zeros(
            [self.predict_samples,
             tf.shape(test_inputs)[0], self.output_dim])
        #lpd = 0
        dim_out = self.output_dim
        kern_prods, kern_sums = self._build_interim_vals(
            kernel_chol, inducing_inputs, test_inputs)
        for i in range(self.num_components):
            covar_input = covars[i, :, :] if self.diag_post else covars[
                i, :, :, :]
            link_cov_input = None if self.diag_post else link_covars[
                i]  # list of R covar link mats (each Qr x Qr)
            latent_samples = self._build_samples(kern_prods, kern_sums,
                                                 kernlink_chol, means[i, :, :],
                                                 covar_input, link_cov_input,
                                                 self.predict_samples)
            # reorder latent according to 'inverted' block struct order
            latent_j = [j for b in self.block_struct
                        for j in b]  #implicit order of j in latent_samples
            revert_j = tf.invert_permutation(latent_j)
            latent_samples = tf.gather(latent_samples, revert_j,
                                       axis=2)  # reorder to j=1, j=2, ...

            lpd_all += weights[i] * self.likelihood.nlpd_cond_prob(
                test_outputs, latent_samples)

        return -lpd_all
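The reorder step above is a general "undo a known static ordering" pattern: invert the Python-list permutation once, then gather along the latent axis. A small standalone illustration with made-up shapes (the names here are hypothetical, not from the source):

import tensorflow as tf

latent_j = [2, 0, 1]                        # block-structured order of latent functions
revert_j = tf.invert_permutation(latent_j)  # -> [1, 2, 0]
samples = tf.random_normal([4, 3, 3])       # (num_samples, N, Q)
restored = tf.gather(samples, revert_j, axis=2)  # back to j = 0, 1, 2

with tf.Session() as sess:
    print(sess.run(revert_j))  # [1 2 0]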
Example #3
    def _build_ell(self, weights, means, covars, link_covars, inducing_inputs,
                   kernel_chol, kernlink_chol, train_inputs, train_outputs):
        # generate `within' kernel auxiliary matrices
        kern_prods, kern_sums = self._build_interim_vals(
            kernel_chol, inducing_inputs, train_inputs)
        ell = 0
        for i in range(self.num_components):
            covar_input = covars[i, :, :] if self.diag_post else covars[
                i, :, :, :]
            link_cov_input = None if self.diag_post else link_covars[
                i]  # list of R covar link mats (each Qr x Qr)
            latent_samples = self._build_samples(kern_prods, kern_sums,
                                                 kernlink_chol, means[i, :, :],
                                                 covar_input, link_cov_input,
                                                 self.ell_samples)
            # reorder latent according to 'inverted' block struct order
            latent_j = [j for b in self.block_struct
                        for j in b]  #implicit order of j in latent_samples
            revert_j = tf.invert_permutation(latent_j)
            latent_samples = tf.gather(latent_samples, revert_j,
                                       axis=2)  # reorder to j=1, j=2, ...

            ell += weights[i] * tf.reduce_sum(
                self.likelihood.log_cond_prob(train_outputs, latent_samples))

        return ell / self.ell_samples
Example #4
def multilevel_roi_align(features, rcnn_boxes, resolution):
    """
    Args:
        features ([tf.Tensor]): 4 FPN feature level 2-5
        rcnn_boxes (tf.Tensor): nx4 boxes
        resolution (int): output spatial resolution
    Returns:
        NxC x res x res
    """
    assert len(features) == 4, features
    # Reassign rcnn_boxes to levels
    level_ids, level_boxes = fpn_map_rois_to_levels(rcnn_boxes)
    all_rois = []

    # Crop patches from corresponding levels
    for i, boxes, featuremap in zip(itertools.count(), level_boxes, features):
        with tf.name_scope('roi_level{}'.format(i + 2)):
            boxes_on_featuremap = boxes * (1.0 / cfg.FPN.ANCHOR_STRIDES[i])
            all_rois.append(roi_align(featuremap, boxes_on_featuremap, resolution))

    # this can fail if using TF<=1.8 with MKL build
    all_rois = tf.concat(all_rois, axis=0)  # NCHW
    # Unshuffle to the original order, to match the original samples
    level_id_perm = tf.concat(level_ids, axis=0)  # A permutation of 1~N
    level_id_invert_perm = tf.invert_permutation(level_id_perm)
    all_rois = tf.gather(all_rois, level_id_invert_perm)
    return all_rois
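The unshuffle works because the concatenated per-level indices form a permutation of the original box indices; a toy standalone sketch of that scatter-back step (made-up level assignments, not the real FPN mapping):

import tensorflow as tf

level_assignments = tf.constant([0, 1, 0, 1, 1])  # which level each box was sent to
ids_l0 = tf.where(tf.equal(level_assignments, 0))[:, 0]  # [0, 2]
ids_l1 = tf.where(tf.equal(level_assignments, 1))[:, 0]  # [1, 3, 4]
perm = tf.concat([ids_l0, ids_l1], axis=0)               # [0, 2, 1, 3, 4]
inv = tf.invert_permutation(tf.cast(perm, tf.int32))
# Gathering the concatenated per-level outputs with `inv` restores box order.
with tf.Session() as sess:
    print(sess.run(inv))  # [0 2 1 3 4]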
Example #5
    def calculate_allocation_weighting(self, usage_vector):
        """

        :param usage_vector: tensor of shape [batch_size, memory_size]
        :return: allocation tensor of shape [batch_size, memory_size]
        """
        usage_vector = Memory.epsilon + (1 - Memory.epsilon) * usage_vector

        # We sort "-usage_vector" because top_k returns the highest values and we need the lowest
        highest_usage, inverse_indices = tf.nn.top_k(-usage_vector,
                                                     k=self.memory_size)
        lowest_usage = -highest_usage

        allocation_scrambled = (1 - lowest_usage) * tf.cumprod(
            lowest_usage, axis=1, exclusive=True)

        # allocation_scrambled is in sorted order: entry i belongs to the i-th least-used slot.
        # Invert the sort indices for each batch row to map values back to their original slots.
        indices = tf.stack([
            tf.invert_permutation(batch_indices)
            for batch_indices in tf.unstack(inverse_indices)
        ])
        allocation = tf.stack([
            tf.gather(mem, ind) for mem, ind in zip(
                tf.unstack(allocation_scrambled), tf.unstack(indices))
        ])

        return allocation
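Note that tf.unstack over the batch dimension requires a statically known batch size; when it is dynamic, the same per-row inversion can be written with tf.map_fn instead (a sketch under that assumption, not part of the original class):

import tensorflow as tf

def batch_invert_permutation_dynamic(permutations):
    # permutations: int32 tensor [batch_size, n]; batch_size may be dynamic
    return tf.map_fn(tf.invert_permutation, permutations)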
Example #6
def _invert_permutation(input, row_count):
    '''wrapper for matrix'''
    rows = []
    for i in range(row_count):
        row = input[i,:]
        rows.append(tf.invert_permutation(row))
    return tf.cast(tf.stack(rows, axis=0), tf.float32)
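A quick usage sketch for the wrapper above (assuming an int32 matrix whose rows are each valid permutations):

import tensorflow as tf

m = tf.constant([[3, 4, 0, 2, 1],
                 [2, 1, 3, 4, 0]], dtype=tf.int32)
inv = _invert_permutation(m, row_count=2)
with tf.Session() as sess:
    print(sess.run(inv))
    # [[2. 4. 3. 0. 1.]
    #  [4. 1. 0. 2. 3.]]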
Example #7
def test_mathops_seven():
    mathops_7 = MathOpsSeven(seed=19)
    in_node_1 = mathops_7.get_placeholder("input_1", data_type=tf.int32)
    in_node_2 = mathops_7.get_placeholder("input_2")
    w = tf.Variable(tf.random_normal([8, 10], dtype=tf.float64), name="w")
    b = tf.cast(tf.invert_permutation(in_node_1), dtype=tf.float64)
    n1 = tf.nn.xw_plus_b(in_node_2, w, b)
    n2 = tf.cast(tf.fill([10, 10], 1.2345), dtype=tf.float64)
    n3 = tf.add(n1, n2)
    n4 = tf.nn.relu6(n3)
    n5 = tf.nn.moments(n4, axes=[1, 0], keep_dims=True)
    n6 = tf.meshgrid(
        n5, tf.Variable(tf.random_normal([2, 1, 1], dtype=tf.float64)))
    n7 = tf.parallel_stack([n6[1], n6[0], n6[1]])  # (3,2,2)
    n8 = tf.nn.normalize_moments(n7[0], n7[1], n7[2], None)  # (2,2,2)
    out_node = tf.pad(n8,
                      tf.constant([[1, 1], [1, 1], [1, 1]]),
                      "REFLECT",
                      name="output")

    placeholders = [in_node_1, in_node_2]
    predictions = [out_node]
    # Run and persist
    tfp = TensorFlowPersistor(save_dir="g_07")
    predictions_after_freeze = tfp.set_placeholders(placeholders) \
        .set_output_tensors(predictions) \
        .set_test_data(mathops_7.get_test_data()) \
        .build_save_frozen_graph()
    print(predictions_after_freeze[0].shape)
Example #8
def argmax_breaking_ties(x,
                         num_samples=1,
                         keepdims=False,
                         name="ArgMaxBreakingTies",
                         axis=-1):
    with tf.name_scope(name):
        axis = (axis + len(x.shape)) % len(x.shape)
        if axis != len(x.shape) - 1:
            permutation = [
                i if i != axis else len(x.shape) - 1
                for i in range(len(x.shape) - 1)
            ] + [axis]
            x = tf.transpose(x, permutation)
            permutation_inverse = tf.invert_permutation(permutation)
        else:
            permutation_inverse = None
        eq_max = tf.equal(x, tf.reduce_max(x, axis=-1, keepdims=True))
        logits = tf.log(tf.to_float(eq_max))
        argmax = multinomial_sample(logits, num_samples)
        if keepdims:
            if permutation_inverse is not None:
                return tf.transpose(argmax, permutation_inverse)
            return argmax
        elif num_samples != 1:
            raise ValueError("Cannot take out last dim if num_samples > 1")
        return tf.squeeze(argmax, axis=-1)
Example #9
    def _build_predict(self, weights, means, covars, link_covars, inducing_inputs,
                       kernel_chol, kernlink_chol, test_inputs):
        kern_prods, kern_sums = self._build_interim_vals(kernel_chol, inducing_inputs, test_inputs)
        pred_means = util.init_list(0.0, [self.num_components])
        pred_vars = util.init_list(0.0, [self.num_components])
        for i in range(self.num_components):
            covar_input = covars[i, :, :] if self.diag_post else covars[i, :, :, :]
            link_cov_input = None if self.diag_post else link_covars[i] # list of R covar link mats (each Qr x Qr)
            # generate f|lambda distribution parameters
            latent_samples = self._build_samples(kern_prods, kern_sums, kernlink_chol,
                                                 means[i, :, :], covar_input, link_cov_input, self.predict_samples)
            # reorder latent according to 'inverted' block struct order
            latent_j = [j for r in self.block_struct for j in r] #implicit order of j in latent_samples
            revert_j = tf.invert_permutation(latent_j)
            latent_samples = tf.gather(latent_samples, revert_j, axis=2) # reorder to j=1, j=2, ...
            # generate predicted y = Wf
            pred_means[i], pred_vars[i] = self.likelihood.predict(latent_samples)

        pred_means = tf.stack(pred_means, 0)
        pred_vars = tf.stack(pred_vars, 0)

        # Compute the mean and variance of the gaussian mixture from their components.
        weights = tf.expand_dims(tf.expand_dims(weights, 1), 1)
        weighted_means = tf.reduce_sum(weights * pred_means, 0)
        weighted_vars = (tf.reduce_sum(weights * (pred_means ** 2 + pred_vars), 0) -
                         tf.reduce_sum(weights * pred_means, 0) ** 2)

        return weighted_means, weighted_vars
Example #10
def multilevel_roi_align(features, rcnn_boxes, resolution):
    """
    Args:
        features ([tf.Tensor]): 4 FPN feature level 2-5
        rcnn_boxes (tf.Tensor): nx4 boxes
        resolution (int): output spatial resolution
    Returns:
        NxC x res x res
    """
    assert len(features) == 4, features
    # Reassign rcnn_boxes to levels
    level_ids, level_boxes = fpn_map_rois_to_levels(rcnn_boxes)
    all_rois = []

    # Crop patches from corresponding levels
    for i, boxes, featuremap in zip(itertools.count(), level_boxes, features):
        with tf.name_scope('roi_level{}'.format(i + 2)):
            boxes_on_featuremap = boxes * (1.0 / cfg.FPN.ANCHOR_STRIDES[i])
            all_rois.append(roi_align(featuremap, boxes_on_featuremap, resolution))

    all_rois = tf.concat(all_rois, axis=0)  # NCHW
    # Unshuffle to the original order, to match the original samples
    level_id_perm = tf.concat(level_ids, axis=0)  # A permutation of 1~N
    level_id_invert_perm = tf.invert_permutation(level_id_perm)
    all_rois = tf.gather(all_rois, level_id_invert_perm)
    return all_rois
Example #11
def inverse_selection(y, presence, batch_size=1, name='inverse_selection'):
    with tf.variable_scope(name):
        idx = tf.reshape(tf.range(tf.size(y)), tf.shape(y))
        idx = tf.reshape(select_present(idx, presence, batch_size), (-1, ))
        idx = tf.invert_permutation(idx)

        x = tf.gather(tf.reshape(y, (-1, )), idx)
    return tf.reshape(x, tf.shape(y))
Example #12
def batch_invert_permutation(permutations):
    """Returns batched `tf.invert_permutation` for every row in `permutations`."""
    with tf.name_scope('batch_invert_permutation', values=[permutations]):
        unpacked = tf.unstack(permutations)
        inverses = [
            tf.invert_permutation(permutation) for permutation in unpacked
        ]
        return tf.stack(inverses)
Example #13
    def predict_sym(self, obs_ph, act_ph):
        """
        Same batch fed into all models. Randomly output one of the predictions for each observation.
        :param obs_ph: (batch_size, obs_space_dims)
        :param act_ph: (batch_size, act_space_dims)
        :return: (batch_size, obs_space_dims)
        """
        original_obs = obs_ph
        # shuffle
        perm = tf.range(0, limit=tf.shape(obs_ph)[0], dtype=tf.int32)
        perm = tf.random.shuffle(perm)
        obs_ph, act_ph = tf.gather(obs_ph, perm), tf.gather(act_ph, perm)
        obs_ph, act_ph = tf.split(obs_ph, self.num_models,
                                  axis=0), tf.split(act_ph,
                                                    self.num_models,
                                                    axis=0)

        delta_preds = []
        with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
            for i in range(self.num_models):
                with tf.variable_scope('model_{}'.format(i), reuse=True):
                    assert self.normalize_input
                    in_obs_var = (obs_ph[i] - self._mean_obs_var[i]) / (
                        self._std_obs_var[i] + 1e-8)
                    in_act_var = (act_ph[i] - self._mean_act_var[i]) / (
                        self._std_act_var[i] + 1e-8)
                    input_var = tf.concat([in_obs_var, in_act_var], axis=1)
                    mlp = MLP(
                        self.name + '/model_{}'.format(i),
                        output_dim=2 * self.obs_space_dims,
                        hidden_sizes=self.hidden_sizes,
                        hidden_nonlinearity=self.hidden_nonlinearity,
                        output_nonlinearity=self.output_nonlinearity,
                        input_var=input_var,
                        input_dim=self.obs_space_dims + self.action_space_dims,
                    )

                mean, logvar = tf.split(mlp.output_var, 2, axis=-1)
                logvar = self.max_logvar - tf.nn.softplus(self.max_logvar -
                                                          logvar)
                logvar = self.min_logvar + tf.nn.softplus(logvar -
                                                          self.min_logvar)
                delta_pred = mean + tf.random.normal(
                    shape=tf.shape(mean)) * tf.exp(logvar)
                # denormalize
                delta_pred = delta_pred * self._std_delta_var[
                    i] + self._mean_delta_var[i]
                delta_preds.append(delta_pred)

        delta_preds = tf.concat(delta_preds, axis=0)

        # unshuffle
        perm_inv = tf.invert_permutation(perm)
        # next_obs = clip(obs + delta_pred)
        next_obs = original_obs + tf.gather(delta_preds, perm_inv)
        next_obs = tf.clip_by_value(next_obs, -1e2, 1e2)
        return next_obs
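The shuffle/unshuffle pair is a reusable pattern: gather by a random permutation, process, then gather by its inverse to restore the original row order. A standalone round trip on toy data (not the model code):

import tensorflow as tf

data = tf.constant([[0.], [1.], [2.], [3.]])
perm = tf.random.shuffle(tf.range(tf.shape(data)[0], dtype=tf.int32))
shuffled = tf.gather(data, perm)
restored = tf.gather(shuffled, tf.invert_permutation(perm))

with tf.Session() as sess:
    d, r = sess.run([data, restored])
    assert (d == r).all()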
Example #14
    def batch_unsort(tensor, indices):
        """Permute each batch in a batch first tensor according to tensor
        of indices.
        """
        unpacked = tf.unstack(indices)
        indices_inverted = tf.stack(
            [tf.invert_permutation(permutation) for permutation in unpacked])

        unpacked = zip(tf.unstack(tensor), tf.unstack(indices_inverted))
        return tf.stack([tf.gather(value, index) for value, index in unpacked])
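batch_unsort pairs naturally with tf.nn.top_k: sorting each row and then unsorting with the returned indices is an identity. A minimal check (calling the function above directly):

import tensorflow as tf

x = tf.constant([[3., 1., 2.],
                 [9., 7., 8.]])
sorted_desc, idx = tf.nn.top_k(x, k=3)     # per-row descending sort
restored = batch_unsort(sorted_desc, idx)  # undo the sort

with tf.Session() as sess:
    orig, back = sess.run([x, restored])
    assert (orig == back).all()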
Example #15
    def _build(self, input=None):
        n_features = self._n_features = get_static_shape(input)[self.axis]
        permutation = np.arange(n_features, dtype=np.int32)
        self._random_state.shuffle(permutation)

        self._permutation = model_variable(
            'permutation', dtype=tf.int32, initializer=permutation,
            trainable=False
        )
        self._inv_permutation = tf.invert_permutation(self._permutation)
Example #16
    def _get_centered_ranks_tensor(self, R):
        flatten_R = tf.reshape(R, (-1, ))
        _, sorted_index = tf.nn.top_k(
            -1.0 * flatten_R, k=tf.shape(flatten_R)[0], sorted=False)
        perm_index = tf.cast(tf.invert_permutation(sorted_index), tf.float32)
        perm_index = tf.reshape(perm_index, tf.shape(R))
        centered_rank_R = perm_index / (
            tf.cast(tf.shape(flatten_R)[0], tf.float32) - 1.0) - 0.5

        return centered_rank_R
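The inverted sort indices are zero-based ranks, so after scaling the values land in [-0.5, 0.5]; a worked toy case in plain numpy (assuming top_k effectively sorts, which is the intent here):

import numpy as np

R = np.array([0.3, -1.2, 2.5])
ranks = np.argsort(np.argsort(R))          # [1 0 2]: rank of each entry
centered = ranks / (len(R) - 1.0) - 0.5    # [ 0.  -0.5  0.5]
print(centered)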
Example #17
def testFoldl():
    x_h = tf.placeholder(tf.int32, [None, 5])
    x = np.array(
        [[3, 4, 0, 2, 1],
         [2, 4, 3, 0, 1]]
    )
    # Sum each row's inverted permutation; the initializer keeps the
    # accumulator shape fixed at [5].
    fd = tf.foldl(
        lambda a, b: a + tf.invert_permutation(b), x_h,
        initializer=tf.zeros([5], dtype=tf.int32))
    with tf.Session() as sess:
        r = sess.run(fd, feed_dict={x_h: x})
        print(r)
Example #18
def batch_invert_permutation(permutations):
    """Returns batched `tf.invert_permutation` for every row in `permutations`."""
    with tf.name_scope('batch_invert_permutation', values=[permutations]):
        # Offset each row by i * n so the flattened batch forms one long
        # permutation; invert once, then subtract the offsets again.
        perm = tf.cast(permutations, tf.float32)
        dim = int(perm.get_shape()[-1])
        size = tf.cast(tf.shape(perm)[0], tf.float32)
        delta = tf.cast(tf.shape(perm)[-1], tf.float32)
        rg = tf.range(0, size * delta, delta, dtype=tf.float32)
        rg = tf.expand_dims(rg, 1)
        rg = tf.tile(rg, [1, dim])
        perm = tf.add(perm, rg)
        flat = tf.reshape(perm, [-1])
        perm = tf.invert_permutation(tf.cast(flat, tf.int32))
        perm = tf.reshape(perm, [-1, dim])
        return tf.subtract(perm, tf.cast(rg, tf.int32))
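Usage sketch for the offset-based batched inversion above (self-contained apart from the function itself):

import tensorflow as tf

perms = tf.constant([[1, 0, 2],
                     [2, 0, 1]], dtype=tf.int32)
inverted = batch_invert_permutation(perms)
with tf.Session() as sess:
    print(sess.run(inverted))
    # [[1 0 2]
    #  [1 2 0]]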
Example #19
    def call(self, x, instances):
        """
        Args:
            x (list[Tensor]): A list of feature maps with scales matching
                those used to construct this module.
            instances (SparseBoxList):
        Returns:
            Tensor:
                A tensor of shape (M, output_size, output_size, C) where M is
                the total number of boxes aggregated over all N batch images
                and C is the number of channels in `x`.
        """
        num_level_assignments = len(self.level_poolers)

        assert len(x) == num_level_assignments, (
            "unequal value, num_level_assignments={}, but x is list of {} "
            "Tensors".format(num_level_assignments, len(x)))

        if num_level_assignments == 1:
            return self.level_poolers[0](
                x[0], instances.data.boxes, instances.indices[:, 0]
            )

        level_assignments = assign_boxes_to_levels(
            instances.data,
            self.min_level,
            self.max_level,
            self.canonical_box_size,
            self.canonical_level
        )

        output_inds, output_data = [], []
        for level, (x_level, pooler) in enumerate(zip(x, self.level_poolers)):
            level_inds = tf.where(tf.equal(level_assignments, level))[:, 0]
            level_boxes = tf.gather(instances.data.boxes, level_inds)
            level_box_inds = tf.gather(instances.indices[:, 0], level_inds)
            level_data = pooler(x_level, level_boxes, level_box_inds)
            output_inds.append(tf.cast(level_inds, tf.int32))
            output_data.append(level_data)
            tf.summary.scalar(
                'roi_align/num_roi_level_{}'.format(level + 2), tf.size(level_inds))
        output_data = tf.concat(output_data, axis=0)
        output_inds = tf.concat(output_inds, axis=0)
        output_inds_invert_perm = tf.invert_permutation(output_inds)
        output = tf.gather(output_data, output_inds_invert_perm)

        return output
Example #20
    def calculate_allocation_weighting(self, usage_vector):
        """
        :param usage_vector: tensor of shape [batch_size, memory_size]
        :return: allocation tensor of shape [batch_size, memory_size]
        """

        usage_vector = Memory.epsilon + (1 - Memory.epsilon) * usage_vector

        #usage_vector = tf.Print(usage_vector, [usage_vector[0][0], usage_vector[0][1]], "\nusage_vector_in_calculate_allocation_weighting_func: \n", summarize = 1)

        # We sort "-usage_vector" because top_k returns the highest values and we need the lowest
        highest_usage, inverse_indices = tf.nn.top_k(-usage_vector,
                                                     k=self.memory_size)

        #highest_usage = tf.Print(highest_usage, [highest_usage[0][0], highest_usage[0][1]], "\nhighest_usage_in_calculate_allocation_weighting_func: \n", summarize = 1)

        #inverse_indices = tf.Print(inverse_indices, [inverse_indices[0][0], inverse_indices[0][1]], "\ninverse_indices_in_calculate_allocation_weighting_func: \n", summarize = 1)

        lowest_usage = -highest_usage

        #lowest_usage = tf.Print(lowest_usage, [lowest_usage[0][0], lowest_usage[0][1]], "\nlowest_usage_in_calculate_allocation_weighting_func: \n", summarize = 1)

        allocation_scrambled = (1 - lowest_usage) * tf.cumprod(
            lowest_usage, axis=1, exclusive=True)

        #allocation_scrambled = tf.Print(allocation_scrambled, [allocation_scrambled[0][0], allocation_scrambled[0][1]],
        #                                "\nallocation_scrambled: \n", summarize = 1)

        # allocation_scrambled is in sorted order: entry i belongs to the i-th least-used slot.
        # Invert the sort indices for each batch row to map values back to their original slots.
        indices = tf.stack([
            tf.invert_permutation(batch_indices)
            for batch_indices in tf.unstack(inverse_indices)
        ])

        #indices = tf.Print(indices, [indices[0][0], indices[0][1]], "\nindices: \n", summarize = 1)

        allocation = tf.stack([
            tf.gather(mem, ind) for mem, ind in zip(
                tf.unstack(allocation_scrambled), tf.unstack(indices))
        ])

        #allocation = tf.Print(allocation, [allocation[0][0], allocation[0][1]],
        #                        "\nallocation: \n", summarize = 1)

        return allocation
Example #21
def multilevel_roi_align(features, rcnn_boxes, resolution):
    """
    Args:
        features ([tf.Tensor]): 4 FPN feature level P2-5, each with BS X NumChannel X H_feature X W_feature
        rcnn_boxes (tf.Tensor): t x 5, t is the number of sampled boxes
        resolution (int): output spatial resolution, scalar
    Returns:
        all_rois: Num_fg_boxes x NumChannel x H_roi x W_roi
    """
    assert len(features) == 4, features
    # Reassign rcnn_boxes to levels
    level_ids, level_boxes = fpn_map_rois_to_levels(rcnn_boxes)
    all_rois = []

    # Crop patches from corresponding levels
    for i, boxes, featuremap in zip(itertools.count(), level_boxes, features):
        with tf.name_scope('roi_level{}'.format(i + 2)):

            # coordinate system fix for boxes
            boxes = tf.concat(
                (boxes[:, :1], boxes[:, 1:] - 0.5 * cfg.FPN.ANCHOR_STRIDES[i]),
                axis=1)

            # This is a custom tensorflow op for doing ROI align. See CODEBASE.md for more info
            roi_feature_maps = tf.roi_align(featuremap,
                                            boxes,
                                            pooled_height=resolution,
                                            pooled_width=resolution,
                                            spatial_scale=1.0 /
                                            cfg.FPN.ANCHOR_STRIDES[i],
                                            sampling_ratio=2)
            all_rois.append(roi_feature_maps)

    # this can fail if using TF<=1.8 with MKL build
    all_rois = tf.concat(all_rois, axis=0)  # NCHW
    # Unshuffle to the original order, to match the original samples
    level_id_perm = tf.concat(level_ids, axis=0)  # A permutation of 1~N
    level_id_invert_perm = tf.invert_permutation(level_id_perm)
    all_rois = tf.gather(all_rois, level_id_invert_perm)
    return all_rois
Example #22
    def get_output(self, dec_output, biases):
        """
        get svd-softmax approximation
        :param dec_output: A Tensor [batch_size*seq_length, hidden_units], decoder output
        :param biases: A Tensor [tgt_vocab_size], output bias
        :return: A Tensor [batch_size*seq_length, tgt_vocab_size], output after softmax approximation
        """
        _h = tf.einsum('ij,aj->ai', self.V_t, dec_output)
        _z = tf.add(tf.einsum('ij,aj->ai', self.B[:, :self.window_size], _h[:, :self.window_size]), biases)

        top_k = tf.nn.top_k(_z, k=self.tgt_vocab_size)
        _indices, values = top_k.indices, top_k.values

        _z = tf.add(tf.squeeze(tf.matmul(tf.gather(self.B, _indices[:, :self.num_full_view]), tf.expand_dims(_h, axis=-1))),
                    tf.gather(biases, _indices[:, :self.num_full_view]))
        _z = tf.concat([_z, values[:, self.num_full_view:]], axis=-1)
        _z = tf.map_fn(lambda x: tf.gather(x[0], tf.invert_permutation(x[1])), (_z, _indices), dtype=tf.float32)
        _z = tf.exp(_z)
        Z = tf.expand_dims(tf.reduce_sum(_z, axis=-1), axis=1)
        logits = _z / Z

        return logits
Example #23
    def multilevel_roi_align(self, features, rcnn_boxes):
        """ROI align pooling feature from the right level of feature."""
        config = self.config
        assert len(features) == 4
        # Reassign rcnn_boxes to levels # based on box area size
        level_ids, level_boxes = self.fpn_map_rois_to_levels(rcnn_boxes)
        all_rois = []

        # Crop patches from corresponding levels
        for i_, boxes, featuremap in zip(itertools.count(), level_boxes,
                                         features):
            with tf.name_scope("roi_level%s" % (i_ + 2)):
                boxes_on_featuremap = boxes * (1.0 / config.anchor_strides[i_])
                all_rois.append(
                    roi_align(featuremap, boxes_on_featuremap, config.person_h,
                              config.person_w))

        all_rois = tf.concat(all_rois, axis=0)  # NCHW
        # Unshuffle to the original order, to match the original samples
        level_id_perm = tf.concat(level_ids, axis=0)  # A permutation of 1~N
        level_id_invert_perm = tf.invert_permutation(level_id_perm)
        all_rois = tf.gather(all_rois, level_id_invert_perm)
        return all_rois
Example #24
def multilevel_roi_align(fpn_feats, boxes, level_indexes, output_shape,
                         eff_config):
  """
    Given [R, 4] boxes (x1, y1, x2, y2) and [R] level_indexes indicating
    the FPN level of each box.
  """
  # gather boxes for each feature level
  all_rois = []
  level_ids = []
  # for debugging
  #boxes_on_fp = []
  # e.g. input 1920 -> feature map sizes [160, 80, 40, 20, 10] for levels {3, 4, 5, 6, 7}
  for level in range(eff_config.min_level, eff_config.max_level + 1):
    this_level_boxes_idxs = tf.where(tf.equal(level_indexes, level))
    # [K, 1] -> [K]
    this_level_boxes_idxs = tf.reshape(this_level_boxes_idxs, [-1])
    level_ids.append(this_level_boxes_idxs)
    this_level_boxes = tf.gather(boxes, this_level_boxes_idxs)
    boxes_on_featuremap = this_level_boxes * (1.0 / (2. ** level))
    featuremap = fpn_feats[level]  # [1, H, W, C]
    # [K, output_shape, output_shape, C]
    box_feats = roi_align(featuremap, boxes_on_featuremap, output_shape)
    box_feats = tf.reduce_mean(box_feats, axis=[1, 2])  #  [K, C]
    all_rois.append(box_feats)

    # for debugging
    #boxes_on_fp.append(boxes_on_featuremap)

  all_rois = tf.concat(all_rois, axis=0)
  # Unshuffle to the original order, to match the original samples
  level_id_perm = tf.concat(level_ids, axis=0)  # A permutation of 1~N
  level_id_invert_perm = tf.invert_permutation(level_id_perm)
  all_rois = tf.gather(all_rois, level_id_invert_perm)

  #boxes_on_fp = tf.concat(boxes_on_fp, axis=0)
  #boxes_on_fp = tf.gather(boxes_on_fp, level_id_invert_perm)
  return all_rois#, boxes_on_fp
Example #25
def rankdata(a, method='average', scope='rankdata'):
    with tf.name_scope(scope):
        arr = tf.reshape(a, shape=[-1])
        _, sorter = tf.nn.top_k(-arr, tf.shape(arr)[-1])
        inv = tf.invert_permutation(sorter)
        if method == 'ordinal':
            res = inv + 1
        else:
            arr = tf.gather(arr, sorter)
            obs = tf.cast(tf.not_equal(arr[1:], arr[:-1]), dtype=tf.int32)
            obs = tf.concat([[1], obs], axis=0)
            dense = tf.gather(tf.cumsum(obs), inv)
            if method == 'dense':
                res = dense
            else:
                count = tf.reshape(tf.where(tf.not_equal(obs, tf.zeros_like(obs))), [-1])
                count = tf.concat([tf.cast(count, tf.int32), tf.shape(obs)], axis=0)
                if method == 'max':
                    res = tf.gather(count, dense)
                elif method == 'min':
                    res = tf.gather(count, dense - 1) + 1
                else:
                    res = (tf.gather(count, dense) + tf.gather(count, dense - 1) + 1) / 2
        return tf.cast(res, tf.float32)
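A usage sketch mirroring scipy.stats.rankdata conventions (toy data; expected outputs shown as comments):

import tensorflow as tf

a = tf.constant([3., 1., 4., 1., 5.])
with tf.Session() as sess:
    print(sess.run(rankdata(a, method='average')))  # [3.  1.5 4.  1.5 5. ]
    print(sess.run(rankdata(a, method='min')))      # [3. 1. 4. 1. 5.]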
Example #26
def covariance(x,
               y=None,
               sample_axis=0,
               event_axis=-1,
               keepdims=False,
               name=None):
    """Estimate covariance between members of `event_axis`.

  Sample covariance for scalars is defined as:

  ```none
  Cov[X, Y] := N^{-1} sum_{n=1}^N (X_n - Xbar) Conj{(Y_n - Ybar)}
  Xbar := N^{-1} sum_{n=1}^N X_n
  Ybar := N^{-1} sum_{n=1}^N Y_n
  ```

  For vectors `X = (X1, ..., XN)`, `Y = (Y1, ..., YN)`, one is often interested
  in the covariance matrix, `C_{ij} := Cov[Xi, Yj]`.

  ```python
  x = tf.random_normal(shape=(100, 2, 3))
  y = tf.random_normal(shape=(100, 2, 3))

  # cov[i, j] is the sample covariance between x[:, i, j] and y[:, i, j].
  cov = covariance(x, y, sample_axis=0, event_axis=None)

  # cov_matrix[i, m, n] is the sample covariance of x[:, i, m] and y[:, i, n]
  cov_matrix = covariance(x, y, sample_axis=0, event_axis=-1)
  ```

  Notice we divide by `N` (the numpy default), which does not create `NaN`
  when `N = 1`, but is slightly biased.

  Args:
    x:  A numeric `Tensor` holding samples.
    y:  Optional `Tensor` with same `dtype` and `shape` as `x`.
      Default value: `None` (`y` is effectively set to `x`).
    sample_axis: Scalar or vector `Tensor` designating axis holding samples, or
      `None` (meaning all axis hold samples).
      Default value: `0` (leftmost dimension).
    event_axis:  Scalar or vector `Tensor`, or `None`. Axis holding random
      events, whose covariance we are interested in. If a vector, entries must
      form a contiguous block of dims. `sample_axis` and `event_axis` should not
      intersect.
      Default value: `-1` (rightmost axis holds events).
    keepdims:  Boolean.  Whether to keep the sample axis as singletons.
    name: Python `str` name prefixed to Ops created by this function.
          Default value: `None` (i.e., `'covariance'`).

  Returns:
    cov: A `Tensor` of same `dtype` as the `x`, and rank equal to
      `rank(x) - len(sample_axis) + 2 * len(event_axis)`.

  Raises:
    AssertionError:  If `x` and `y` are found to have different shape.
    ValueError:  If `sample_axis` and `event_axis` are found to overlap.
    ValueError:  If `event_axis` is found to not be contiguous.
  """

    with tf.name_scope(name,
                       'covariance',
                       values=[x, y, event_axis, sample_axis]):
        x = tf.convert_to_tensor(x, name='x')
        # Covariance *only* uses the centered versions of x (and y).
        x -= tf.reduce_mean(x, axis=sample_axis, keepdims=True)

        if y is None:
            y = x
        else:
            y = tf.convert_to_tensor(y, name='y', dtype=x.dtype)
            # If x and y have different shape, sample_axis and event_axis will likely
            # be wrong for one of them!
            x.shape.assert_is_compatible_with(y.shape)
            y -= tf.reduce_mean(y, axis=sample_axis, keepdims=True)

        if event_axis is None:
            return tf.reduce_mean(x * tf.conj(y),
                                  axis=sample_axis,
                                  keepdims=keepdims)

        if sample_axis is None:
            raise ValueError(
                'sample_axis was None, which means all axis hold events, and this '
                'overlaps with event_axis ({})'.format(event_axis))

        event_axis = _make_positive_axis(event_axis, tf.rank(x))
        sample_axis = _make_positive_axis(sample_axis, tf.rank(x))

        # If we get lucky and axis is statically defined, we can do some checks.
        if _is_list_like(event_axis) and _is_list_like(sample_axis):
            if set(event_axis).intersection(sample_axis):
                raise ValueError(
                    'sample_axis ({}) and event_axis ({}) overlapped'.format(
                        sample_axis, event_axis))
            if (np.diff(sorted(event_axis)) > 1).any():
                raise ValueError(
                    'event_axis must be contiguous. Found: {}'.format(
                        event_axis))
            batch_axis = list(
                sorted(
                    set(range(x.shape.ndims)).difference(sample_axis +
                                                         event_axis)))
        else:
            batch_axis, _ = tf.setdiff1d(
                tf.range(0, tf.rank(x)), tf.concat((sample_axis, event_axis),
                                                   0))

        event_axis = tf.convert_to_tensor(event_axis, name='event_axis')
        sample_axis = tf.convert_to_tensor(sample_axis, name='sample_axis')
        batch_axis = tf.convert_to_tensor(batch_axis, name='batch_axis')

        # Permute x/y until shape = B + E + S
        perm_for_xy = tf.concat((batch_axis, event_axis, sample_axis), 0)
        x_permed = tf.transpose(x, perm=perm_for_xy)
        y_permed = tf.transpose(y, perm=perm_for_xy)

        batch_ndims = tf.size(batch_axis)
        batch_shape = tf.shape(x_permed)[:batch_ndims]
        event_ndims = tf.size(event_axis)
        event_shape = tf.shape(x_permed)[batch_ndims:batch_ndims + event_ndims]
        sample_shape = tf.shape(x_permed)[batch_ndims + event_ndims:]
        sample_ndims = tf.size(sample_shape)
        n_samples = tf.reduce_prod(sample_shape)
        n_events = tf.reduce_prod(event_shape)

        # Flatten sample_axis into one long dim.
        x_permed_flat = tf.reshape(
            x_permed, tf.concat((batch_shape, event_shape, [n_samples]), 0))
        y_permed_flat = tf.reshape(
            y_permed, tf.concat((batch_shape, event_shape, [n_samples]), 0))
        # Do the same for event_axis.
        x_permed_flat = tf.reshape(
            x_permed, tf.concat((batch_shape, [n_events], [n_samples]), 0))
        y_permed_flat = tf.reshape(
            y_permed, tf.concat((batch_shape, [n_events], [n_samples]), 0))

        # After matmul, cov.shape = batch_shape + [n_events, n_events]
        cov = tf.matmul(x_permed_flat, y_permed_flat,
                        adjoint_b=True) / tf.cast(n_samples, x.dtype)

        # Insert some singletons to make
        # cov.shape = batch_shape + event_shape**2 + [1,...,1]
        # This is just like x_permed.shape, except the sample_axis is all 1's, and
        # the [n_events] became event_shape**2.
        cov = tf.reshape(
            cov,
            tf.concat(
                (
                    batch_shape,
                    # event_shape**2 used here because it is the same length as
                    # event_shape, and has the same number of elements as one
                    # batch of covariance.
                    event_shape**2,
                    tf.ones([sample_ndims], tf.int32)),
                0))
        # Permuting by the inverse permutation undoes the earlier transpose,
        # making cov.shape have ones in the positions where there were samples,
        # and [n_events * n_events] in the event position.
        cov = tf.transpose(cov, perm=tf.invert_permutation(perm_for_xy))

        # Now expand event_shape**2 into event_shape + event_shape.
        # We here use (for the first time) the fact that we require event_axis to be
        # contiguous.
        e_start = event_axis[0]
        e_len = 1 + event_axis[-1] - event_axis[0]
        cov = tf.reshape(
            cov,
            tf.concat((tf.shape(cov)[:e_start], event_shape, event_shape,
                       tf.shape(cov)[e_start + e_len:]), 0))

        # tf.squeeze requires python ints for axis, not Tensor.  This is enough to
        # require our axis args to be constants.
        if not keepdims:
            squeeze_axis = tf.where(sample_axis < e_start, sample_axis,
                                    sample_axis + e_len)
            cov = _squeeze(cov, axis=squeeze_axis)

        return cov
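The final transpose uses a handy identity: transposing by a permutation and then by its inverse is a no-op. A standalone check (separate from the covariance code):

import tensorflow as tf

x = tf.random_normal([2, 3, 4])
perm = tf.constant([2, 0, 1], dtype=tf.int32)
y = tf.transpose(tf.transpose(x, perm=perm),
                 perm=tf.invert_permutation(perm))
with tf.Session() as sess:
    a, b = sess.run([x, y])
    assert (a == b).all()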
Example #27
def testInversePerm():
    # tf.invert_permutation expects a 1-D permutation, so invert row by row.
    x = tf.constant(
        [[3, 2, 1, 0], [2, 3, 0, 1]],
        dtype=tf.int32)
    with tf.Session() as sess:
        print(sess.run(tf.map_fn(tf.invert_permutation, x)))
Example #28
 def test_InvertPermutation(self):
     t = tf.invert_permutation(np.random.permutation(10))
     self.check(t)
Example #29
 def _inverse(self, y):
   return tf.gather(y, tf.invert_permutation(self.permutation), axis=self.axis)
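In context this is the inverse of a permutation bijector whose forward pass gathers by self.permutation; forward followed by inverse is the identity. A sketch with a stand-in permutation (the standalone names are assumptions, not the bijector API):

import tensorflow as tf

permutation = tf.constant([2, 0, 1], dtype=tf.int32)
x = tf.constant([10., 20., 30.])
y = tf.gather(x, permutation)                              # forward: [30. 10. 20.]
x_back = tf.gather(y, tf.invert_permutation(permutation))  # inverse
with tf.Session() as sess:
    print(sess.run(x_back))  # [10. 20. 30.]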
Example #30
def covariance(x,
               y=None,
               sample_axis=0,
               event_axis=-1,
               keepdims=False,
               name=None):
  """Sample covariance between observations indexed by `event_axis`.

  Given `N` samples of scalar random variables `X` and `Y`, covariance may be
  estimated as

  ```none
  Cov[X, Y] := N^{-1} sum_{n=1}^N (X_n - Xbar) Conj{(Y_n - Ybar)}
  Xbar := N^{-1} sum_{n=1}^N X_n
  Ybar := N^{-1} sum_{n=1}^N Y_n
  ```

  For vector-variate random variables `X = (X1, ..., Xd)`, `Y = (Y1, ..., Yd)`,
  one is often interested in the covariance matrix, `C_{ij} := Cov[Xi, Yj]`.

  ```python
  x = tf.random_normal(shape=(100, 2, 3))
  y = tf.random_normal(shape=(100, 2, 3))

  # cov[i, j] is the sample covariance between x[:, i, j] and y[:, i, j].
  cov = tfp.stats.covariance(x, y, sample_axis=0, event_axis=None)

  # cov_matrix[i, m, n] is the sample covariance of x[:, i, m] and y[:, i, n]
  cov_matrix = tfp.stats.covariance(x, y, sample_axis=0, event_axis=-1)
  ```

  Notice we divide by `N` (the numpy default), which does not create `NaN`
  when `N = 1`, but is slightly biased.

  Args:
    x:  A numeric `Tensor` holding samples.
    y:  Optional `Tensor` with same `dtype` and `shape` as `x`.
      Default value: `None` (`y` is effectively set to `x`).
    sample_axis: Scalar or vector `Tensor` designating axis holding samples, or
      `None` (meaning all axis hold samples).
      Default value: `0` (leftmost dimension).
    event_axis:  Scalar or vector `Tensor`, or `None` (scalar events).
      Axis indexing random events, whose covariance we are interested in.
      If a vector, entries must form a contiguous block of dims. `sample_axis`
      and `event_axis` should not intersect.
      Default value: `-1` (rightmost axis holds events).
    keepdims:  Boolean.  Whether to keep the sample axis as singletons.
    name: Python `str` name prefixed to Ops created by this function.
          Default value: `None` (i.e., `'covariance'`).

  Returns:
    cov: A `Tensor` of same `dtype` as the `x`, and rank equal to
      `rank(x) - len(sample_axis) + 2 * len(event_axis)`.

  Raises:
    AssertionError:  If `x` and `y` are found to have different shape.
    ValueError:  If `sample_axis` and `event_axis` are found to overlap.
    ValueError:  If `event_axis` is found to not be contiguous.
  """

  with tf.name_scope(
      name, 'covariance', values=[x, y, event_axis, sample_axis]):
    x = tf.convert_to_tensor(x, name='x')
    # Covariance *only* uses the centered versions of x (and y).
    x -= tf.reduce_mean(x, axis=sample_axis, keepdims=True)

    if y is None:
      y = x
    else:
      y = tf.convert_to_tensor(y, name='y', dtype=x.dtype)
      # If x and y have different shape, sample_axis and event_axis will likely
      # be wrong for one of them!
      x.shape.assert_is_compatible_with(y.shape)
      y -= tf.reduce_mean(y, axis=sample_axis, keepdims=True)

    if event_axis is None:
      return tf.reduce_mean(x * tf.conj(y), axis=sample_axis, keepdims=keepdims)

    if sample_axis is None:
      raise ValueError(
          'sample_axis was None, which means all axis hold events, and this '
          'overlaps with event_axis ({})'.format(event_axis))

    event_axis = _make_positive_axis(event_axis, tf.rank(x))
    sample_axis = _make_positive_axis(sample_axis, tf.rank(x))

    # If we get lucky and axis is statically defined, we can do some checks.
    if _is_list_like(event_axis) and _is_list_like(sample_axis):
      if set(event_axis).intersection(sample_axis):
        raise ValueError(
            'sample_axis ({}) and event_axis ({}) overlapped'.format(
                sample_axis, event_axis))
      if (np.diff(sorted(event_axis)) > 1).any():
        raise ValueError(
            'event_axis must be contiguous. Found: {}'.format(event_axis))
      batch_axis = list(
          sorted(
              set(range(x.shape.ndims)).difference(sample_axis + event_axis)))
    else:
      batch_axis, _ = tf.setdiff1d(
          tf.range(0, tf.rank(x)), tf.concat((sample_axis, event_axis), 0))

    event_axis = tf.convert_to_tensor(
        event_axis, name='event_axis', dtype=tf.int32)
    sample_axis = tf.convert_to_tensor(
        sample_axis, name='sample_axis', dtype=tf.int32)
    batch_axis = tf.convert_to_tensor(
        batch_axis, name='batch_axis', dtype=tf.int32)

    # Permute x/y until shape = B + E + S
    perm_for_xy = tf.concat((batch_axis, event_axis, sample_axis), 0)
    x_permed = tf.transpose(x, perm=perm_for_xy)
    y_permed = tf.transpose(y, perm=perm_for_xy)

    batch_ndims = tf.size(batch_axis)
    batch_shape = tf.shape(x_permed)[:batch_ndims]
    event_ndims = tf.size(event_axis)
    event_shape = tf.shape(x_permed)[batch_ndims:batch_ndims + event_ndims]
    sample_shape = tf.shape(x_permed)[batch_ndims + event_ndims:]
    sample_ndims = tf.size(sample_shape)
    n_samples = tf.reduce_prod(sample_shape)
    n_events = tf.reduce_prod(event_shape)

    # Flatten sample_axis into one long dim.
    x_permed_flat = tf.reshape(
        x_permed, tf.concat((batch_shape, event_shape, [n_samples]), 0))
    y_permed_flat = tf.reshape(
        y_permed, tf.concat((batch_shape, event_shape, [n_samples]), 0))
    # Do the same for event_axis.
    x_permed_flat = tf.reshape(
        x_permed, tf.concat((batch_shape, [n_events], [n_samples]), 0))
    y_permed_flat = tf.reshape(
        y_permed, tf.concat((batch_shape, [n_events], [n_samples]), 0))

    # After matmul, cov.shape = batch_shape + [n_events, n_events]
    cov = tf.matmul(
        x_permed_flat, y_permed_flat, adjoint_b=True) / tf.cast(
            n_samples, x.dtype)

    # Insert some singletons to make
    # cov.shape = batch_shape + event_shape**2 + [1,...,1]
    # This is just like x_permed.shape, except the sample_axis is all 1's, and
    # the [n_events] became event_shape**2.
    cov = tf.reshape(
        cov,
        tf.concat(
            (
                batch_shape,
                # event_shape**2 used here because it is the same length as
                # event_shape, and has the same number of elements as one
                # batch of covariance.
                event_shape**2,
                tf.ones([sample_ndims], tf.int32)),
            0))
    # Permuting by the inverse permutation undoes the earlier transpose,
    # making cov.shape have ones in the positions where there were samples,
    # and [n_events * n_events] in the event position.
    cov = tf.transpose(cov, perm=tf.invert_permutation(perm_for_xy))

    # Now expand event_shape**2 into event_shape + event_shape.
    # We here use (for the first time) the fact that we require event_axis to be
    # contiguous.
    e_start = event_axis[0]
    e_len = 1 + event_axis[-1] - event_axis[0]
    cov = tf.reshape(
        cov,
        tf.concat((tf.shape(cov)[:e_start], event_shape, event_shape,
                   tf.shape(cov)[e_start + e_len:]), 0))

    # tf.squeeze requires python ints for axis, not Tensor.  This is enough to
    # require our axis args to be constants.
    if not keepdims:
      squeeze_axis = tf.where(sample_axis < e_start, sample_axis,
                              sample_axis + e_len)
      cov = _squeeze(cov, axis=squeeze_axis)

    return cov
Example #31
        z[i] = True
    else:
        z[i] = False
z = z.reshape((10, 5, 4))
z_where = tf.where(z)

# tf.unique
x = np.random.randint(0, 10, 100)
z_unique = tf.unique(x)

# tf.edit_distance

# tf.invert_permutation
x = np.arange(0, 10)
np.random.shuffle(x)
z_invert_permutation = tf.invert_permutation(x)

with tf.Session() as sess:

    print("tf.argmin")
    print(sess.run(z_argmin))
    print("tf.argmax")
    print(sess.run(z_argmax))
    print("tf.listdiff")
    print(sess.run(z_listdiff))
    print("tf.where")
    print(sess.run(z_where))
    print("tf.unique")
    print(sess.run(z_unique))
    #print("tf.edit_distance")
    #print(sess.run(z_edit_distance))
Example #33
 def _inverse(self, y):
     return tf.gather(y,
                      tf.invert_permutation(self.permutation),
                      axis=self.axis)
Example #34
# Returns the elements of x that are not in y, plus their indices in x
tf.setdiff1d(x, y, index_dtype=tf.int32, name=None)
 
# Returns a tensor of the unique values in x, and, for each element of the original x, its index in that tensor
tf.unique(x, out_idx=None, name=None)
 
# x if condition else y; condition is a bool tensor, e.g. built with tf.equal()
# x and y must have the same shape and dtype
tf.where(condition, x=None, y=None, name=None)
 
# Returns the index of the maximum/minimum value along the given axis
tf.argmax(input, axis=None, name=None, output_type=tf.int64)
tf.argmin(input, axis=None, name=None, output_type=tf.int64)
 
# The values of x serve as indices into y, and range(len(x)) as y's values
# y[x[i]] = i for i in [0, 1, ..., len(x) - 1]  (see the sketch after this list)
tf.invert_permutation(x, name=None)
 
# Others
tf.edit_distance

# transformation
tf.shape(input, name=None)
tf.size(input, name=None)
tf.rank(input, name=None) # the rank of a tensor is the number of indices needed to uniquely address one of its elements
tf.reshape(tensor, shape, name=None)
tf.expand_dims(input, dim, name=None)

tf.slice(input_, begin, size, name=None)
tf.split(split_dim, num_split, value, name='split')
tf.concat(concat_dim, values, name = 'concat')
tf.pack(values, axis = 0, name = 'pack')
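As referenced above for tf.invert_permutation, a quick demonstration of the y[x[i]] = i identity:

import tensorflow as tf

x = tf.constant([3, 4, 0, 2, 1], dtype=tf.int32)
y = tf.invert_permutation(x)
with tf.Session() as sess:
    print(sess.run(y))  # [2 4 3 0 1], since y[x[i]] = i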