Example #1
def show_errors(model, data, thresh=0.35, take=15):
    dataset = data.map(preprocess).batch(4).take(take)
    for xb, yb in dataset:
        pred = model.predict(xb)  # predict once per batch, not once per sample
        for i in range(4):
            mappa = pred[i]
            mappa_true = yb[i]

            im = (xb[i].numpy())
            if tf.math.reduce_max(mappa) > thresh:
                if tf.math.reduce_max(mappa_true) == 1:
                    if tf.math.reduce_max(mappa) != (tf.math.reduce_max(
                            tf.where(mappa_true == 1, mappa, 0))):
                        cent = tf.unravel_index(
                            tf.math.argmax(tf.reshape(mappa, [-1])),
                            [268, 480])
                        cv2.circle(im, (cent[1] * 4, cent[0] * 4), 7,
                                   (1, 0, 0), 5)
                        plt.figure(figsize=(20, 10))
                        plt.title('wrong place detection')
                        plt.imshow(im)
                else:
                    cent = tf.unravel_index(
                        tf.math.argmax(tf.reshape(mappa, [-1])), [268, 480])
                    cv2.circle(im, (cent[1] * 4, cent[0] * 4), 7, (1, 0, 0), 5)
                    plt.figure(figsize=(20, 10))
                    plt.title('wrong detection')
                    plt.imshow(im)
            else:
                if tf.math.reduce_max(mappa_true) == 1:
                    plt.figure(figsize=(20, 10))
                    plt.title('missed detection')
                    plt.imshow(im)
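A minimal sketch of the pattern used throughout these examples (flat argmax of a confidence map, then tf.unravel_index back to (row, col)); the 268x480 map size and the x4 stride are taken from Example #1:

import numpy as np
import tensorflow as tf

heatmap = np.zeros((268, 480), dtype=np.float32)
heatmap[100, 200] = 0.9  # single hot pixel

flat_idx = tf.math.argmax(tf.reshape(heatmap, [-1]))  # int64 flat index
cent = tf.unravel_index(flat_idx, [268, 480])         # scalar index -> [row, col]
print(cent.numpy())                                   # [100 200]
print(int(cent[0]) * 4, int(cent[1]) * 4)             # back to full-resolution pixel coords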
Example #2
    def call_(self,
              inputs,
              pool_size=(1, 2, 2, 1),
              strides=(1, 2, 2, 1),
              padding='VALID'):
        out, argmax = tf.nn.max_pool_with_argmax(inputs[0], pool_size, strides,
                                                 padding)
        a = tf.unravel_index(tf.reshape(argmax, [-1]),
                             tf.shape(inputs[0], out_type=tf.int64))
        a = tf.transpose(a)
        tl = tf.unravel_index(
            tf.range(0, tf.size(inputs[1], out_type=tf.int64), dtype=tf.int64),
            tf.shape(inputs[1], out_type=tf.int64))
        tl = tf.transpose(tl) * strides
        out_shape = tf.shape(inputs[0], out_type=tf.int64)
        b = tf.SparseTensor(indices=tl,
                            values=tf.reshape(inputs[1], [-1]),
                            dense_shape=out_shape)
        # tf.sparse_tensor_to_dense is deprecated; use tf.sparse.to_dense
        c = tf.sparse.to_dense(b, validate_indices=False)
        c = tf.reduce_max(c, [-1])
        return c
Example #3
    def inference(self, inputs):
        '''Direct TF inference on GPU. Added with: https://arxiv.org/abs/1909.11229'''
        cfg = self.cfg
        heads = self.get_net(inputs)
        locref = heads['locref']
        probs = tf.sigmoid(heads['part_pred'])

        if cfg.batch_size == 1:
            probs = tf.squeeze(probs, axis=0)
            locref = tf.squeeze(locref, axis=0)
            l_shape = tf.shape(probs)

            locref = tf.reshape(locref, (l_shape[0] * l_shape[1], -1, 2))
            probs = tf.reshape(probs, (l_shape[0] * l_shape[1], -1))
            maxloc = tf.argmax(probs, axis=0)

            loc = tf.unravel_index(maxloc, (tf.cast(l_shape[0], tf.int64), tf.cast(l_shape[1], tf.int64)))
            maxloc = tf.reshape(maxloc, (1, -1))

            joints = tf.reshape(tf.range(0, tf.cast(l_shape[2], dtype=tf.int64)), (1, -1))
            indices = tf.transpose(tf.concat([maxloc, joints], axis=0))

            offset = tf.gather_nd(locref, indices)
            offset = tf.gather(offset, [1, 0], axis=1)
            likelihood = tf.reshape(tf.gather_nd(probs, indices), (-1, 1))

            pose = cfg.stride * tf.cast(tf.transpose(loc), dtype=tf.float32) + cfg.stride * 0.5 + offset * cfg.locref_stdev
            pose = tf.concat([pose, likelihood], axis=1)

            return {'pose': pose}
        else:
            l_shape = tf.shape(probs)  # batchsize times x times y times body parts
            locref = tf.reshape(locref, (l_shape[0], l_shape[1], l_shape[2], l_shape[3], 2))
            # reorder into x times y times batchsize times body parts
            locref = tf.transpose(locref, [1, 2, 0, 3, 4])
            probs = tf.transpose(probs, [1, 2, 0, 3])

            l_shape = tf.shape(probs)  # x times y times batch times body parts

            locref = tf.reshape(locref, (l_shape[0] * l_shape[1], -1, 2))
            probs = tf.reshape(probs, (l_shape[0] * l_shape[1], -1))
            maxloc = tf.argmax(probs, axis=0)
            loc = tf.unravel_index(maxloc, (tf.cast(l_shape[0], tf.int64), tf.cast(l_shape[1], tf.int64)))  # tuple of max indices

            maxloc = tf.reshape(maxloc, (1, -1))
            joints = tf.reshape(tf.range(0, tf.cast(l_shape[2] * l_shape[3], dtype=tf.int64)), (1, -1))
            indices = tf.transpose(tf.concat([maxloc, joints], axis=0))

            # extract corresponding locref x and y as well as probability
            offset = tf.gather_nd(locref, indices)
            offset = tf.gather(offset, [1, 0], axis=1)
            likelihood = tf.reshape(tf.gather_nd(probs, indices), (-1, 1))

            pose = cfg.stride * tf.cast(tf.transpose(loc), dtype=tf.float32) + cfg.stride * 0.5 + offset * cfg.locref_stdev
            pose = tf.concat([pose, likelihood], axis=1)
            return {'pose': pose}
Example #4
    def inference(self, inputs):
        """ Direct TF inference on GPU.
        Added with: https://arxiv.org/abs/1909.11229
        """
        heads = self.get_net(inputs)
        locref = heads["locref"]
        probs = tf.sigmoid(heads["part_pred"])

        if self.cfg['batch_size'] == 1:
            probs = tf.squeeze(probs, axis=0)
            locref = tf.squeeze(locref, axis=0)
            l_shape = tf.shape(input=probs)
            locref = tf.reshape(locref, (l_shape[0] * l_shape[1], -1, 2))
            probs = tf.reshape(probs, (l_shape[0] * l_shape[1], -1))
            maxloc = tf.argmax(input=probs, axis=0)
            loc = tf.unravel_index(
                maxloc, (tf.cast(l_shape[0], tf.int64), tf.cast(l_shape[1], tf.int64))
            )
            maxloc = tf.reshape(maxloc, (1, -1))

            joints = tf.reshape(
                tf.range(0, tf.cast(l_shape[2], dtype=tf.int64)), (1, -1)
            )
        else:
            l_shape = tf.shape(input=probs)  # batchsize times x times y times body parts
            locref = tf.reshape(
                locref, (l_shape[0], l_shape[1], l_shape[2], l_shape[3], 2)
            )
            # turn into x times y time bs * bpts
            locref = tf.transpose(a=locref, perm=[1, 2, 0, 3, 4])
            probs = tf.transpose(a=probs, perm=[1, 2, 0, 3])

            l_shape = tf.shape(input=probs)  # x times y times batch times body parts

            locref = tf.reshape(locref, (l_shape[0] * l_shape[1], -1, 2))
            probs = tf.reshape(probs, (l_shape[0] * l_shape[1], -1))
            maxloc = tf.argmax(input=probs, axis=0)
            loc = tf.unravel_index(
                maxloc, (tf.cast(l_shape[0], tf.int64), tf.cast(l_shape[1], tf.int64))
            )  # tuple of max indices
            maxloc = tf.reshape(maxloc, (1, -1))
            joints = tf.reshape(
                tf.range(0, tf.cast(l_shape[2] * l_shape[3], dtype=tf.int64)), (1, -1)
            )

        # extract corresponding locref x and y as well as probability
        indices = tf.transpose(a=tf.concat([maxloc, joints], axis=0))
        offset = tf.gather_nd(locref, indices)
        offset = tf.gather(offset, [1, 0], axis=1)
        likelihood = tf.reshape(tf.gather_nd(probs, indices), (-1, 1))

        pose = (
                self.cfg['stride'] * tf.cast(tf.transpose(a=loc), dtype=tf.float32)
                + self.cfg['stride'] * 0.5
                + offset * self.cfg['locref_stdev']
        )
        pose = tf.concat([pose, likelihood], axis=1)
        return {"pose": pose}
Example #5
def euc_error(y_true, y_pred, target_size, axis=2):
    y_true_inds = K.argmax(y_true, axis=axis)
    y_true_inds = tf.unravel_index(K.reshape(y_true_inds, [-1]), target_size)
    y_pred_inds = K.argmax(y_pred, axis=axis)
    y_pred_inds = tf.unravel_index(K.reshape(y_pred_inds, [-1]), target_size)

    true_pred_diff = K.sum((y_true_inds - y_pred_inds)**2, axis=0)
    euc_distance = tf.sqrt(tf.cast(true_pred_diff, dtype=tf.float32))
    return tf.reduce_mean(euc_distance)
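Assuming the euc_error above is in scope, a tiny check with one keypoint on a (2, 3) grid: the true peak is at (1, 1) and the predicted peak at (1, 2), so the mean distance is 1.0:

import tensorflow as tf

y_true = tf.constant([[[0., 0., 0., 0., 1., 0.]]])  # one-hot over a flattened 2x3 grid
y_pred = tf.constant([[[0., 0., 0., 0., 0., 1.]]])
print(euc_error(y_true, y_pred, (2, 3)).numpy())    # 1.0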
Example #6
def get_vote_probability(flat_index, phc_shape, demo, partitions):
    """
    Find the probability that a given cell of a PHC produces
    the observed vote outcome of an election for a candidate.

    flat_index (int): the flat index of the selected cell
    phc_shape (tuple): the shape of a PHC's Tensor representation
    demo (tuple): the demographics of the district
    partitions (tuple): the partitions of votes for a candidate

    return: the probability that a PHC's cell produced the observed outcome
    """
    # Find the corresponding index
    index = tf.unravel_index(flat_index, phc_shape)
    matrix_dim = phc_shape[0]

    # Find the vote percentages for each demographic group
    vote_pcts = elect.get_vote_pcts_list(index, matrix_dim)

    # Binomial calculation
    # Independent binomial distributions for each demographic group where each
    # represents the probability of the voters in that group voting together
    # to satisfy the possible partitions of voters
    pmf = tfp.distributions.Binomial(demo, probs=vote_pcts).prob(partitions)

    return tf.math.reduce_sum(tf.math.reduce_prod(pmf, 1))
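The two pieces in isolation, as a minimal sketch: a hypothetical (5, 5, 5) PHC shape, made-up demographics and vote counts, and tensorflow_probability assumed to be installed as tfp:

import tensorflow as tf
import tensorflow_probability as tfp

index = tf.unravel_index(23, (5, 5, 5))  # flat cell 23 -> grid index
print(index.numpy())                     # [0 4 3]

# independent binomials per demographic group, multiplied together
pmf = tfp.distributions.Binomial(total_count=[10., 20.],
                                 probs=[0.3, 0.6]).prob([3., 12.])
print(tf.math.reduce_prod(pmf).numpy())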
Example #7
    def test_maximum_patch(self):
        glimpse_shp = [3, 3]
        img = np.array([[[0, 1, 2],
                         [3, 4, 5],
                         [6, 7, 8]],
                        [[-5, -5, -5],
                         [3, 4, -5],
                         [6, 7, -5]]
                        ], dtype=np.float32)
        img = img[:, :, :, np.newaxis]

        max_patch = maximum_patch(tf.constant(img), glimpse_shp)
        max_patch = tf.squeeze(max_patch, axis=3)
        # unravel_index returns a tuple of (row_idx, col_idx), but as a single tensor
        row_col_tuple = tf.unravel_index(tf.argmax(tf.layers.flatten(max_patch), axis=-1), glimpse_shp)
        idx = tf.transpose(row_col_tuple, [1, 0])

        with self.test_session():
            self.assertAllEqual([[[8, 15, 12],
                                  [21, 36, 27],
                                  [20, 33, 24]],
                                 [[-3, -13, -11],
                                  [10, -5, -9],
                                  [20, 10, 1]]], max_patch.eval())
            self.assertAllEqual([[1, 1],
                                 [2, 0]], idx.eval())
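The unravel/transpose step from this test, as a standalone eager (TF2) snippet reproducing the asserted [[1, 1], [2, 0]]:

import tensorflow as tf

flat = tf.constant([[0., 0., 0., 0., 1., 0., 0., 0., 0.],
                    [0., 0., 0., 0., 0., 0., 1., 0., 0.]])
row_col_tuple = tf.unravel_index(tf.argmax(flat, axis=-1), [3, 3])  # shape (2, batch)
idx = tf.transpose(row_col_tuple)                                   # shape (batch, 2)
print(idx.numpy())  # [[1 1], [2 0]]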
Example #8
    def get_best_action(self, state):
        state = np.expand_dims(state, axis=0)  # convert to batch
        state = tf.constant(state, dtype=tf.float32)
        p = self.brain.P(state)
        argmax = tf.argmax(p, axis=1)
        coords = tf.unravel_index(argmax, state.shape[1:3])
        return argmax.numpy(), tf.squeeze(coords).numpy()
Example #9
    def act(self, belief, keep_tensor=False):
        """Perform an action.
        If keep_tensor=True, return a TensorFlow tensor.

        Args:
            belief (numpy.ndarray): Agent's belief of the env state given n observations (computed by agent.guess(o, h))

        Returns:
            act (numpy.ndarray or tensorflow.Tensor): the selected action
        """
        single_input_flag = False
        if belief.ndim < self.ndim:
            single_input_flag = True
            belief = belief[None, ...]
        if isinstance(self.action_space, gym.spaces.MultiDiscrete):
            act = self.model.argmax_qvalue(belief)
            act = tf.unravel_index(act, self.action_space.nvec)
            act = tf.transpose(act)
            if single_input_flag:
                act = act[0]
            if not keep_tensor:
                act = act.numpy()
        else:
            act = self.model.argmax_qvalue(belief)
            if not keep_tensor:
                if single_input_flag:
                    act = int(act)
                else:
                    act = act.numpy()
        return act
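What the MultiDiscrete branch computes, on made-up numbers (nvec stands in for action_space.nvec):

import tensorflow as tf

nvec = (3, 4)                                      # e.g. gym.spaces.MultiDiscrete([3, 4])
flat_act = tf.constant([0, 5, 11], dtype=tf.int64)
act = tf.transpose(tf.unravel_index(flat_act, nvec))
print(act.numpy())                                 # [[0 0], [1 1], [2 3]]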
Example #10
def plot_post(model, data, thresh=0.35):
    dataset = data.map(preprocess).batch(4).take(1)
    for xb, yb in dataset:
        pred = model.predict(xb)
        for i in range(4):
            f = plt.figure(figsize=(15, 10))
            f.add_subplot(2, 2, 1)

            plt.imshow(xb[i].numpy())
            plt.title('Plain image')

            f.add_subplot(2, 2, 2)
            im = cv2.resize(xb[i].numpy(), (480, 270))
            plt.imshow(im[0:268, 0:480])
            plt.imshow(yb[i], alpha=0.7, cmap='Oranges')
            plt.title('Ground truth')

            f.add_subplot(2, 2, 3)
            mappa = pred[i]
            plt.imshow(mappa)
            plt.title('Confidence map')

            f.add_subplot(2, 2, 4)
            im = (xb[i].numpy())
            if tf.math.reduce_max(mappa) > thresh:
                cent = tf.unravel_index(
                    tf.math.argmax(tf.reshape(mappa, [-1])), [268, 480])
                cv2.circle(im, (cent[1] * 4, cent[0] * 4), 5, (1, 0, 0), 3)

            plt.imshow(im)
            plt.title('Result')
Example #11
    def get_box_highest_percentage(arr):
        shape = tf.shape(arr)

        reshaped = tf.reshape(arr, (shape[0], tf.reduce_prod(shape[1:-1]), -1))

        # returns array containing the index of the highest percentage of each batch
        # where 0 <= index < height * width
        max_prob_ind = tf.argmax(reshaped[..., -1],
                                 axis=-1,
                                 output_type=tf.int32)

        # turn indices (batch, y * x) into (batch, y, x)
        # returns (3, batch) tensor
        unraveled = tf.unravel_index(max_prob_ind, shape[:-1])

        # turn tensor into (batch, 3) and keep only (y, x)
        unraveled = tf.transpose(unraveled)[:, 1:]
        y, x = unraveled[..., 0], unraveled[..., 1]

        # stack indices and create (batch, 5) tensor which
        # contains height, width, offset_y, offset_x, percentage
        indices = tf.stack([tf.range(shape[0]), y, x], axis=-1)
        box = tf.gather_nd(arr, indices)

        y, x = tf.cast(y, tf.float32), tf.cast(x, tf.float32)

        # transform box to (y + offset_y, x + offset_x, 6 * height, 6 * width, obj)
        # output is (batch, 5)
        out = tf.stack([
            y + box[..., 2], x + box[..., 3], (GRID_SIZE - 1) * box[..., 0],
            (GRID_SIZE - 1) * box[..., 1], box[..., -1]
        ],
                       axis=-1)

        return out
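The unravel-over-(batch, h, w) trick above relies on the per-batch flat index being smaller than h * w, so the leading coordinate is always 0 and gets dropped; a minimal eager check with h = w = 3:

import tensorflow as tf

flat = tf.constant([5, 7], dtype=tf.int32)                       # per-batch argmax indices
unraveled = tf.transpose(tf.unravel_index(flat, [2, 3, 3]))[:, 1:]
print(unraveled.numpy())                                         # [[1 2], [2 1]]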
Example #12
    def get_spatial(self, net):
        '''
        Gets the spatial action of the network
        '''
        if self.debug:
            log("getting spatial action")
            s = Stopwatch()
        net = tf.layers.conv2d(self.spatial,
                               32, [3, 3],
                               strides=1,
                               padding='SAME',
                               activation=tf.nn.relu,
                               name="finalConv")
        net = tf.layers.conv2d(net,
                               1, [1, 1],
                               strides=1,
                               padding='SAME',
                               name="conv1x1")

        flat = tf.layers.flatten(net)
        dist = tf.distributions.Categorical(logits=flat)
        sample = dist.sample()

        coords = tf.unravel_index(sample, [self.rows, self.columns // 2])  # dims must be integers

        if self.debug:
            log("Finished spatial action inference. Took: " + s.delta)
        return coords
Example #13
    def __get_peak_indices_tf(self, array: tf.Tensor, thresh=0.1):
        """
        Returns array indices of the values larger than threshold.

        Parameters
        ----------
        array : tf.Tensor of any shape
            Tensor whose values' indices are to be gathered.
        thresh : float
            Threshold value.

        Returns
        -------
        ndarray of shape [n_peaks, dim(array)]
            Array of indices of the values larger than threshold.
        ndarray of shape [n_peaks]
            Array of the values at corresponding indices.

        """
        flat_peaks = tf.reshape(array, [-1])
        coords = tf.range(0, tf.shape(flat_peaks)[0], dtype=tf.int32)

        peaks_coords = coords[flat_peaks > thresh]

        peaks = tf.gather(flat_peaks, peaks_coords)

        indices = tf.transpose(
            tf.unravel_index(peaks_coords, dims=tf.shape(array)), [1, 0])
        return indices, peaks
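The same thresholded-peak extraction on concrete numbers (eager mode, threshold 0.5):

import tensorflow as tf

array = tf.constant([[0.1, 0.9], [0.6, 0.2]])
flat = tf.reshape(array, [-1])
coords = tf.range(tf.shape(flat)[0])[flat > 0.5]
indices = tf.transpose(tf.unravel_index(coords, dims=tf.shape(array)), [1, 0])
print(indices.numpy())                  # [[0 1], [1 0]]
print(tf.gather(flat, coords).numpy())  # [0.9 0.6]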
Example #14
    def get_joints_vol(self, volumes, batch_size, name="volume_to_joints"):
        # volume shape (batch_size, feature_size, feature_size, feature_size * nJoints)
        with tf.device("/device:GPU:0"):
            with tf.variable_scope(name):
                cur_volumes = tf.reshape(
                    tf.transpose(tf.reshape(volumes, [
                        batch_size, self.feature_size, self.feature_size,
                        self.nJoints, self.feature_size
                    ]),
                                 perm=[0, 1, 2, 4, 3]),
                    [batch_size, -1, self.nJoints])
                cur_argmax_index = tf.reshape(tf.argmax(cur_volumes, axis=1),
                                              [-1])

                with tf.device("/cpu:0"):
                    cur_joints = tf.unravel_index(cur_argmax_index, [
                        self.feature_size, self.feature_size, self.feature_size
                    ])

                cur_joints = tf.reshape(tf.transpose(cur_joints),
                                        [-1, self.nJoints, 3])
                cur_joints = tf.concat([
                    cur_joints[:, :, 0:2][:, :, ::-1],
                    cur_joints[:, :, 2][:, :, tf.newaxis]
                ],
                                       axis=2)
                return tf.cast(cur_joints, tf.float32)
Example #15
def test__ravel_action():
    spaces = _get_spaces(5, (3, 3))
    agent = _get_Agent2(spaces)
    assert agent._ravel_action([0,0]) == 0
    assert agent._ravel_action([0,1]) == 1
    assert agent._ravel_action([0,2]) == 2
    assert agent._ravel_action([1,0]) == 3
    assert agent._ravel_action([1,1]) == 4
    assert agent._ravel_action([2,1]) == 7
    assert agent._ravel_action([2,2]) == 8
    action_list = [i for i in range(3*3)]
    for a in action_list:
        assert agent._ravel_action(tf.transpose(tf.unravel_index(a, (3, 3)))) == a
    action_list = [(i,j) for i in range(3) for j in range(3)]
    for a in action_list:
        np.testing.assert_array_equal(tf.transpose(tf.unravel_index(agent._ravel_action(a), (3, 3))), a)
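_ravel_action itself is not shown in this snippet; a hypothetical implementation consistent with these assertions is plain row-major ravelling (np.ravel_multi_index), the inverse of tf.unravel_index:

import numpy as np
import tensorflow as tf

def _ravel_action(action, dims=(3, 3)):  # hypothetical; not from the original code
    action = np.asarray(action).reshape(-1)
    return int(np.ravel_multi_index(action, dims))

assert _ravel_action([2, 1]) == 7
assert _ravel_action(tf.transpose(tf.unravel_index(7, (3, 3)))) == 7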
Example #16
def get_ragged_indices(n_in, n_out, num_edges):
    n_in = tf.convert_to_tensor(n_in, tf.int64)
    n_out = tf.convert_to_tensor(n_out, tf.int64)
    ij = tf.random.uniform((num_edges, ), 0, n_in * n_out, dtype=tf.int64)
    ij = tf.sort(ij)
    i, j = tf.unravel_index(ij, (n_out, n_in))
    rt = tf.RaggedTensor.from_value_rowids(j, i)
    return rt.values, rt.row_splits, n_in, n_out
Example #17
    def find_keypoints(heat_map):
        inds = []
        for k in range(params['number_keypoints']):
            ind = tf.unravel_index(
                tf.argmax(tf.reshape(heat_map[k, :, :], [-1])), [45, 80])
            inds.append(tf.cast(ind, tf.float32))
        return tf.stack(inds)
Example #18
    def argmax_2D(matrix):
        """
        Returns the (x, y) coordinates of the maximum of a [batch, x, y, 1] matrix.
        """
        flat = tf.reshape(matrix, (matrix.shape[0], -1))
        argmax = tf.argmax(flat, axis=1)
        argmax_index = tf.unravel_index(argmax, matrix.shape)
        coords = tf.transpose(argmax_index[1:3, :])
        return coords
Example #19
def argmax_2d(matrix):
    vector = tf.reshape(matrix, [-1])
    vector = tf.reshape(matrix, [-1])
    index = tf.argmax(vector)
    shape = tf.constant(
        [matrix.get_shape().as_list()[0],
         matrix.get_shape().as_list()[1]],
        dtype=tf.int64)
    return tf.cast(tf.unravel_index(index, shape), tf.float32)
Example #20
    def find_keypoints(heat_map):
        inds = []
        for k in range(params['number_keypoints']):
            ind = tf.unravel_index(
                tf.argmax(tf.reshape(heat_map[k, :, :], [-1])),
                [24, 40])  # index of the maximum value
            inds.append(tf.cast(ind, tf.float32))
        return tf.stack(inds)  # concatenate along dim=0
Example #21
def get_most_probable_cell(phc):
    """
    Find the most probable cell in a PHC.

    phc (Tensor): the Tensor representation of a PHC

    return: the index of the most probable cell
    """
    return tf.unravel_index(np.argmax(phc), phc.shape)
Example #22
def get_joints_2d(heatmaps, nJoints=17, batch_size=4, name="get_joints_hm"):
    with tf.variable_scope(name):
        max_indices = tf.argmax(tf.reshape(heatmaps, [batch_size, -1, nJoints]),
                                axis=1)
        cur_joints = tf.reshape(
            tf.transpose(
                tf.unravel_index(tf.reshape(max_indices, [-1]), [64, 64])),
            [-1, nJoints, 2])[:, :, ::-1]
    return cur_joints
Example #23
    def step(self, inputs, batch_predict=False):
        '''
        Takes a step of the Scud model.
        If batch_predict is set to True, we assume inputs is a batch of all env obs 
        and return an array of the corresponding actions.
        '''

        if batch_predict:
            batch_list = []
            for game_state in inputs:
                if type(game_state) == util.ControlObject:
                    continue
                k, self.rows, self.columns = obs_parsing.parse_obs(game_state)
                batch_list.append(k)
            if len(batch_list) == 0:
                return [(0, 0, 3) for _ in range(len(inputs))]
            spatial = tf.stack(batch_list, axis=0)
        else:
            if self.mask_output or type(inputs) == util.ControlObject:
                if self.debug:
                    print("scud ", self.name, 'output masked')
                return 0, 0, 3
            k, self.rows, self.columns = obs_parsing.parse_obs(inputs)
            spatial = tf.expand_dims(
                k, axis=0)  # now should have shape (1, 8, 8, 25)

        a0, a1 = self.model.predict(spatial)

        arr = []
        rng = 1
        sep_cnt = 0
        if batch_predict:
            rng = len(inputs)

        for i in range(rng):
            if batch_predict:
                if type(inputs[i]) == util.ControlObject:
                    arr.append((0, 0, 3))
                    continue

            building = a0[sep_cnt]
            coords = tf.unravel_index(a1[sep_cnt],
                                      [self.rows, self.columns // 2])

            x = int(coords[0])
            y = int(coords[1])
            if self.debug:
                log("x, y = " + str(x) + ", " + str(y))
            arr.append((x, y, building))
            sep_cnt += 1

        if batch_predict:
            return arr
        else:
            x, y, building = arr[0]
            return x, y, building
Example #24
def random_sparse_indices(
    dense_shape: Sequence[int],
    nnz: int,
    rng: tf.random.Generator,
) -> tf.Tensor:
    max_index = tf.cast(tf.reduce_prod(dense_shape), tf.int64)
    indices = rng.uniform((nnz, ), maxval=max_index, dtype=tf.int64)
    indices, _ = tf.unique(indices)
    indices = tf.transpose(tf.unravel_index(tf.sort(indices), dense_shape),
                           (1, 0))
    return indices
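A usage sketch under made-up shapes; sorting the flat indices keeps the unravelled pairs in the row-major order that tf.sparse.to_dense expects (note tf.unique may drop duplicates, so fewer than nnz indices can come back):

import tensorflow as tf

rng = tf.random.Generator.from_seed(0)
indices = random_sparse_indices((4, 5), nnz=8, rng=rng)
st = tf.SparseTensor(indices, tf.ones([tf.shape(indices)[0]]), (4, 5))
print(tf.sparse.to_dense(st).numpy())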
Example #25
    def generateAction(self):
        # spatial now should have shape (1, 8, 8, 25)
        a0, a1 = self.model.predict(self.spatial)
        building = a0[0]

        coords = tf.unravel_index(a1[0], [self.rows, self.columns // 2])
        x = int(coords[0])
        y = int(coords[1])

        self.writeCommand(x, y, building)

        return x, y, building
Example #26
    def loss_bmu_right(self):
        bmu = self.k_index
        _bmu = tf.unravel_index(bmu, [self.som_dim[0], self.som_dim[1]])
        righters = tf.multiply(tf.ones_like(_bmu[1]), self.som_dim[1])
        mask = tf.less(_bmu[1], righters)  # True where the BMU's column index is < som_dim[1]
        _bmu_movable_sx = tf.boolean_mask(_bmu[0], mask)
        _bmu_movable_dx = tf.boolean_mask(_bmu[1], mask)
        _bmu_raveled = tf.multiply(_bmu_movable_sx, self.som_dim[0]) + (tf.add(_bmu_movable_dx, 1) % self.som_dim[1])
        _inputs_idx = tf.boolean_mask(self.inputs, mask)
        current_loss = tf.reduce_mean(
            tf.squared_difference(_inputs_idx, tf.gather(self.codebook, _bmu_raveled)))
        return current_loss
Example #27
    def assign_anchors_to_ground_truths(self, anchors, ground_truths):
        anchors = tf.cast(anchors, tf.float32)
        anchors_shape = tf.shape(anchors)
        ground_truths = tf.cast(ground_truths, tf.float32)
        flattened_ground_truths = tf.reshape(ground_truths, (-1, 4))
        flattened_anchors = tf.reshape(anchors, (-1, 4))
        ious = intersection_over_union(flattened_anchors,
                                       flattened_ground_truths)

        # (ii) anchors with IoU > threshold with any ground truth
        max_iou_per_anchor = tf.reduce_max(ious, axis=1)
        positive_anchors = tf.greater_equal(max_iou_per_anchor,
                                            self.positive_iou_threshold)
        ground_truth_per_anchor = tf.argmax(ious, axis=1)
        positive_anchor_indices_flattened = tf.where(positive_anchors)
        if positive_anchor_indices_flattened.shape[0] == 0:
            # (i) anchor with highest IoU, in case no anchors have ground truth IoU above threshold
            positive_anchor_indices_flattened = tf.argmax(max_iou_per_anchor)
        positive_anchor_indices_flattened = tf.reshape(
            positive_anchor_indices_flattened, [-1])
        positive_gt_indices = tf.gather(ground_truth_per_anchor,
                                        positive_anchor_indices_flattened)
        positive_gt_indices = tf.expand_dims(positive_gt_indices, axis=0)
        positive_anchor_indices = tf.unravel_index(
            positive_anchor_indices_flattened,
            (anchors_shape[1], anchors_shape[2], anchors_shape[3]))
        positive_anchor_indices = tf.expand_dims(positive_anchor_indices,
                                                 axis=0)

        negative_anchors = tf.less_equal(max_iou_per_anchor,
                                         self.negative_iou_threshold)
        negative_anchor_indices_flattened = tf.where(negative_anchors)
        negative_anchor_indices_flattened = tf.reshape(
            negative_anchor_indices_flattened, [-1])
        negative_anchor_indices = tf.unravel_index(
            negative_anchor_indices_flattened,
            (anchors_shape[1], anchors_shape[2], anchors_shape[3]))
        negative_anchor_indices = tf.expand_dims(negative_anchor_indices,
                                                 axis=0)
        return positive_anchor_indices, positive_gt_indices, negative_anchor_indices
Example #28
def spatial_argmax(x):
    N, H, W, C = [
        tf.shape(x)[0],
        tf.shape(x)[1],
        tf.shape(x)[2],
        tf.shape(x)[3]
    ]
    a = tf.reshape(tf.transpose(x, [0, 3, 1, 2]), [N * C, H * W])
    ret = tf.cast(
        tf.unravel_index(tf.argmax(a, axis=1, output_type=tf.int32), [H, W]),
        tf.float32)
    ret = tf.reshape(tf.transpose(ret), [N, C, 2])
    return ret
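A small eager check, assuming the spatial_argmax above is in scope (two single-peak maps placed with tf.scatter_nd):

import tensorflow as tf

x = tf.scatter_nd([[0, 2, 3, 0], [1, 4, 1, 0]], [1.0, 1.0], (2, 5, 7, 1))
print(spatial_argmax(x).numpy())  # [[[2. 3.]], [[4. 1.]]]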
Example #29
def show_result(model, data, thresh=0.35, take=1):
    dataset = data.map(preprocess).batch(4).take(take)
    for xb, yb in dataset:
        pred = model.predict(xb)  # predict once per batch
        for i in range(4):
            mappa = pred[i]

            plt.figure(figsize=(20, 10))
            im = (xb[i].numpy())
            if tf.math.reduce_max(mappa) > thresh:
                cent = tf.unravel_index(
                    tf.math.argmax(tf.reshape(mappa, [-1])), [268, 480])
                cv2.circle(im, (cent[1] * 4, cent[0] * 4), 7, (1, 0, 0), 5)
            plt.imshow(im)
Example #30
    def update_relevance(self, relevance_index):
        '''
        finds the most contributing position for a given relevance value and propagates the relevance to this position
        :param relevance_index: index of the relevance value to be analyzed
        :return: a mask with only one non-zero entry (with value equal to the relevance value to be analyzed)
                at the most contributing position. The mask has the same shape as the layer_output.
        '''
        # get x and y range of the relevant part of the picture
        x_start = self.stride * relevance_index[1]
        x_end = self.stride * relevance_index[1] + self.filter_size
        y_start = self.stride * relevance_index[2]
        y_end = self.stride * relevance_index[2] + self.filter_size

        # getting correct input in the batch
        batch_input = self.layer_output[relevance_index[0]]

        # getting the part/window of the input which corresponds to the relevance value
        input_patch = batch_input[x_start:x_end, y_start:y_end, :]

        # getting the filter which corresponds to the relevance value
        weight = self.layer_weights[:, :, :, relevance_index[3]]

        # calculating the pointwise product, which is also used during forward propagation
        product = input_patch * weight

        # Getting local position of the most contributing neuron in this window
        old_shape = product.shape
        product = keras.layers.Flatten()(product)
        product = tf.expand_dims(product, axis=0)
        product = keras.layers.Flatten()(product)
        position = tf.argmax(product, axis=-1)[0]
        position = tf.unravel_index(position, old_shape)

        # getting global version of the local position
        global_position = [
            position[0] + x_start, position[1] + y_start, position[2]
        ]

        # ravel global position; indices are shifted since out.shape has an empty first dimension
        global_index = global_position[0] * self.layer_output.shape[-2] * self.layer_output.shape[-1] + global_position[1] * self.layer_output.shape[-1] + \
                       global_position[2]

        # generate a one_hot mask with the relevance value at the global argmax position
        mask = K.one_hot(global_index, tf.reduce_prod(batch_input.shape))
        relevance_value = self.relevance_values[0, relevance_index[1],
                                                relevance_index[2],
                                                relevance_index[3]]
        mask = mask * relevance_value
        mask = K.reshape(mask, batch_input.shape)

        return mask
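The manual index arithmetic above is plain row-major ravelling; a minimal check that it matches np.ravel_multi_index and round-trips through tf.unravel_index:

import numpy as np
import tensorflow as tf

shape = (4, 5, 6)
pos = (1, 2, 3)
flat = pos[0] * shape[1] * shape[2] + pos[1] * shape[2] + pos[2]
assert flat == np.ravel_multi_index(pos, shape)                 # 45
assert tuple(tf.unravel_index(flat, shape).numpy()) == pos      # (1, 2, 3)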