Example #1
def yolo_head(feats, anchors, num_classes, input_shape):
    """Convert final layer features to bounding box parameters."""
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    box_xy = K.sigmoid(feats[..., :2])
    box_wh = K.exp(feats[..., 2:4])
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    # Adjust predictions to each spatial grid point and anchor size.
    box_xy = (box_xy + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = box_wh * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))

    return box_xy, box_wh, box_confidence, box_class_probs
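As a quick sanity check of the grid construction above, the same K.tile pattern can be run standalone (a minimal sketch, assuming a TensorFlow-backed Keras; the 3x2 grid size is illustrative):

from keras import backend as K

h, w = 3, 2
grid_y = K.tile(K.reshape(K.arange(0, stop=h), [-1, 1, 1, 1]), [1, w, 1, 1])
grid_x = K.tile(K.reshape(K.arange(0, stop=w), [1, -1, 1, 1]), [h, 1, 1, 1])
grid = K.concatenate([grid_x, grid_y])  # shape (h, w, 1, 2)
print(K.eval(grid)[..., 0, :])          # the (x, y) index of every grid cell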
Example #2
 def call(self, x):
     r = K.cast(K.arange(self.num), K.floatx()) / float(self.num - 1)
     r = self.start + (self.stop - self.start) * r
     r = K.expand_dims(K.expand_dims(r), axis=0)
     r = K.cast(r, dtype=K.floatx())
     r = K.tile(r, (K.shape(x)[0], 1, 1))
     return r
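This layer emits `num` evenly spaced values between `start` and `stop`, tiled once per sample so the output batch size follows the input. The same computation can be checked outside the layer (a minimal sketch; `start`, `stop`, and `num` stand in for the layer's attributes):

from keras import backend as K

start, stop, num = 0.0, 1.0, 5
r = K.cast(K.arange(num), K.floatx()) / float(num - 1)
r = start + (stop - start) * r               # num values in [start, stop]
r = K.expand_dims(K.expand_dims(r), axis=0)  # shape (1, num, 1)
print(K.eval(K.tile(r, (4, 1, 1))).shape)    # (4, 5, 1) for a batch of 4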
Example #3
    def call(self, inputs, **kwargs):
        assert isinstance(inputs, list) and len(inputs) == 3
        first, second, features = inputs[0], inputs[1], inputs[2]
        if not self.from_logits:
            first = kb.clip(first, 1e-10, 1.0)
            second = kb.clip(second, 1e-10, 1.0)
            first_, second_ = kb.log(first), kb.log(second)
        else:
            first_, second_ = first, second
        # embedded_features.shape = (M, T, 1)
        if self.use_intermediate_layer:
            features = kb.dot(features, self.first_kernel)
            features = kb.bias_add(features, self.first_bias, data_format="channels_last")
            features = self.intermediate_activation(features)
        embedded_features = kb.dot(features, self.features_kernel)
        embedded_features = kb.bias_add(
            embedded_features, self.features_bias, data_format="channels_last")
        if self.use_dimension_bias:
            tiling_shape = [1] * (kb.ndim(first)-1) + [kb.shape(first)[-1]]
            embedded_features = kb.tile(embedded_features, tiling_shape)
            embedded_features = kb.bias_add(
                embedded_features, self.dimensions_bias, data_format="channels_last")
        sigma = kb.sigmoid(embedded_features)

        result = weighted_sum(first_, second_, sigma,
                              self.first_threshold, self.second_threshold)
        probs = kb.softmax(result)
        if self.return_logits:
            return [probs, result]
        return probs
Example #4
    def call(self, x, mask=None):
        x_cont, x_ques, cont_len, ques_len = x

        # get similarity matrix S
        subres0 = K.tile(K.dot(x_cont, self.W0), [1, 1, self.ques_limit])
        subres1 = K.tile(K.permute_dimensions(K.dot(x_ques, self.W1), pattern=(0, 2, 1)), [1, self.cont_limit, 1])
        subres2 = K.batch_dot(x_cont * self.W2, K.permute_dimensions(x_ques, pattern=(0, 2, 1)))
        S = subres0 + subres1 + subres2
        S += self.bias

        S_ = tf.nn.softmax(self.Mask(S, ques_len, axis=1, time_dim=2, mode='add'))
        S_T = K.permute_dimensions(tf.nn.softmax(self.Mask(S, cont_len, axis=2, time_dim=1, mode='add'), axis=1), (0, 2, 1))
        c2q = tf.matmul(S_, x_ques)
        q2c = tf.matmul(tf.matmul(S_, S_T), x_cont)
        result = K.concatenate([x_cont, c2q, x_cont * c2q, x_cont * q2c], axis=-1)

        return result
Example #5
 def get_initial_state(self, x):
     # x has shape (samples, timesteps, input_dim)
     # build all-zero tensors of shape (samples, whatever)
     initial_state = K.zeros_like(x)  
     initial_state = K.sum(initial_state, axis=(1, 2))  # (samples,)
     initial_state = K.expand_dims(initial_state)  # (samples, 1)
     lengths = (self.n_units*self.sigsize,self.n_units)
     initial_states = [K.tile(initial_state, [1, i]) for i in lengths]  # (samples, i)
     return initial_states
Example #6
    def _get_initial_state(x, inp):
        # TODO: check that all x have the same number of samples / timesteps
        # TODO: test that x has 3 dimensions and inp has two dimensions
        x = x[0]
        input_dim = int(inp.get_shape()[1])

        # copied from Keras' Recurrent.get_initial_state
        initial_state = K.zeros_like(x, dtype=inp.dtype)  # (samples, timesteps, input_dim)
        initial_state = K.sum(initial_state, axis=(1, 2))  # (samples,)
        initial_state = K.expand_dims(initial_state)  # (samples, 1)
        return K.tile(initial_state, [1, input_dim])  # (samples, output_dim)
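Examples 5 and 6 both use the same Keras idiom for a batch-size-aware zero state: collapse the symbolic input to shape (samples, 1) with zeros_like + sum + expand_dims, then K.tile it out to the state width, so the batch dimension stays dynamic. A minimal sketch of the idiom (assuming a TensorFlow-backed Keras; `units` is illustrative):

import numpy as np
from keras import backend as K

units = 8
x = K.placeholder(shape=(None, 10, 4))  # (samples, timesteps, input_dim)
state = K.zeros_like(x)                 # (samples, timesteps, input_dim)
state = K.sum(state, axis=(1, 2))       # (samples,)
state = K.expand_dims(state)            # (samples, 1)
state = K.tile(state, [1, units])       # (samples, units)

f = K.function([x], [state])
print(f([np.zeros((3, 10, 4))])[0].shape)  # (3, 8) for a batch of 3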
Example #7
  def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
      ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
      ones = K.tile(ones, (1, self.output_dim))
      B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
      constants.append(B_U)
    else:
      constants.append([K.cast_to_floatx(1.) for _ in range(3)])

    if 0 < self.dropout_W < 1:
      input_shape = self.input_spec[0].shape
      input_dim = input_shape[-1]
      ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
      ones = K.tile(ones, (1, input_dim))
      B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
      constants.append(B_W)
    else:
      constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    return constants
Example #8
    def _additive_similarity(self, source, query):
        concatenation = K.concatenate([source, query], axis=2)
        nonlinearity = K.tanh(K.dot(concatenation, self._weights["w_a"]))
        
        # tile the weight vector (1, 1, dim) for each time step and each element of the batch -> (bs, T, dim)
        source_shape = K.shape(source)
        vaeff = K.tile(K.expand_dims(self._weights["v_a"], 0), [source_shape[0], source_shape[1], 1])

        similarity = K.batch_dot(K.permute_dimensions(vaeff, [0, 2, 1]), nonlinearity, axes=[1, 2])
        
        return similarity
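The tiling step broadcasts the learned attention vector v_a over batch and time before the batch_dot, giving the additive (Bahdanau-style) score v_a^T tanh(W_a [source; query]). A minimal sketch of that broadcast (bs, T, dim, and the (1, dim) shape of v_a are assumptions about the layer's weights):

import numpy as np
from keras import backend as K

bs, T, dim = 2, 5, 3
v_a = K.constant(np.ones((1, dim)))                # the attention vector
vaeff = K.tile(K.expand_dims(v_a, 0), [bs, T, 1])  # broadcast to (bs, T, dim)
print(K.eval(vaeff).shape)                         # (2, 5, 3)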
Example #9
    def call(self, x, mask=None):
        if mask is None:
            return super(GlobalAveragePooling1D, self).call(x)

        mask = K.expand_dims(mask)
        mask = K.tile(mask, [1, 1, K.shape(x)[2]])
        mask = K.cast(mask, K.dtype(x))

        safe_mask_sum = K.sum(mask, axis=1)
        safe_mask_sum = K.maximum(safe_mask_sum, K.ones_like(safe_mask_sum))

        return K.sum(mask * x, axis=1) / safe_mask_sum
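The mask is tiled up to the feature width so padded timesteps contribute zero to the sum, and the denominator is clamped to at least one so fully masked rows do not divide by zero. A quick numeric check (a minimal sketch, assuming a TensorFlow-backed Keras):

from keras import backend as K

x = K.constant([[[1., 1.], [3., 3.], [9., 9.]]])  # (1, 3, 2); last step is padding
mask = K.constant([[1., 1., 0.]])                 # (1, 3)
m = K.tile(K.expand_dims(mask), [1, 1, 2])
denom = K.maximum(K.sum(m, axis=1), K.ones_like(K.sum(m, axis=1)))
print(K.eval(K.sum(m * x, axis=1) / denom))       # [[2. 2.]]; the padded step is ignored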
Example #10
    def get_initial_state(self, inputs):
        print('inputs shape:', inputs.get_shape())

        # apply the matrix on the first time step to get the initial s0.
        s0 = activations.tanh(K.dot(inputs[:, 0], self.W_s))

        # from keras.layers.recurrent to initialize a vector of (batchsize,
        # output_dim)
        y0 = K.zeros_like(inputs)  # (samples, timesteps, input_dims)
        y0 = K.sum(y0, axis=(1, 2))  # (samples, )
        y0 = K.expand_dims(y0)  # (samples, 1)
        y0 = K.tile(y0, [1, self.output_dim])

        return [y0, s0]
Example #11
    def _make_regular_grids(self, batch_size, height, width):
        # making a single regular grid
        x_linspace = K_linspace(-1., 1., width)
        y_linspace = K_linspace(-1., 1., height)
        x_coordinates, y_coordinates = K_meshgrid(x_linspace, y_linspace)
        x_coordinates = K.flatten(x_coordinates)
        y_coordinates = K.flatten(y_coordinates)
        ones = K.ones_like(x_coordinates)
        grid = K.concatenate([x_coordinates, y_coordinates, ones], 0)

        # repeating grids for each batch
        grid = K.flatten(grid)
        grids = K.tile(grid, K.stack([batch_size]))
        return K.reshape(grids, (batch_size, 3, height * width))
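The grid is a set of homogeneous (x, y, 1) sampling coordinates in [-1, 1], repeated per batch item; the final (batch_size, 3, height * width) shape lets a 2x3 affine matrix transform every coordinate with one matmul. A NumPy analogue of the construction (a minimal sketch; K_linspace and K_meshgrid are backend wrappers in the source):

import numpy as np

h, w, batch = 2, 3, 4
xs, ys = np.meshgrid(np.linspace(-1., 1., w), np.linspace(-1., 1., h))
grid = np.concatenate([xs.ravel(), ys.ravel(), np.ones(h * w)], 0)
grids = np.tile(grid, batch).reshape(batch, 3, h * w)
print(grids.shape)  # (4, 3, 6)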
Example #12
    def call(self, inputs, training=None):
        # inputs.shape=[None, input_num_capsule, input_dim_vector]
        # Expand dims to [None, input_num_capsule, 1, 1, input_dim_vector]
        inputs_expand = K.expand_dims(K.expand_dims(inputs, 2), 2)

        # Replicate the num_capsule dimension to prepare for multiplication by W
        # Now it has shape = [None, input_num_capsule, num_capsule, 1, input_dim_vector]
        inputs_tiled = K.tile(inputs_expand, [1, 1, self.num_capsule, 1, 1])

        """  
        # Compute `inputs * W` by expanding the first dim of W. This is more time-consuming and requires a fixed batch_size.
        # Now w_tiled has shape = [batch_size, input_num_capsule, num_capsule, input_dim_vector, dim_vector]
        w_tiled = K.tile(K.expand_dims(self.W, 0), [self.batch_size, 1, 1, 1, 1])
        
        # Transformed vectors, inputs_hat.shape = [None, input_num_capsule, num_capsule, 1, dim_vector]
        inputs_hat = K.batch_dot(inputs_tiled, w_tiled, [4, 3])
        """
        # Compute `inputs * W` by scanning inputs_tiled on dimension 0. This is faster but requires TensorFlow.
        # inputs_hat.shape = [None, input_num_capsule, num_capsule, 1, dim_vector]
        inputs_hat = tf.scan(lambda ac, x: K.batch_dot(x, self.W, [3, 2]),
                             elems=inputs_tiled,
                             initializer=K.zeros([self.input_num_capsule, self.num_capsule, 1, self.dim_vector]))
        """
        # Routing algorithm V1. Use tf.while_loop in a dynamic way.
        def body(i, b, outputs):
            c = tf.nn.softmax(self.bias, dim=2)  # dim=2 is the num_capsule dimension
            outputs = squash(K.sum(c * inputs_hat, 1, keepdims=True))
            b = b + K.sum(inputs_hat * outputs, -1, keepdims=True)
            return [i-1, b, outputs]

        cond = lambda i, b, inputs_hat: i > 0
        loop_vars = [K.constant(self.num_routing), self.bias, K.sum(inputs_hat, 1, keepdims=True)]
        _, _, outputs = tf.while_loop(cond, body, loop_vars)
        """
        # Routing algorithm V2. Use iteration. V1 and V2 both work, with little difference in performance.
        assert self.num_routing > 0, 'The num_routing should be > 0.'

        for i in range(self.num_routing):
            c = tf.nn.softmax(self.bias, dim=2)  # dim=2 is the num_capsule dimension
            # outputs.shape=[None, 1, num_capsule, 1, dim_vector]
            outputs = squash(K.sum(c * inputs_hat, 1, keepdims=True))

            # The last iteration need not update the bias, since it will not be used again.
            if i != self.num_routing - 1:
                # self.bias = K.update_add(self.bias, K.sum(inputs_hat * outputs, [0, -1], keepdims=True))
                self.bias += K.sum(inputs_hat * outputs, -1, keepdims=True)
            # tf.summary.histogram('BigBee', self.bias)  # for debugging
        return K.reshape(outputs, [-1, self.num_capsule, self.dim_vector])
Example #13
def tf_normal(y_true, mu, sigma, pi):

    rollout_length = K.shape(y_true)[1]
    y_true = K.tile(y_true,(1,1,GAUSSIAN_MIXTURES))
    y_true = K.reshape(y_true, [-1, rollout_length, GAUSSIAN_MIXTURES,Z_DIM])

    oneDivSqrtTwoPI = 1 / math.sqrt(2*math.pi)
    result = y_true - mu
#   result = K.permute_dimensions(result, [2,1,0])
    result = result * (1 / (sigma + 1e-8))
    result = -K.square(result)/2
    result = K.exp(result) * (1/(sigma + 1e-8))*oneDivSqrtTwoPI
    result = result * pi
    result = K.sum(result, axis=2) #### sum over gaussians
    #result = K.prod(result, axis=2) #### multiply over latent dims
    return result
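Each mixture component above evaluates the Gaussian density (1 / (sigma * sqrt(2*pi))) * exp(-((y - mu) / sigma)^2 / 2), weights it by its mixing coefficient pi, and sums over GAUSSIAN_MIXTURES; the K.tile/K.reshape pair simply copies y_true once per mixture. A scalar check of the per-component density (a minimal sketch):

import math

y, mu, sigma = 0.5, 0.0, 1.0
dens = math.exp(-((y - mu) / sigma) ** 2 / 2) / (sigma * math.sqrt(2 * math.pi))
print(dens)  # ~0.3521, the standard normal pdf at 0.5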
Example #14
    def get_constants(self, inputs, training=None):
        constants = []
        if 0. < self.recurrent_dropout < 1.:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))

            def dropped_inputs():
                return K.dropout(ones, self.recurrent_dropout)

            rec_dp_mask = [K.in_train_phase(dropped_inputs,
                                            ones,
                                            training=training) for _ in range(3)]
            constants.append(rec_dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])
        return constants
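The ones tensor is shaped (batch, units) by slicing one scalar per sample and tiling, and K.in_train_phase swaps in the dropped mask only during training; the three copies here match the three gates of the GRU-style cell in the source. A minimal sketch of one such mask (assuming a TensorFlow-backed Keras; `units` and `rate` are illustrative):

import numpy as np
from keras import backend as K

units, rate = 4, 0.5
inputs = K.placeholder(shape=(None, 7, 2))
ones = K.tile(K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1))), (1, units))
mask = K.in_train_phase(lambda: K.dropout(ones, rate), ones, training=False)
f = K.function([inputs], [mask])
print(f([np.zeros((3, 7, 2))])[0])  # all ones: training=False takes the identity branch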
Example #15
def gmsd_loss(y_true, y_pred):
    """
    Improved image quality metric over MS-SSIM with easier calc
    http://www4.comp.polyu.edu.hk/~cslzhang/IQA/GMSD/GMSD.htm
    https://arxiv.org/ftp/arxiv/papers/1308/1308.3052.pdf
    """
    true_edge_mag = scharr_edges(y_true, True)
    pred_edge_mag = scharr_edges(y_pred, True)
    cnst = 0.002
    upper = 2.0 * tf.multiply(true_edge_mag, pred_edge_mag) + cnst
    lower = tf.square(true_edge_mag) + tf.square(pred_edge_mag) + cnst
    gms = tf.div(upper, lower)
    _mean, _var = tf.nn.moments(gms, axes=[1, 2], keep_dims=True)
    # single metric value per image in tensor [?, 1, 1]
    gmsd = tf.reduce_mean(tf.sqrt(_var), axis=-1)
    # need to expand to [?, height, width] dimensions for Keras ... modify to not be hard-coded
    return K.tile(gmsd, [1, 64, 64])
Example #16
def expand_tile(units, axis):
    """
    Expand and tile tensor along given axis

    Args:
        units: tf tensor with dimensions [batch_size, time_steps, n_input_features]
        axis: axis along which expand and tile. Must be 1 or 2

    """
    assert axis in (1, 2)
    n_time_steps = K.int_shape(units)[1]
    repetitions = [1, 1, 1, 1]
    repetitions[axis] = n_time_steps
    if axis == 1:
        expanded = Reshape(target_shape=( (1,) + K.int_shape(units)[1:] ))(units)
    else:
        expanded = Reshape(target_shape=(K.int_shape(units)[1:2] + (1,) + K.int_shape(units)[2:]))(units)
    return K.tile(expanded, repetitions)
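Note that the repetition count comes from K.int_shape, so the time dimension must be static. A usage sketch (assuming the function above, with its Reshape import, is in scope):

from keras import backend as K
from keras.layers import Input

units = Input(shape=(4, 8))          # (batch, time_steps=4, features=8)
pairs = expand_tile(units, axis=1)   # every timestep paired with every other
print(K.int_shape(pairs))            # (None, 4, 4, 8)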
Example #17
    def call(self, inputs, training=None):
        # inputs.shape=[None, input_num_capsule, input_dim_capsule]
        # inputs_expand.shape=[None, 1, input_num_capsule, input_dim_capsule]
        inputs_expand = K.expand_dims(inputs, 1)

        # Replicate the num_capsule dimension to prepare for multiplication by W
        # inputs_tiled.shape=[None, num_capsule, input_num_capsule, input_dim_capsule]
        inputs_tiled = K.tile(inputs_expand, [1, self.num_capsule, 1, 1])

        # Compute `inputs * W` by scanning inputs_tiled on dimension 0.
        # x.shape=[num_capsule, input_num_capsule, input_dim_capsule]
        # W.shape=[num_capsule, input_num_capsule, dim_capsule, input_dim_capsule]
        # Regard the first two dimensions as `batch` dimension,
        # then matmul: [input_dim_capsule] x [dim_capsule, input_dim_capsule]^T -> [dim_capsule].
        # inputs_hat.shape = [None, num_capsule, input_num_capsule, dim_capsule]
        inputs_hat = K.map_fn(lambda x: K.batch_dot(x, self.W, [2, 3]), elems=inputs_tiled)

        # Begin: Routing algorithm ---------------------------------------------------------------------#
        # The prior for coupling coefficient, initialized as zeros.
        # b.shape = [None, self.num_capsule, self.input_num_capsule].
        b = tf.zeros(shape=[K.shape(inputs_hat)[0], self.num_capsule, self.input_num_capsule])

        assert self.routings > 0, 'The routings should be > 0.'
        for i in range(self.routings):
            # c.shape=[batch_size, num_capsule, input_num_capsule]
            c = tf.nn.softmax(b, dim=1)

            # c.shape =  [batch_size, num_capsule, input_num_capsule]
            # inputs_hat.shape=[None, num_capsule, input_num_capsule, dim_capsule]
            # The first two dimensions as `batch` dimension,
            # then matmul: [input_num_capsule] x [input_num_capsule, dim_capsule] -> [dim_capsule].
            # outputs.shape=[None, num_capsule, dim_capsule]
            outputs = squash(K.batch_dot(c, inputs_hat, [2, 2]))  # [None, 10, 16]

            if i < self.routings - 1:
                # outputs.shape =  [None, num_capsule, dim_capsule]
                # inputs_hat.shape=[None, num_capsule, input_num_capsule, dim_capsule]
                # The first two dimensions as `batch` dimension,
                # then matmul: [dim_capsule] x [input_num_capsule, dim_capsule]^T -> [input_num_capsule].
                # b.shape=[batch_size, num_capsule, input_num_capsule]
                b += K.batch_dot(outputs, inputs_hat, [2, 3])
        # End: Routing algorithm -----------------------------------------------------------------------#

        return outputs
Example #18
    def test_tile(self):
        shape = (3, 4)
        arr = np.arange(np.prod(shape)).reshape(shape)
        arr_th = KTH.variable(arr)
        arr_tf = KTF.variable(arr)

        n = (2, 1)
        th_z = KTH.tile(arr_th, n)
        th_rep = KTH.eval(th_z)
        tf_rep = KTF.eval(KTF.tile(arr_tf, n))
        assert_allclose(tf_rep, th_rep, atol=1e-05)
        if hasattr(th_z, '_keras_shape'):
            assert th_z._keras_shape == th_rep.shape

        # test theano shape inference when
        # input shape has None entries
        if K.backend() == 'theano':
            x = K.placeholder(shape=(None, 4))
            n = 2
            y = KTH.tile(x, n)
            assert y._keras_shape == (None, 8)
            n = (4, 3)
            y = K.tile(x, n)
            assert y._keras_shape == (None, 12)
Example #19
def yolo_head(feats, anchors, num_classes):
    """Convert final layer features to bounding box parameters.

    Parameters
    ----------
    feats : tensor
        Final convolutional layer features.
    anchors : array-like
        Anchor box widths and heights.
    num_classes : int
        Number of target classes.

    Returns
    -------
    box_xy : tensor
        x, y box predictions adjusted by spatial location in conv layer.
    box_wh : tensor
        w, h box predictions adjusted by anchors and conv spatial resolution.
    box_conf : tensor
        Probability estimate for whether each box contains any object.
    box_class_pred : tensor
        Probability distribution estimate for each box over class labels.
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.variable(anchors), [1, 1, 1, num_anchors, 2])
    # Static implementation for fixed models.
    # TODO: Remove or add option for static implementation.
    # _, conv_height, conv_width, _ = K.int_shape(feats)
    # conv_dims = K.variable([conv_width, conv_height])

    # Dynamic implementation of conv dims for fully convolutional model.
    conv_dims = K.shape(feats)[1:3]  # assuming channels last
    # In YOLO the height index is the innermost iteration.
    conv_height_index = K.arange(0, stop=conv_dims[0])
    conv_width_index = K.arange(0, stop=conv_dims[1])
    conv_height_index = K.tile(conv_height_index, [conv_dims[1]])

    # TODO: repeat_elements and tf.split don't support dynamic splits.
    # conv_width_index = K.repeat_elements(conv_width_index, conv_dims[1], axis=0)
    conv_width_index = K.tile(K.expand_dims(conv_width_index, 0),
                              [conv_dims[0], 1])
    conv_width_index = K.flatten(K.transpose(conv_width_index))
    conv_index = K.transpose(K.stack([conv_height_index, conv_width_index]))
    conv_index = K.reshape(conv_index, [1, conv_dims[0], conv_dims[1], 1, 2])
    conv_index = K.cast(conv_index, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, conv_dims[0], conv_dims[1], num_anchors, num_classes + 5])
    conv_dims = K.cast(K.reshape(conv_dims, [1, 1, 1, 1, 2]), K.dtype(feats))

    # Static generation of conv_index:
    # conv_index = np.array([_ for _ in np.ndindex(conv_width, conv_height)])
    # conv_index = conv_index[:, [1, 0]]  # swap columns for YOLO ordering.
    # conv_index = K.variable(
    #     conv_index.reshape(1, conv_height, conv_width, 1, 2))
    # feats = Reshape(
    #     (conv_dims[0], conv_dims[1], num_anchors, num_classes + 5))(feats)

    box_confidence = K.sigmoid(feats[..., 4:5])
    box_xy = K.sigmoid(feats[..., :2])
    box_wh = K.exp(feats[..., 2:4])
    box_class_probs = K.softmax(feats[..., 5:])

    # Adjust predictions to each spatial grid point and anchor size.
    # Note: YOLO iterates over height index before width index.
    box_xy = (box_xy + conv_index) / conv_dims
    box_wh = box_wh * anchors_tensor / conv_dims

    return box_confidence, box_xy, box_wh, box_class_probs
Example #20
    def call(self, inputs):
        X = inputs[0]  # Node features (N x F)
        A = inputs[1]  # Adjacency matrix (N x N)

        outputs = []
        for head in range(self.attn_heads):
            kernel = self.kernels[head]  # W in the paper (F x F')
            attention_kernel = self.attn_kernels[
                head]  # Attention kernel a in the paper (2F' x 1)

            # Compute inputs to attention network
            features = K.dot(X, kernel)  # (N x F')

            # Compute feature combinations
            # One simplified version of the attention (good for large-scale data)
            # Note: [[a_1], [a_2]]^T [[Wh_i], [Wh_j]] = [a_1]^T [Wh_i] + [a_2]^T [Wh_j]
            attn_for_self = K.dot(
                features, attention_kernel[0])  # (N x 1), [a_1]^T [Wh_i]
            attn_for_neighs = K.dot(
                features, attention_kernel[1])  # (N x 1), [a_2]^T [Wh_j]

            # Attention head a(Wh_i, Wh_j) = a^T [[Wh_i], [Wh_j]]
            dense = attn_for_self + K.transpose(
                attn_for_neighs)  # (N x N) via broadcasting

            # Implementation based on the original paper (good for small-scale data)
            # tile center embeddings
            center_embeddings = K.tile(K.expand_dims(features, 1),
                                       [1, shape(X, 0), 1])  # [N, N, F']
            # tile neighbor embeddings
            neighbor_embedding = K.tile(K.expand_dims(features, 0),
                                        [shape(X, 0), 1, 1])  # [N, N, F']
            # concatenate these embeddings
            embedding_pairs = K.concatenate(
                [center_embeddings, neighbor_embedding], 2)  # [N, N, F'*2]
            # predict the attention score

            dense = K.squeeze(K.dot(embedding_pairs, attention_kernel[2]),
                              2)  # (N x N)

            # Add nonlinearity
            dense = LeakyReLU(alpha=0.2)(dense)

            # Mask values before activation (Vaswani et al., 2017)
            dense += -10e9 * (1.0 - A)

            # Apply softmax to get attention coefficients
            dense = K.softmax(dense)  # (N x N)

            # Apply dropout to features and attention coefficients
            dropout_attn = Dropout(self.dropout_rate)(dense)  # (N x N)
            dropout_feat = Dropout(self.dropout_rate)(features)  # (N x F')

            # Linear combination with neighbors' features
            node_features = K.dot(dropout_attn, dropout_feat)  # (N x F')

            if self.use_bias:
                # add bias
                node_features = K.bias_add(node_features, self.biases[head])

            # Add output of attention head to final output
            outputs.append(node_features)

        # Aggregate the heads' output according to the reduction method
        if self.attn_heads_reduction == 'concat':
            output = K.concatenate(outputs)  # (N x KF')
        else:
            output = K.mean(K.stack(outputs), axis=0)  # (N x F')

        output = self.activation(output)
        return output
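The decomposition noted in the comments above, a^T [Wh_i || Wh_j] = a_1^T Wh_i + a_2^T Wh_j for a = [a_1; a_2], is what lets the simplified version avoid materializing all N^2 concatenated pairs. A quick NumPy check of that identity (a minimal sketch; F' = 4 is illustrative):

import numpy as np

rng = np.random.RandomState(0)
f = 4                                      # F'
a1, a2 = rng.randn(f), rng.randn(f)
hi, hj = rng.randn(f), rng.randn(f)
lhs = np.concatenate([a1, a2]) @ np.concatenate([hi, hj])
rhs = a1 @ hi + a2 @ hj
print(np.allclose(lhs, rhs))               # True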
Example #21
def build(F_atom=1,
          F_bond=1,
          N_h1=100,
          N_h2=50,
          N_h3=0,
          inner_act='tanh',
          l2v=0.01,
          lr=0.0003,
          N_hf=20,
          context_weight=150.0,
          enhancement_weight=0.1,
          optimizer=Adadelta(),
          extra_outputs=False,
          TARGET_YIELD=False,
          absolute_score=False):
    '''
    Builds the feed forward model.

    N_e:  maximum number of edits of each type
    N_h1: number of hidden nodes in first layer
    N_h2: number of hidden nodes in second layer
    inner_act: activation function 
    '''

    h_lost = Input(shape=(None, None, F_atom), name="H_lost")
    h_gain = Input(shape=(None, None, F_atom), name="H_gain")
    bond_lost = Input(shape=(None, None, F_bond), name="bond_lost")
    bond_gain = Input(shape=(None, None, F_bond), name="bond_gain")
    reagents = Input(shape=(256, ),
                     name="reagent_FP")  # TODO: remove hard-coded length
    solvent = Input(shape=(6, ), name="solvent_descriptors_c_e_s_a_b_v")
    temp = Input(shape=(1, ), name="temperature_C")

    # h_lost_r    = Reshape((h_lost.shape[0] * h_lost.shape[1], F_atom), name = "flatten H_lost")(h_lost)
    # h_gain_r    = Reshape((h_gain.shape[1] * h_gain.shape[1], F_atom), name = "flatten H_gain")(h_gain)
    # bond_lost_r = Reshape((bond_lost.shape[0] * bond_lost.shape[1], F_bond), name = "flatten bond_lost")(bond_lost)
    # bond_gain_r = Reshape((bond_gain.shape[0] * bond_gain.shape[1], F_bond), name = "flatten bond_gain")(bond_gain)

    # Combine along first three dimensions
    dynamic_reshaper = lambda x: T.reshape(
        x, (x.shape[0] * x.shape[1] * x.shape[2], x.shape[3]), ndim=x.ndim - 2)
    dynamic_reshaper_shape = lambda x: (None, ) + x[3:]

    h_lost_r = Lambda(dynamic_reshaper,
                      output_shape=dynamic_reshaper_shape,
                      name="flatten_H_lost")(h_lost)
    h_gain_r = Lambda(dynamic_reshaper,
                      output_shape=dynamic_reshaper_shape,
                      name="flatten_H_gain")(h_gain)
    bond_lost_r = Lambda(dynamic_reshaper,
                         output_shape=dynamic_reshaper_shape,
                         name="flatten_bond_lost")(bond_lost)
    bond_gain_r = Lambda(dynamic_reshaper,
                         output_shape=dynamic_reshaper_shape,
                         name="flatten_bond_gain")(bond_gain)

    h_lost_h1 = Dense(N_h1,
                      activation=inner_act,
                      W_regularizer=l2(l2v),
                      name="embed H_lost 1")(h_lost_r)
    h_gain_h1 = Dense(N_h1,
                      activation=inner_act,
                      W_regularizer=l2(l2v),
                      name="embed H_gain 1")(h_gain_r)
    bond_lost_h1 = Dense(N_h1,
                         activation=inner_act,
                         W_regularizer=l2(l2v),
                         name="embed bond_lost 1")(bond_lost_r)
    bond_gain_h1 = Dense(N_h1,
                         activation=inner_act,
                         W_regularizer=l2(l2v),
                         name="embed bond_gain 1")(bond_gain_r)

    N_h = N_h1

    if N_h2 > 0:
        h_lost_h2 = Dense(N_h2,
                          activation=inner_act,
                          W_regularizer=l2(l2v),
                          name="embed H_lost 2")(h_lost_h1)
        h_gain_h2 = Dense(N_h2,
                          activation=inner_act,
                          W_regularizer=l2(l2v),
                          name="embed H_gain 2")(h_gain_h1)
        bond_lost_h2 = Dense(N_h2,
                             activation=inner_act,
                             W_regularizer=l2(l2v),
                             name="embed bond_lost 2")(bond_lost_h1)
        bond_gain_h2 = Dense(N_h2,
                             activation=inner_act,
                             W_regularizer=l2(l2v),
                             name="embed bond_gain 2")(bond_gain_h1)
        N_h = N_h2

        if N_h3 > 0:
            h_lost_h = Dense(N_h3,
                             activation=inner_act,
                             W_regularizer=l2(l2v),
                             name="embed H_lost 3")(h_lost_h2)
            h_gain_h = Dense(N_h3,
                             activation=inner_act,
                             W_regularizer=l2(l2v),
                             name="embed H_gain 3")(h_gain_h2)
            bond_lost_h = Dense(N_h3,
                                activation=inner_act,
                                W_regularizer=l2(l2v),
                                name="embed bond_lost 3")(bond_lost_h2)
            bond_gain_h = Dense(N_h3,
                                activation=inner_act,
                                W_regularizer=l2(l2v),
                                name="embed bond_gain 3")(bond_gain_h2)
            N_h = N_h3

        else:
            h_lost_h = h_lost_h2
            h_gain_h = h_gain_h2
            bond_lost_h = bond_lost_h2
            bond_gain_h = bond_gain_h2

    else:
        h_lost_h = h_lost_h1
        h_gain_h = h_gain_h1
        bond_lost_h = bond_lost_h1
        bond_gain_h = bond_gain_h1

    # Re-expand (using tricky Merge layer, where x[0] is actual data and x[1] is only used for shape)
    dynamic_unreshaper = lambda x: T.reshape(x[0], (x[1].shape[0], x[1].shape[
        1], x[1].shape[2], x[0].shape[1]),
                                             ndim=x[0].ndim + 2)
    dynamic_unreshaper_shape = lambda x: x[1][:3] + x[0][1:]

    h_lost_r2 = Lambda(dynamic_unreshaper,
                       output_shape=dynamic_unreshaper_shape,
                       name="expand H_lost edits")([h_lost_h, h_lost])
    h_gain_r2 = Lambda(dynamic_unreshaper,
                       output_shape=dynamic_unreshaper_shape,
                       name="expand H_gain edits")([h_gain_h, h_gain])
    bond_lost_r2 = Lambda(
        dynamic_unreshaper,
        output_shape=dynamic_unreshaper_shape,
        name="expand bond_lost edits")([bond_lost_h, bond_lost])
    bond_gain_r2 = Lambda(
        dynamic_unreshaper,
        output_shape=dynamic_unreshaper_shape,
        name="expand bond_gain edits")([bond_gain_h, bond_gain])

    # Add edits within a single candidate
    sum_along_axis2 = lambda x: K.sum(x, axis=2)
    sum_along_axis2_shape = lambda x: x[:2] + x[3:]
    h_lost_sum = Lambda(sum_along_axis2,
                        output_shape=sum_along_axis2_shape,
                        name="sum H_lost")(h_lost_r2)
    h_gain_sum = Lambda(sum_along_axis2,
                        output_shape=sum_along_axis2_shape,
                        name="sum H_gain")(h_gain_r2)
    bond_lost_sum = Lambda(sum_along_axis2,
                           output_shape=sum_along_axis2_shape,
                           name="sum bond_lost")(bond_lost_r2)
    bond_gain_sum = Lambda(sum_along_axis2,
                           output_shape=sum_along_axis2_shape,
                           name="sum bond_gain")(bond_gain_r2)

    # Sum across edits in their intermediate representation
    try:
        net_sum = merge.concatenate(
            [h_lost_sum, h_gain_sum, bond_lost_sum, bond_gain_sum],
            name="concat across edits")
    except AttributeError:
        net_sum = merge([h_lost_sum, h_gain_sum, bond_lost_sum, bond_gain_sum],
                        mode='concat',
                        name="concat across edits")

    feature_to_feature = Dense(N_hf,
                               activation=inner_act,
                               W_regularizer=l2(l2v))
    net_sum_h = TimeDistributed(feature_to_feature,
                                name="reaction embedding post-sum")(net_sum)

    # Take reagents -> intermediate representation -> cosine similarity to enhance reaction
    reagents_h = Dense(N_hf,
                       activation=inner_act,
                       W_regularizer=l2(l2v),
                       name="reagent fingerprint to features")(reagents)

    # Trick to repeat reagents using merge layer (so N_c is implicit)
    # x[0] is the original vector and x[1] is just to get the number of candidates (shape)
    context_repeater = lambda x: K.repeat(x[0], x[1].shape[1])
    context_repeater_shape = lambda x: (x[0][0], x[1][1]) + x[0][1:]
    reagents_h_rpt = Lambda(
        context_repeater,
        output_shape=context_repeater_shape,
        name="broadcast reagent vector")([reagents_h, h_lost])
    solvent_rpt = Lambda(context_repeater,
                         output_shape=context_repeater_shape,
                         name="broadcast solvent vector")([solvent, h_lost])
    temp_rpt = Lambda(context_repeater,
                      output_shape=context_repeater_shape,
                      name="broadcast temperature")([temp, h_lost])

    # Dot product between reagents and net_sum_h gives enhancement factor
    try:
        enhancement_mul = merge.multiply(
            [net_sum_h, reagents_h_rpt],
            name="multiply reaction with reagents [dot 1/2]")
    except AttributeError:
        enhancement_mul = merge(
            [net_sum_h, reagents_h_rpt],
            mode='mul',
            name="multiply reaction with reagents [dot 1/2]")
    enhancement_r = Lambda(
        lambda x: K.sum(x, axis=-1, keepdims=True),
        output_shape=lambda x: x[:-1] + (1, ),
        name="sum reaction with reagents [dot 2/2]")(enhancement_mul)

    # Converge to G0, C[not real], E, S, A, B, V, and K
    feature_to_params = Dense(8, activation='linear', W_regularizer=l2(l2v))
    params = TimeDistributed(feature_to_params,
                             name="features to K,G0,C,E,S,A,B,V")(net_sum_h)

    # Concatenate enhancement and solvents
    try:
        params_enhancement = merge.concatenate(
            [params, enhancement_r, solvent_rpt, temp_rpt],
            name="concatenate context")
    except AttributeError:
        params_enhancement = merge(
            [params, enhancement_r, solvent_rpt, temp_rpt],
            mode='concat',
            name="concatenate context")

    # # Calculate using thermo-ish
    # # K * exp(- (G0 + delG_solv) / T + enhancement)
    # unscaled_score = Lambda(
    #     lambda x: x[:, :, 0] * K.exp(- (x[:, :, 1] + K.sum(x[:, :, 2:8] * x[:, :, 8:14], axis = -1)) / (x[:, :, 15] + 273.15) + x[:, :, 8]),
    #     output_shape = lambda x: (None, N_c,),
    #     name = "propensity = K * exp(- (G0 + cC + eE + ... + vV) / T + enh.)"
    # )(params_enhancement)

    unscaled_score = Lambda(
        lambda x: x[:, :, 0] - context_weight *
        (x[:, :, 1] + K.sum(x[:, :, 2:8] * x[:, :, 9:15], axis=-1)) /
        (x[:, :, 15] + 273.15) + enhancement_weight * x[:, :, 8],
        output_shape=lambda x: x[:2],
        name="propensity = logK - (G0 + cC + eE + ... + vV) / T + enh.")(
            params_enhancement)

    if absolute_score:
        score = unscaled_score
    elif not TARGET_YIELD:
        score = Activation('softmax', name="scores to probs")(unscaled_score)
    else:
        scaled_score = Activation(
            lambda x: K.exp(x - 3.0),
            name='exponential activation')(unscaled_score)
        # Do not scale score with softmax (which would force 100% conversion)
        # Scale linearly
        score = Lambda(lambda x: x / K.tile(
            K.maximum(1.0, K.sum(x, axis=-1, keepdims=True)), (1, x.shape[1])),
                       name="scale if sum(score)>1")(scaled_score)

    #score = unscaled_score_r

    if extra_outputs:
        model = Model(input=[
            h_lost, h_gain, bond_lost, bond_gain, reagents, solvent, temp
        ],
                      output=[
                          h_lost_sum, h_gain_sum, bond_lost_sum, bond_gain_sum,
                          net_sum, net_sum_h, params, unscaled_score, score
                      ])
        return model

    model = Model(
        input=[h_lost, h_gain, bond_lost, bond_gain, reagents, solvent, temp],
        output=[score])

    # model.summary()

    # Now compile
    if not TARGET_YIELD:
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['accuracy'])
    else:
        model.compile(loss=mse_of_true, optimizer=optimizer)

    return model
Example #22
def tile(x):
    reps = [1, 1, 32]
    return K.tile(x, reps)
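Intended for use inside a Lambda layer, repeating the last axis 32 times. A usage sketch (assuming the function above is in scope; shapes are illustrative):

from keras import backend as K
from keras.layers import Input, Lambda

x = Input(shape=(5, 1))   # (batch, 5, 1)
y = Lambda(tile)(x)       # (batch, 5, 32)
print(K.int_shape(y))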
Example #23
 def call(self, x, mask=None):
     shape = list(K.shape(x))
     shape[-1] = 1
     return K.tile(self.b, tuple(shape))
Example #24
 def call(self, x, mask=None):
     shape = list(K.shape(x))[:-1] + [1, 1]
     return K.tile(self.b, tuple(shape))
Example #25
    def call(self, inputs, training=None, mask=None):
        input_shape = K.shape(inputs)

        if self.rank == 1:
            input_shape = [input_shape[i] for i in range(3)]
            batch_shape, dim, channels = input_shape

            xx_range = K.tile(K.expand_dims(K.arange(0, dim), axis=0),
                              [batch_shape, 1])
            xx_range = K.expand_dims(xx_range, axis=-1)

            xx_channels = K.cast(xx_range, K.floatx())
            xx_channels = xx_channels / K.cast(dim - 1, K.floatx())
            xx_channels = (xx_channels * 2) - 1.

            outputs = K.concatenate([inputs, xx_channels], axis=-1)

        if self.rank == 2:
            if self.data_format == 'channels_first':
                inputs = K.permute_dimensions(inputs, [0, 2, 3, 1])
                input_shape = K.shape(inputs)

            input_shape = [input_shape[i] for i in range(4)]
            batch_shape, dim1, dim2, channels = input_shape

            xx_ones = K.ones([batch_shape, dim2], dtype='int32')
            xx_ones = K.expand_dims(xx_ones, axis=-1)

            xx_range = K.tile(K.expand_dims(K.arange(0, dim1), axis=0),
                              [batch_shape, 1])
            xx_range = K.expand_dims(xx_range, axis=1)
            xx_channels = K.batch_dot(xx_ones, xx_range, axes=[2, 1])
            xx_channels = K.expand_dims(xx_channels, axis=-1)
            xx_channels = K.permute_dimensions(xx_channels, [0, 2, 1, 3])

            yy_ones = K.ones([batch_shape, dim1], dtype='int32')
            yy_ones = K.expand_dims(yy_ones, axis=1)

            yy_range = K.tile(K.expand_dims(K.arange(0, dim2), axis=0),
                              [batch_shape, 1])
            yy_range = K.expand_dims(yy_range, axis=-1)

            yy_channels = K.batch_dot(yy_range, yy_ones, axes=[2, 1])
            yy_channels = K.expand_dims(yy_channels, axis=-1)
            yy_channels = K.permute_dimensions(yy_channels, [0, 2, 1, 3])

            xx_channels = K.cast(xx_channels, K.floatx())
            xx_channels = xx_channels / K.cast(dim1 - 1, K.floatx())
            xx_channels = (xx_channels * 2) - 1.

            yy_channels = K.cast(yy_channels, K.floatx())
            yy_channels = yy_channels / K.cast(dim2 - 1, K.floatx())
            yy_channels = (yy_channels * 2) - 1.

            outputs = K.concatenate([inputs, xx_channels, yy_channels],
                                    axis=-1)

            if self.use_radius:
                rr = K.sqrt(
                    K.square(xx_channels - 0.5) + K.square(yy_channels - 0.5))
                outputs = K.concatenate([outputs, rr], axis=-1)

            if self.data_format == 'channels_first':
                outputs = K.permute_dimensions(outputs, [0, 3, 1, 2])

        if self.rank == 3:
            if self.data_format == 'channels_first':
                inputs = K.permute_dimensions(inputs, [0, 2, 3, 4, 1])
                input_shape = K.shape(inputs)

            input_shape = [input_shape[i] for i in range(5)]
            batch_shape, dim1, dim2, dim3, channels = input_shape

            xx_ones = K.ones([batch_shape, dim3], dtype='int32')
            xx_ones = K.expand_dims(xx_ones, axis=-1)

            xx_range = K.tile(K.expand_dims(K.arange(0, dim2), axis=0),
                              [batch_shape, 1])
            xx_range = K.expand_dims(xx_range, axis=1)

            xx_channels = K.batch_dot(xx_ones, xx_range, axes=[2, 1])
            xx_channels = K.expand_dims(xx_channels, axis=-1)
            xx_channels = K.permute_dimensions(xx_channels, [0, 2, 1, 3])

            xx_channels = K.expand_dims(xx_channels, axis=1)
            xx_channels = K.tile(xx_channels, [1, dim1, 1, 1, 1])

            yy_ones = K.ones([batch_shape, dim2], dtype='int32')
            yy_ones = K.expand_dims(yy_ones, axis=1)

            yy_range = K.tile(K.expand_dims(K.arange(0, dim3), axis=0),
                              [batch_shape, 1])
            yy_range = K.expand_dims(yy_range, axis=-1)

            yy_channels = K.batch_dot(yy_range, yy_ones, axes=[2, 1])
            yy_channels = K.expand_dims(yy_channels, axis=-1)
            yy_channels = K.permute_dimensions(yy_channels, [0, 2, 1, 3])

            yy_channels = K.expand_dims(yy_channels, axis=1)
            yy_channels = K.tile(yy_channels, [1, dim1, 1, 1, 1])

            zz_range = K.tile(K.expand_dims(K.arange(0, dim1), axis=0),
                              [batch_shape, 1])
            zz_range = K.expand_dims(zz_range, axis=-1)
            zz_range = K.expand_dims(zz_range, axis=-1)

            zz_channels = K.tile(zz_range, [1, 1, dim2, dim3])
            zz_channels = K.expand_dims(zz_channels, axis=-1)

            xx_channels = K.cast(xx_channels, K.floatx())
            xx_channels = xx_channels / K.cast(dim2 - 1, K.floatx())
            xx_channels = xx_channels * 2 - 1.

            yy_channels = K.cast(yy_channels, K.floatx())
            yy_channels = yy_channels / K.cast(dim3 - 1, K.floatx())
            yy_channels = yy_channels * 2 - 1.

            zz_channels = K.cast(zz_channels, K.floatx())
            zz_channels = zz_channels / K.cast(dim1 - 1, K.floatx())
            zz_channels = zz_channels * 2 - 1.

            outputs = K.concatenate(
                [inputs, zz_channels, xx_channels, yy_channels], axis=-1)

            if self.data_format == 'channels_first':
                outputs = K.permute_dimensions(outputs, [0, 4, 1, 2, 3])

        return outputs
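In the rank-2 branch, the batch_dot of a ones column with a tiled arange is what materializes a per-row (and, transposed, per-column) index map before it is normalized into [-1, 1]. A minimal check of that trick (assuming a TensorFlow-backed Keras; the 2x3 size is illustrative):

from keras import backend as K

b, d1, d2 = 1, 2, 3
xx_ones = K.expand_dims(K.ones([b, d2], dtype='int32'), -1)  # (b, d2, 1)
xx_range = K.expand_dims(
    K.tile(K.expand_dims(K.arange(0, d1), axis=0), [b, 1]), 1)  # (b, 1, d1)
xx = K.batch_dot(xx_ones, xx_range, axes=[2, 1])  # (b, d2, d1)
print(K.eval(xx)[0])  # every row is [0, 1]: a column-index map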
Example #26
    def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        if self.stateful:
            self.reset_states()
        else:
            # initial states: all-zero tensor of shape (output_dim)
            self.states = [None]
        input_dim = input_shape[2]
        self.input_dim = input_dim


        if self.flag_return_all_hidden:
            output_dim_h0 = self.K_layers*self.output_dim
        else:
            output_dim_h0 = self.output_dim
        if self.flag_nonnegative:
            self.log_h0 = self.add_weight((self.output_dim,),
                                          initializer='uniform',
                                          name='{}_log_h0'.format(self.name))
            self.h0_last = K.softplus(self.log_h0)
        else:
            self.h0_last = self.add_weight((self.output_dim,),
                                     initializer='zero',
                                     name='{}_h0'.format(self.name))
        if self.flag_return_all_hidden:
            self.h0 = K.tile(self.h0_last, [self.K_layers,])
        else:
            self.h0 = self.h0_last

        for key in self.alt_params:
            param=self.alt_params[key]
            if key in self.keys_trainable:
                flag_trainable=True
            else:
                flag_trainable=False
            pcur = self.add_weight(param.shape,
                                   initializer='zero',
                                   trainable=flag_trainable,
                                   name=('{}_%s' % key).format(self.name))
            pcur.set_value(param)
            #setattr(self, key, pcur)
            self.alt_params[key]=pcur

        self.Wk=[]
        self.Uk=[]
        self.bk=[]
        self.Sk=[]
        for k in range(self.K_layers):
            if ('W' in self.maps_from_alt):
                if isinstance(self.maps_from_alt['W'],list):
                    map_cur=self.maps_from_alt['W'][k]
                else:
                    map_cur=self.maps_from_alt['W']
                Wcur = map_cur(self.alt_params)
            else:
                Wcur = self.add_weight((input_dim, self.output_dim),
                                         initializer=self.init,
                                         name=('{}_W_%d' % k).format(self.name),
                                         regularizer=self.W_regularizer)
            
            if ('U' in self.maps_from_alt):
                if isinstance(self.maps_from_alt['U'],list):
                    map_cur=self.maps_from_alt['U'][k]
                else:
                    map_cur=self.maps_from_alt['U']
                Ucur = map_cur(self.alt_params)
            else:
                Ucur = self.add_weight((self.output_dim, self.output_dim),
                                         initializer=self.inner_init,
                                         name=('{}_U_%d' % k).format(self.name),
                                         regularizer=self.U_regularizer)
            
            if ('b' in self.maps_from_alt):
                if isinstance(self.maps_from_alt['b'],list):
                    map_cur=self.maps_from_alt['b'][k]
                else:
                    map_cur=self.maps_from_alt['b']
                bcur = map_cur(self.alt_params)
            else:
                bcur = self.add_weight((self.output_dim,),
                                         initializer='zero',
                                         name=('{}_b_%d' % k).format(self.name),
                                         regularizer=self.b_regularizer)
            
            self.Wk.append(Wcur)
            self.Uk.append(Ucur)
            self.bk.append(bcur)
            
            if k>0:
                if ('S' in self.maps_from_alt):
                    if isinstance(self.maps_from_alt['S'],list):
                        map_cur=self.maps_from_alt['S'][k-1]
                    else:
                        map_cur=self.maps_from_alt['S']
                    Scur = map_cur(self.alt_params)
                else:
                    Scur = self.add_weight((self.output_dim, self.output_dim),
                                             initializer=self.inner_init,
                                             name=('{}_S_%dto%d' % (k-1,k)).format(self.name))
                
                self.Sk.append(Scur)
        
        """
        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
        """
        self.built = True
Example #27
    def call(self, inputs, mask=None, **kwargs):
        if isinstance(inputs, list):
            inputs, positions = inputs
            positions = K.cast(positions, 'int32')
            mask = mask[1]
        else:
            positions = None

        input_len = K.shape(inputs)[1]

        if self.attention_type == SeqSelfAttention.ATTENTION_TYPE_ADD:
            e = self._call_additive_emission(inputs)
        elif self.attention_type == SeqSelfAttention.ATTENTION_TYPE_MUL:
            e = self._call_multiplicative_emission(inputs)

        if self.attention_activation is not None:
            e = self.attention_activation(e)
        e = K.exp(e - K.max(e, axis=-1, keepdims=True))
        if self.attention_width is not None:
            ones = tf.ones((input_len, input_len))
            if self.history_only:
                local = tf.matrix_band_part(
                    ones,
                    K.minimum(input_len, self.attention_width - 1),
                    0,
                )
            else:
                local = tf.matrix_band_part(
                    ones,
                    K.minimum(input_len, self.attention_width // 2),
                    K.minimum(input_len, (self.attention_width - 1) // 2),
                )
            e = e * K.expand_dims(local, 0)
        if mask is not None:
            mask = K.cast(mask, K.floatx())
            mask = K.expand_dims(mask)
            e = K.permute_dimensions(
                K.permute_dimensions(e * mask, (0, 2, 1)) * mask, (0, 2, 1))

        # a_{t} = \text{softmax}(e_t)
        s = K.sum(e, axis=-1)
        s = K.tile(K.expand_dims(s, axis=-1), K.stack([1, 1, input_len]))
        a = e / (s + K.epsilon())

        # l_t = \sum_{t'} a_{t, t'} x_{t'}
        v = K.batch_dot(a, inputs)
        if self.attention_regularizer_weight > 0.0:
            self.add_loss(self._attention_regularizer(a))

        if positions is not None:
            pos_num = K.shape(positions)[1]
            batch_indices = K.tile(
                K.expand_dims(K.arange(K.shape(inputs)[0]), axis=-1),
                K.stack([1, pos_num]))
            pos_indices = K.stack([batch_indices, positions], axis=-1)
            v = tf.gather_nd(v, pos_indices)
            a = tf.gather_nd(a, pos_indices)

        if self.return_attention:
            return [v, a]
        return v
Example #28
 def loss(y_true, y_pred):
     y_pred_sum = K.tile(K.sum(y_pred, axis=1), 3)
     y_true_sum = K.tile(K.sum(y_true, axis=1), 3)
     y_pred = y_pred / K.reshape(y_pred_sum, (-1, 3))
     y_true = y_true / K.reshape(y_true_sum, (-1, 3))
     return K.mean(K.abs(y_pred - y_true), axis=-1)
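The loss first renormalizes each 3-way prediction and target to sum to one, then takes the mean absolute difference. A NumPy stand-in for one row (a minimal sketch):

import numpy as np

y_pred = np.array([[2., 1., 1.]])
y_true = np.array([[1., 1., 2.]])
p = y_pred / y_pred.sum(axis=1, keepdims=True)
t = y_true / y_true.sum(axis=1, keepdims=True)
print(np.abs(p - t).mean(axis=-1))  # [0.1666...]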
Example #29
 def call(self, x, mask=None):
     assert isinstance(x, list)
     assert len(x) == 3
     return K.tile(K.expand_dims(K.flatten(wcorr(*x)), axis=1),
                   K.shape(x[1]))
Example #30
 def label_expand(self, x):
     # unsqueeze
     x_unsqueeze = K.expand_dims(K.expand_dims(K.expand_dims(x, -1), -1),
                                 -1)
     return K.tile(x_unsqueeze,
                   (1, 1, self.ImageRow, self.ImageColumn, self.ImageDepth))
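This expands a (batch, n_labels) tensor to (batch, n_labels, row, column, depth) so a label vector can be broadcast over a 3-D volume. A shape check (a minimal sketch; H, W, D stand in for the layer's ImageRow/ImageColumn/ImageDepth attributes):

import numpy as np
from keras import backend as K

H, W, D = 4, 4, 2
x = K.constant(np.ones((2, 3)))  # (batch, labels)
u = K.expand_dims(K.expand_dims(K.expand_dims(x, -1), -1), -1)
print(K.eval(K.tile(u, (1, 1, H, W, D))).shape)  # (2, 3, 4, 4, 2)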
Example #31
    def call(self, input_tensor, training=None):
        input_transposed = tf.transpose(input_tensor, [3, 0, 1, 2, 4])
        input_shape = K.shape(input_transposed)
        input_tensor_reshaped = K.reshape(input_transposed, [
            input_shape[1] * input_shape[0], self.input_height,
            self.input_width, self.input_num_atoms
        ])
        input_tensor_reshaped.set_shape(
            (None, self.input_height, self.input_width, self.input_num_atoms))

        if self.upsamp_type == 'resize':
            upsamp = K.resize_images(input_tensor_reshaped, self.scaling,
                                     self.scaling, 'channels_last')
            outputs = K.conv2d(upsamp,
                               kernel=self.W,
                               strides=(1, 1),
                               padding=self.padding,
                               data_format='channels_last')
        elif self.upsamp_type == 'subpix':
            conv = K.conv2d(input_tensor_reshaped,
                            kernel=self.W,
                            strides=(1, 1),
                            padding='same',
                            data_format='channels_last')
            outputs = tf.depth_to_space(conv, self.scaling)
        else:
            batch_size = input_shape[1] * input_shape[0]

            # Infer the dynamic output shape:
            out_height = deconv_length(self.input_height, self.scaling,
                                       self.kernel_size, self.padding)
            out_width = deconv_length(self.input_width, self.scaling,
                                      self.kernel_size, self.padding)
            output_shape = (batch_size, out_height, out_width,
                            self.num_capsule * self.num_atoms)

            outputs = K.conv2d_transpose(input_tensor_reshaped,
                                         self.W,
                                         output_shape,
                                         (self.scaling, self.scaling),
                                         padding=self.padding,
                                         data_format='channels_last')

        votes_shape = K.shape(outputs)
        _, conv_height, conv_width, _ = outputs.get_shape()

        votes = K.reshape(outputs, [
            input_shape[1], input_shape[0], votes_shape[1], votes_shape[2],
            self.num_capsule, self.num_atoms
        ])
        votes.set_shape((None, self.input_num_capsule, conv_height.value,
                         conv_width.value, self.num_capsule, self.num_atoms))
        # votes.set_shape((None, self.input_num_capsule, conv_height, conv_width,
        #                 self.num_capsule, self.num_atoms))

        logit_shape = K.stack([
            input_shape[1], input_shape[0], votes_shape[1], votes_shape[2],
            self.num_capsule
        ])
        biases_replicated = K.tile(self.b,
                                   [votes_shape[1], votes_shape[2], 1, 1])

        activations = update_routing(votes=votes,
                                     biases=biases_replicated,
                                     logit_shape=logit_shape,
                                     num_dims=6,
                                     input_dim=self.input_num_capsule,
                                     output_dim=self.num_capsule,
                                     num_routing=self.routings)

        return activations
Example #32
    def call(self, x, mask=None):
        '''
        Return an anchor box tensor based on the shape of the input tensor.

        The logic implemented here is identical to the logic in the module `ssd_box_encode_decode_utils.py`.

        Note that this tensor does not participate in any graph computations at runtime. It is being created
        as a constant once during graph creation and is just being output along with the rest of the model output
        during runtime. Because of this, all logic is implemented as Numpy array operations and it is sufficient
        to convert the resulting Numpy array into a Keras tensor at the very end before outputting it.

        Arguments:
            x (tensor): 4D tensor of shape `(batch, channels, height, width)` if `dim_ordering = 'th'`
                or `(batch, height, width, channels)` if `dim_ordering = 'tf'`. The input for this
                layer must be the output of the localization predictor layer.
        '''

        # Compute box width and height for each aspect ratio
        # The shorter side of the image will be used to compute `w` and `h` using `scale` and `aspect_ratios`.
        self.aspect_ratios = np.sort(self.aspect_ratios)
        size = min(self.img_height, self.img_width)
        # Compute the box widths and heights for all aspect ratios
        wh_list = []
        for ar in self.aspect_ratios:
            if (ar == 1) & self.two_boxes_for_ar1:
                # Compute the regular default box for aspect ratio 1 and...
                w = self.this_scale * size * np.sqrt(ar)
                h = self.this_scale * size / np.sqrt(ar)
                wh_list.append((w,h))
                # ...also compute one slightly larger version using the geometric mean of this scale value and the next
                w = np.sqrt(self.this_scale * self.next_scale) * size * np.sqrt(ar)
                h = np.sqrt(self.this_scale * self.next_scale) * size / np.sqrt(ar)
                wh_list.append((w,h))
            else:
                w = self.this_scale * size * np.sqrt(ar)
                h = self.this_scale * size / np.sqrt(ar)
                wh_list.append((w,h))
        wh_list = np.array(wh_list)

        # We need the shape of the input tensor
        if K.image_dim_ordering() == 'tf':
            batch_size, feature_map_height, feature_map_width, feature_map_channels = x._keras_shape

        else: # Not yet relevant since TensorFlow is the only supported backend right now, but it can't harm to have this in here for the future
            batch_size, feature_map_channels, feature_map_height, feature_map_width = x._keras_shape

        # Compute the grid of box center points. They are identical for all aspect ratios
        cell_height = self.img_height / feature_map_height
        cell_width = self.img_width / feature_map_width
        cx = np.linspace(cell_width/2, self.img_width-cell_width/2, feature_map_width)
        cy = np.linspace(cell_height/2, self.img_height-cell_height/2, feature_map_height)
        cx_grid, cy_grid = np.meshgrid(cx, cy)
        cx_grid = np.expand_dims(cx_grid, -1) # This is necessary for np.tile() to do what we want further down
        cy_grid = np.expand_dims(cy_grid, -1) # This is necessary for np.tile() to do what we want further down

        # Create a 4D tensor template of shape `(feature_map_height, feature_map_width, n_boxes, 4)`
        # where the last dimension will contain `(cx, cy, w, h)`
        boxes_tensor = np.zeros((feature_map_height, feature_map_width, self.n_boxes, 4))

        boxes_tensor[:, :, :, 0] = np.tile(cx_grid, (1, 1, self.n_boxes)) # Set cx
        boxes_tensor[:, :, :, 1] = np.tile(cy_grid, (1, 1, self.n_boxes)) # Set cy
        boxes_tensor[:, :, :, 2] = wh_list[:, 0] # Set w
        boxes_tensor[:, :, :, 3] = wh_list[:, 1] # Set h

        # Convert `(cx, cy, w, h)` to `(xmin, xmax, ymin, ymax)`
        boxes_tensor = convert_coordinates(boxes_tensor, start_index=0, conversion='centroids2minmax')

        # If `limit_boxes` is enabled, clip the coordinates to lie within the image boundaries
        if self.limit_boxes:
            x_coords = boxes_tensor[:,:,:,[0, 1]]
            x_coords[x_coords >= self.img_width] = self.img_width - 1
            x_coords[x_coords < 0] = 0
            boxes_tensor[:,:,:,[0, 1]] = x_coords
            y_coords = boxes_tensor[:,:,:,[2, 3]]
            y_coords[y_coords >= self.img_height] = self.img_height - 1
            y_coords[y_coords < 0] = 0
            boxes_tensor[:,:,:,[2, 3]] = y_coords

        # If `normalize_coords` is enabled, normalize the coordinates to lie within [0,1]
        if self.normalize_coords:
            boxes_tensor[:, :, :, :2] /= self.img_width
            boxes_tensor[:, :, :, 2:] /= self.img_height

        if self.coords == 'centroids':
            # TODO: Implement box limiting directly for `(cx, cy, w, h)` so that we don't have to unnecessarily convert back and forth
            # Convert `(xmin, xmax, ymin, ymax)` back to `(cx, cy, w, h)`
            boxes_tensor = convert_coordinates(boxes_tensor, start_index=0, conversion='minmax2centroids')

        # Create a tensor to contain the variances and append it to `boxes_tensor`. This tensor has the
        # same shape as `boxes_tensor` and simply contains the same 4 variance values for every position in the last axis.
        variances_tensor = np.zeros_like(boxes_tensor) # Has shape `(feature_map_height, feature_map_width, n_boxes, 4)`
        variances_tensor += self.variances # Long live broadcasting
        # Now `boxes_tensor` becomes a tensor of shape `(feature_map_height, feature_map_width, n_boxes, 8)`
        boxes_tensor = np.concatenate((boxes_tensor, variances_tensor), axis=-1)

        # Now prepend one dimension to `boxes_tensor` to account for the batch size and tile it along the batch dimension.
        # The result will be a 5D tensor of shape `(batch_size, feature_map_height, feature_map_width, n_boxes, 8)`
        boxes_tensor = np.expand_dims(boxes_tensor, axis=0)
        boxes_tensor = K.tile(K.constant(boxes_tensor, dtype='float32'), (K.shape(x)[0], 1, 1, 1, 1))

        return boxes_tensor
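
For reference, since `convert_coordinates` is defined elsewhere in the project, here is a minimal NumPy sketch of a `centroids2minmax` conversion consistent with the coordinate indexing above (a hypothetical stand-in, not the project's implementation):

import numpy as np

def centroids2minmax_sketch(tensor, start_index=0):
    # Hypothetical helper: maps (cx, cy, w, h) -> (xmin, xmax, ymin, ymax) along the last axis.
    ind = start_index
    out = np.copy(tensor).astype(np.float64)
    out[..., ind]     = tensor[..., ind] - tensor[..., ind + 2] / 2.0      # xmin = cx - w/2
    out[..., ind + 1] = tensor[..., ind] + tensor[..., ind + 2] / 2.0      # xmax = cx + w/2
    out[..., ind + 2] = tensor[..., ind + 1] - tensor[..., ind + 3] / 2.0  # ymin = cy - h/2
    out[..., ind + 3] = tensor[..., ind + 1] + tensor[..., ind + 3] / 2.0  # ymax = cy + h/2
    return out

# A single box centered at (50, 40) with w=20, h=10:
print(centroids2minmax_sketch(np.array([[50., 40., 20., 10.]])))  # [[40. 60. 35. 45.]]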
Exemplo n.º 33
0
    def call(self, inputs, training=None):
        # inputs.shape=[None, input_num_capsule, input_dim_vector]
        # Expand dims to [None, input_num_capsule, 1, 1, input_dim_vector]
        inputs_expand = K.expand_dims(K.expand_dims(inputs, 2), 2)

        # Replicate num_capsule dimension to prepare being multiplied by W
        # Now it has shape = [None, input_num_capsule, num_capsule, 1, input_dim_vector]
        inputs_tiled = K.tile(inputs_expand, [1, 1, self.num_capsule, 1, 1])
        """ 
        # Begin: inputs_hat computation V1 ---------------------------------------------------------------------#
        # Compute `inputs * W` by expanding the first dim of W. More time-consuming and need batch_size.
        # w_tiled.shape = [batch_size, input_num_capsule, num_capsule, input_dim_vector, dim_vector]
        w_tiled = K.tile(K.expand_dims(self.W, 0), [self.batch_size, 1, 1, 1, 1])
        
        # Transformed vectors, inputs_hat.shape = [None, input_num_capsule, num_capsule, 1, dim_vector]
        inputs_hat = K.batch_dot(inputs_tiled, w_tiled, [4, 3])
        # End: inputs_hat computation V1 ---------------------------------------------------------------------#
        """

        # Begin: inputs_hat computation V2 ---------------------------------------------------------------------#
        # Compute `inputs * W` by scanning inputs_tiled on dimension 0. This is faster but requires Tensorflow.
        # inputs_hat.shape = [None, input_num_capsule, num_capsule, 1, dim_vector]
        inputs_hat = tf.scan(lambda ac, x: K.batch_dot(x, self.W, [3, 2]),
                             elems=inputs_tiled,
                             initializer=K.zeros([
                                 self.input_num_capsule, self.num_capsule, 1,
                                 self.dim_vector
                             ]))
        # End: inputs_hat computation V2 ---------------------------------------------------------------------#
        """
        # Begin: routing algorithm V1, dynamic ------------------------------------------------------------#
        def body(i, b, outputs):
            c = tf.nn.softmax(b, dim=2)  # dim=2 is the num_capsule dimension
            outputs = squash(K.sum(c * inputs_hat, 1, keepdims=True))
            if i != 1:
                b = b + K.sum(inputs_hat * outputs, -1, keepdims=True)
            return [i-1, b, outputs]

        cond = lambda i, b, inputs_hat: i > 0
        loop_vars = [K.constant(self.num_routing), self.bias, K.sum(inputs_hat, 1, keepdims=True)]
        shape_invariants = [tf.TensorShape([]),
                            tf.TensorShape([None, self.input_num_capsule, self.num_capsule, 1, 1]),
                            tf.TensorShape([None, 1, self.num_capsule, 1, self.dim_vector])]
        _, _, outputs = tf.while_loop(cond, body, loop_vars, shape_invariants)
        # End: routing algorithm V1, dynamic ------------------------------------------------------------#
        """

        # Begin: routing algorithm V2, static -----------------------------------------------------------#
        # Routing algorithm V2. Use iteration. V2 and V1 both work without much difference on performance
        assert self.num_routing > 0, 'The num_routing should be > 0.'
        for i in range(self.num_routing):
            c = tf.nn.softmax(self.bias,
                              dim=2)  # dim=2 is the num_capsule dimension
            # outputs.shape=[None, 1, num_capsule, 1, dim_vector]
            outputs = squash(K.sum(c * inputs_hat, 1, keepdims=True))

            # The last iteration does not need to update `bias`, which will not be used again anyway.
            if i != self.num_routing - 1:
                # self.bias = K.update_add(self.bias, K.sum(inputs_hat * outputs, [0, -1], keepdims=True))
                self.bias += K.sum(inputs_hat * outputs, -1, keepdims=True)
            # tf.summary.histogram('BigBee', self.bias)  # for debugging
        # End: routing algorithm V2, static ------------------------------------------------------------#

        return K.reshape(outputs, [-1, self.num_capsule, self.dim_vector])
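
The routing loop above calls a `squash` function that is defined elsewhere in the file; a minimal sketch of the standard CapsNet squashing nonlinearity (Sabour et al., 2017), assuming that formulation is the one used:

def squash(vectors, axis=-1):
    # Squash s to (|s|^2 / (1 + |s|^2)) * (s / |s|): the norm is mapped
    # into [0, 1) while the direction of the vector is preserved.
    s_squared_norm = K.sum(K.square(vectors), axis, keepdims=True)
    scale = s_squared_norm / (1 + s_squared_norm) / K.sqrt(s_squared_norm + K.epsilon())
    return scale * vectors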
Exemplo n.º 34
0
    def call(self, x, mask=None):

        # Compute the box width and height for each aspect ratio
        size = min(self.img_height, self.img_width)
        wh_list = []
        for ar in self.aspect_ratios:
            if (ar == 1):
                box_height = box_width = self.this_scale * size
                wh_list.append((box_width, box_height))
                if self.two_boxes_for_ar1:
                    # Use the geometric mean of this scale value and the next to compute a slightly larger scale.
                    box_height = box_width = np.sqrt(
                        self.this_scale * self.next_scale) * size
                    wh_list.append((box_width, box_height))
            else:
                box_height = self.this_scale * size / np.sqrt(ar)
                box_width = self.this_scale * size * np.sqrt(ar)
                wh_list.append((box_width, box_height))
        wh_list = np.array(wh_list)

        if K.image_dim_ordering() == 'tf':
            batch_size, feature_map_height, feature_map_width, feature_map_channels = x._keras_shape
        else:
            batch_size, feature_map_channels, feature_map_height, feature_map_width = x._keras_shape

        # Compute the grid of box center points; they are identical for all aspect ratios.

        # Compute the step sizes
        if (self.this_steps is None):
            step_height = self.img_height / feature_map_height
            step_width = self.img_width / feature_map_width
        else:
            if isinstance(self.this_steps,
                          (list, tuple)) and (len(self.this_steps) == 2):
                step_height = self.this_steps[0]
                step_width = self.this_steps[1]
            elif isinstance(self.this_steps, (int, float)):
                step_height = self.this_steps
                step_width = self.this_steps
        # Compute the offsets
        if (self.this_offsets is None):
            offset_height = 0.5
            offset_width = 0.5
        else:
            if isinstance(self.this_offsets,
                          (list, tuple)) and (len(self.this_offsets) == 2):
                offset_height = self.this_offsets[0]
                offset_width = self.this_offsets[1]
            elif isinstance(self.this_offsets, (int, float)):
                offset_height = self.this_offsets
                offset_width = self.this_offsets
        # Compute the center point coordinates
        cy = np.linspace(offset_height * step_height,
                         (offset_height + feature_map_height - 1) *
                         step_height, feature_map_height)
        cx = np.linspace(offset_width * step_width,
                         (offset_width + feature_map_width - 1) * step_width,
                         feature_map_width)
        cx_grid, cy_grid = np.meshgrid(cx, cy)
        cx_grid = np.expand_dims(cx_grid, -1)
        cy_grid = np.expand_dims(cy_grid, -1)

        # Assemble the output tensor: (feature_map_height, feature_map_width, n_boxes, 4)

        boxes_tensor = np.zeros(
            (feature_map_height, feature_map_width, self.n_boxes, 4))

        # The last dimension contains (cx, cy, w, h)
        boxes_tensor[:, :, :, 0] = np.tile(cx_grid, (1, 1, self.n_boxes))  # cx
        boxes_tensor[:, :, :, 1] = np.tile(cy_grid, (1, 1, self.n_boxes))  # cy
        boxes_tensor[:, :, :, 2] = wh_list[:, 0]  # w
        boxes_tensor[:, :, :, 3] = wh_list[:, 1]  # h

        # Convert coordinates: (cx, cy, w, h) -> (xmin, ymin, xmax, ymax)
        boxes_tensor = convert_coordinates(boxes_tensor,
                                           start_index=0,
                                           conversion='centroids2corners')

        # Clip the coordinates to lie within the image boundaries
        if self.clip_boxes:
            x_coords = boxes_tensor[:, :, :, [0, 2]]
            x_coords[x_coords >= self.img_width] = self.img_width - 1
            x_coords[x_coords < 0] = 0
            boxes_tensor[:, :, :, [0, 2]] = x_coords
            y_coords = boxes_tensor[:, :, :, [1, 3]]
            y_coords[y_coords >= self.img_height] = self.img_height - 1
            y_coords[y_coords < 0] = 0
            boxes_tensor[:, :, :, [1, 3]] = y_coords

        # Normalize the coordinates to [0, 1]
        if self.normalize_coords:
            boxes_tensor[:, :, :, [0, 2]] /= self.img_width
            boxes_tensor[:, :, :, [1, 3]] /= self.img_height

        # Convert the coordinate format if needed
        if self.coords == 'centroids':
            # Convert back from (xmin, ymin, xmax, ymax) to (cx, cy, w, h)
            boxes_tensor = convert_coordinates(boxes_tensor,
                                               start_index=0,
                                               conversion='corners2centroids',
                                               border_pixels='half')
        elif self.coords == 'minmax':
            # Convert (xmin, ymin, xmax, ymax) to (xmin, xmax, ymin, ymax)
            boxes_tensor = convert_coordinates(boxes_tensor,
                                               start_index=0,
                                               conversion='corners2minmax',
                                               border_pixels='half')

        # Create a tensor containing the variances; it has the same shape as `boxes_tensor`,
        # i.e. (feature_map_height, feature_map_width, n_boxes, 4)
        variances_tensor = np.zeros_like(boxes_tensor)
        variances_tensor += self.variances

        # Now `boxes_tensor` has shape (feature_map_height, feature_map_width, n_boxes, 8)
        boxes_tensor = np.concatenate((boxes_tensor, variances_tensor),
                                      axis=-1)

        # Result: (batch_size, feature_map_height, feature_map_width, n_boxes, 8)
        boxes_tensor = np.expand_dims(boxes_tensor, axis=0)
        boxes_tensor = K.tile(K.constant(boxes_tensor, dtype='float32'),
                              (K.shape(x)[0], 1, 1, 1, 1))

        return boxes_tensor
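
As a standalone illustration of the center-point computation above (hypothetical sizes, not taken from the snippet), the `np.linspace` calls place one center per feature-map cell, offset half a step from the border:

import numpy as np

feature_map_height, feature_map_width = 3, 4  # feature map over a 300x400 image
step_height, step_width = 300 / 3, 400 / 4    # one step per cell
cy = np.linspace(0.5 * step_height, (0.5 + feature_map_height - 1) * step_height,
                 feature_map_height)
cx = np.linspace(0.5 * step_width, (0.5 + feature_map_width - 1) * step_width,
                 feature_map_width)
print(cy)  # [ 50. 150. 250.]      -> vertical cell centers
print(cx)  # [ 50. 150. 250. 350.] -> horizontal cell centers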
Exemplo n.º 35
0
def get_model():
    MAX_QUERIES = Config.MAX_QUERIES
    NUM_COL = Config.NUM_COL
    MAX_ENTITY_LENGTH = Config.MAX_ENTITY_LENGTH
    CONV_VOCAB_LEN = Config.CONV_VOCAB_LEN
    NUM_INTENTS = Config.NUM_INTENTS
    OPERATOR_LEN = Config.OPERATOR_LEN
    MAX_DB_RESULTS = Config.MAX_DB_RESULTS

    bs_input = Input(shape=(MAX_QUERIES, NUM_COL, MAX_ENTITY_LENGTH,
                            CONV_VOCAB_LEN))
    intent_input = Input(shape=(
        MAX_QUERIES,
        NUM_INTENTS,
    ))
    operation_input = Input(shape=(MAX_QUERIES, NUM_COL, OPERATOR_LEN))

    bs_proc = TimeDistributed(
        TimeDistributed(TimeDistributed(Dense(
            10, activation='sigmoid'))))(bs_input)
    LSTM_bs_emb = TimeDistributed(
        TimeDistributed(LSTM(50, return_sequences=False,
                             return_state=False)))(bs_proc)
    rep_intent_input = TimeDistributed(RepeatVector(NUM_COL))(intent_input)
    print(LSTM_bs_emb.shape)
    all_steps = Concatenate(axis=-1)(
        [LSTM_bs_emb, operation_input, rep_intent_input])
    all_steps = Lambda(lambda x: tf.reshape(
        x,
        shape=(-1, MAX_QUERIES, NUM_COL * (50 + OPERATOR_LEN + NUM_INTENTS))))(
            all_steps)
    encoder_lstm = Dense(50, activation='relu')(all_steps)
    encoder_lstm = TimeDistributed(RepeatVector(MAX_DB_RESULTS))(encoder_lstm)

    decoder_lstm1 = TimeDistributed(LSTM(50,
                                         return_sequences=True))(encoder_lstm)

    decoder_lstm1 = Dense(NUM_COL * 50, activation='relu')(decoder_lstm1)
    decoder_lstm1 = Lambda(lambda x: tf.reshape(
        x, shape=(-1, MAX_QUERIES, MAX_DB_RESULTS, NUM_COL, 50)))(
            decoder_lstm1)

    decoder_lstm2 = TimeDistributed(
        Lambda(lambda x: K.tile(K.expand_dims(
            x, axis=-2), [1, 1, 1, MAX_ENTITY_LENGTH, 1])))(decoder_lstm1)
    decoder_lstm3 = TimeDistributed(
        TimeDistributed(TimeDistributed(LSTM(
            10, return_sequences=True))))(decoder_lstm2)

    out = TimeDistributed(
        TimeDistributed(
            TimeDistributed(
                TimeDistributed(Dense(CONV_VOCAB_LEN,
                                      activation='softmax')))))(decoder_lstm3)
    db_model = Model(inputs=[bs_input, intent_input, operation_input],
                     outputs=[out])

    db_model.summary()

    db_model.compile(optimizer='adam', loss='categorical_crossentropy')
    return db_model
Exemplo n.º 36
0
def add_loss(locations, confidences, batched_bboxes, batched_num_bboxes,
             bbox_priors, location_loss_alpha):
    batch_size = tf.shape(locations)[0]  # locations.get_shape().as_list()[0]

    # ground truth bounding boxes:
    # [batch_size, # of ground truth bounding boxes, 4]
    # we also need to know the number of ground truth bounding boxes for each image in the batch
    # (it can be different for each image...)
    # We could assume 1 for now.

    # Pass the locations, confidences, and ground truth labels to the matching function
    locations = tf.reshape(locations, [-1, 4])
    confidences = tf.reshape(confidences, [-1])

    # add the priors to the predicted residuals
    locations += tf.tile(bbox_priors, [batch_size, 1])

    # add a small epsilon to the confidences
    confidences += SMALL_EPSILON

    # print "Shapes"
    # print locations.get_shape().as_list()
    # print confidences.get_shape().as_list()
    # print batched_bboxes.get_shape().as_list()
    # print batched_num_bboxes.get_shape().as_list()
    params = [
        locations, confidences, batched_bboxes, batched_num_bboxes, batch_size,
        location_loss_alpha
    ]
    matching, stacked_gt_bboxes = tf.py_func(compute_assignments,
                                             params, [tf.int32, tf.float32],
                                             name="bipartite_matching")

    # matching: [num_predictions * batch_size] 0s and 1s for partitioning
    # stacked_gt_bboxes : [total number of gt bboxes for this batch, 4]

    # dynamic partition the bounding boxes and confidences into "positives" and "negatives"
    unmatched_locations, matched_locations = tf.dynamic_partition(
        locations, matching, 2)
    unmatched_confidences, matched_confidences = tf.dynamic_partition(
        confidences, matching, 2)

    # Because we just did a dynamic partition, it could be the case that either the unmatched or the matched matrix is empty.
    # It could also be the case that there were no ground truth bboxes in this batch.
    # Let's tack on some default values so that the loss calculations are well behaved.
    matched_locations = tf.concat([matched_locations, tf.zeros([1, 4])], axis=0)
    stacked_gt_bboxes = tf.concat([stacked_gt_bboxes, tf.zeros([1, 4])], axis=0)
    matched_confidences = tf.concat([matched_confidences, tf.ones([1])], axis=0)
    unmatched_confidences = tf.concat(
        [unmatched_confidences, tf.zeros([1])], axis=0)

    location_loss = location_loss_alpha * tf.nn.l2_loss(matched_locations -
                                                        stacked_gt_bboxes)
    confidence_loss = -1. * tf.reduce_sum(
        tf.log(matched_confidences)) - tf.reduce_sum(
            tf.log((1. - unmatched_confidences) + SMALL_EPSILON))

    # It could be the case that there are no ground truth bounding boxes
    # num_gt_bboxes = tf.reduce_sum(batched_num_bboxes)

    # loc_loss = lambda: location_loss_alpha * tf.nn.l2_loss(matched_locations - stacked_gt_bboxes)
    # zero_loc_loss = lambda: tf.zeros(shape=[])
    # location_loss = tf.cond(num_gt_bboxes > 0, loc_loss, zero_loc_loss)

    # conf_loss = lambda: -1. * tf.reduce_sum(tf.log(matched_confidences)) - tf.reduce_sum(tf.log((1. - unmatched_confidences) + SMALL_EPSILON))
    # all_negative_conf_loss = lambda : -1. * tf.reduce_sum(tf.log((1. - unmatched_confidences) + SMALL_EPSILON))
    # confidence_loss = tf.cond(num_gt_bboxes > 0, conf_loss, all_negative_conf_loss)

    slim.losses.add_loss(location_loss)
    slim.losses.add_loss(confidence_loss)

    return location_loss, confidence_loss
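
For readers unfamiliar with `tf.dynamic_partition`, here is a minimal standalone sketch (TensorFlow 2 eager style for brevity) of how a 0/1 `matching` vector splits predictions into negatives and positives:

import tensorflow as tf

locations = tf.constant([[0., 0., 1., 1.],
                         [2., 2., 3., 3.],
                         [4., 4., 5., 5.]])
matching = tf.constant([0, 1, 0])  # 1 = matched to a ground truth box
unmatched, matched = tf.dynamic_partition(locations, matching, 2)
print(unmatched.shape, matched.shape)  # (2, 4) (1, 4)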
Exemplo n.º 37
0
    def call(self, x):
        samples = K.shape(x)[0]
        time = K.shape(x)[1]
        pos_enc = self.embedding_layer(
            K.reshape(K.arange(time, dtype='int32'), (1, -1)))
        return K.tile(pos_enc, (samples, 1, 1))
Exemplo n.º 38
0
    def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        if self.stateful:
            self.reset_states()
        else:
            # initial states: all-zero tensor of shape (output_dim)
            self.states = [None]
        input_dim = input_shape[2]
        self.input_dim = input_dim

        self.W = self.init((input_dim, self.output_dim),
                           name='{}_W'.format(self.name))
        #self.b = K.zeros((self.N,), name='{}_b'.format(self.name))
        self.b = initializations.uniform((self.N, ),
                                         scale=0.01,
                                         name='{}_b'.format(self.name))
        self.baug = K.tile(self.b, [2])

        h0 = self.h0_mean + initializations.uniform(
            (2 * self.N, ), scale=0.01).get_value()
        self.h0 = K.variable(h0, name='{}_h0'.format(self.name))

        if ('full' in self.unitary_impl):
            # we're using a full unitary recurrence matrix

            if (self.inner_init == 'svd'):
                # use SVD to initialize U
                self.U = unitary_svd_init((self.N, self.N),
                                          name='{}_U'.format(self.name))
            elif (self.inner_init == 'ASB2016'):
                # use parameterization of [ASB2016] to initialize U
                Uaug, _, _, _ = unitary_ASB2016_init((self.N, self.N))
                Uaug = Uaug.eval()
                self.U = K.variable(np.concatenate(
                    (Uaug[:self.N, :self.N], Uaug[:self.N, self.N:]), axis=0),
                                    name='{}_U'.format(self.name))

            self.Uaug = augRight(self.U, module=K)

        elif (self.unitary_impl == 'ASB2016'):
            # we're using the parameterization of [Arjovsky, Shah, Bengio 2016]
            self.Uaug, self.theta, self.reflection, _ = unitary_ASB2016_init(
                (self.N, self.N), name=self.name)

        # set the trainable weights
        if ('full' in self.unitary_impl):
            self.trainable_weights = [self.W, self.U, self.b, self.h0]
        elif (self.unitary_impl == 'ASB2016'):
            self.trainable_weights = [
                self.W, self.theta, self.reflection, self.b, self.h0
            ]

        self.regularizers = []
        #if self.W_regularizer:
        #    self.W_regularizer.set_param(self.W)
        #    self.regularizers.append(self.W_regularizer)
        #if self.U_regularizer:
        #    self.U_regularizer.set_param(self.U)
        #    self.regularizers.append(self.U_regularizer)
        #if self.b_regularizer:
        #    self.b_regularizer.set_param(self.b)
        #    self.regularizers.append(self.b_regularizer)

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
Exemplo n.º 39
0
    def call(self,
             inputs,
             initial_state=None,
             initial_readout=None,
             ground_truth=None,
             mask=None,
             training=None):
        # input shape: `(samples, time (padded with zeros), input_dim)`
        # note that the .build() method of subclasses MUST define
        # self.input_spec and self.state_spec with complete input shapes.
        if type(mask) is list:
            mask = mask[0]
        if self.model is None:
            raise Exception('Empty RecurrentModel.')
        num_req_states = self.num_states
        if self.readout:
            num_actual_states = num_req_states - 1
        else:
            num_actual_states = num_req_states
        if type(inputs) is list:
            inputs_list = inputs[:]
            inputs = inputs_list.pop(0)
            initial_states = inputs_list[:num_actual_states]
            if len(initial_states) > 0:
                if self._is_optional_input_placeholder(initial_states[0]):
                    initial_states = self.get_initial_state(inputs)
            inputs_list = inputs_list[num_actual_states:]
            if self.readout:
                initial_readout = inputs_list.pop(0)
                if self.teacher_force:
                    ground_truth = inputs_list.pop()
        else:
            if initial_state is not None:
                if not isinstance(initial_state, (list, tuple)):
                    initial_states = [initial_state]
                else:
                    initial_states = list(initial_state)
                if self._is_optional_input_placeholder(initial_states[0]):
                    initial_states = self.get_initial_state(inputs)

            elif self.stateful:
                initial_states = self.states
            else:
                initial_states = self.get_initial_state(inputs)
        if self.readout:
            if initial_readout is None or self._is_optional_input_placeholder(
                    initial_readout):
                output_shape = K.int_shape(_to_list((self.model.output))[0])
                output_ndim = len(output_shape)
                input_ndim = K.ndim(inputs)
                initial_readout = K.zeros_like(inputs)
                slices = [slice(None)] + [0] * (input_ndim - 1)
                initial_readout = initial_readout[slices]  # (batch_size,)
                initial_readout = K.reshape(initial_readout,
                                            (-1, ) + (1, ) * (output_ndim - 1))
                initial_readout = K.tile(initial_readout,
                                         (1, ) + tuple(output_shape[1:]))
            initial_states.append(initial_readout)
            if self.teacher_force:
                if ground_truth is None or self._is_optional_input_placeholder(
                        ground_truth):
                    raise Exception(
                        'ground_truth must be provided for RecurrentModel with teacher_force=True.'
                    )
                # counter = K.zeros((1,), dtype='int32')
                counter = K.zeros((1, ))
                counter = K.cast(counter, 'int32')
                initial_states.insert(-1, counter)
                initial_states.insert(-1, ground_truth)
                num_req_states += 2
        if len(initial_states) != num_req_states:
            raise ValueError('Layer requires ' + str(num_req_states) +
                             ' states but was passed ' +
                             str(len(initial_states)) + ' initial states.')
        input_shape = K.int_shape(inputs)
        if self.unroll and input_shape[1] is None:
            raise ValueError('Cannot unroll a RNN if the '
                             'time dimension is undefined. \n'
                             '- If using a Sequential model, '
                             'specify the time dimension by passing '
                             'an `input_shape` or `batch_input_shape` '
                             'argument to your first layer. If your '
                             'first layer is an Embedding, you can '
                             'also use the `input_length` argument.\n'
                             '- If using the functional API, specify '
                             'the time dimension by passing a `shape` '
                             'or `batch_shape` argument to your Input layer.')
        preprocessed_input = self.preprocess_input(inputs, training=None)
        constants = self.get_constants(inputs, training=None)
        if self.decode:
            initial_states.insert(0, inputs)
            preprocessed_input = K.zeros((1, self.output_length, 1))
            input_length = self.output_length
        else:
            input_length = input_shape[1]
        if self.uses_learning_phase:
            with learning_phase_scope(0):
                last_output_test, outputs_test, states_test, updates = rnn(
                    self.step,
                    preprocessed_input,
                    initial_states,
                    go_backwards=self.go_backwards,
                    mask=mask,
                    constants=constants,
                    unroll=self.unroll,
                    input_length=input_length)
            with learning_phase_scope(1):
                last_output_train, outputs_train, states_train, updates = rnn(
                    self.step,
                    preprocessed_input,
                    initial_states,
                    go_backwards=self.go_backwards,
                    mask=mask,
                    constants=constants,
                    unroll=self.unroll,
                    input_length=input_length)

            last_output = K.in_train_phase(last_output_train,
                                           last_output_test,
                                           training=training)
            outputs = K.in_train_phase(outputs_train,
                                       outputs_test,
                                       training=training)
            states = []
            for state_train, state_test in zip(states_train, states_test):
                states.append(
                    K.in_train_phase(state_train,
                                     state_test,
                                     training=training))

        else:
            last_output, outputs, states, updates = rnn(
                self.step,
                preprocessed_input,
                initial_states,
                go_backwards=self.go_backwards,
                mask=mask,
                constants=constants,
                unroll=self.unroll,
                input_length=input_length)
        states = list(states)
        if self.decode:
            states.pop(0)
        if self.readout:
            states.pop()
            if self.teacher_force:
                states.pop()
                states.pop()
        if len(updates) > 0:
            self.add_update(updates)
        if self.stateful:
            updates = []
            for i in range(len(states)):
                updates.append((self.states[i], states[i]))
            self.add_update(updates, inputs)

        # Properly set learning phase
        if 0 < self.dropout + self.recurrent_dropout:
            last_output._uses_learning_phase = True
            outputs._uses_learning_phase = True

        if self.return_sequences:
            y = outputs
        else:
            y = last_output
        if self.return_states:
            return [y] + states
        else:
            return y
Exemplo n.º 40
0
def repeat_(x, k):
    tile_factor = [1, k] + [1] * (kb.ndim(x) - 1)
    return kb.tile(x[:, None, :], tile_factor)
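
A usage sketch for `repeat_` (hypothetical shapes): a new axis is inserted at position 1 and tiled `k` times, so a `(batch, dim)` tensor becomes `(batch, k, dim)`:

x = kb.ones((2, 5))
y = repeat_(x, 3)
assert kb.int_shape(y) == (2, 3, 5)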
Exemplo n.º 41
0
    def get_initial_state(self, x):
        initial_state = K.expand_dims(self.h0, 0)  # (1, output_dim)
        initial_state = K.tile(initial_state, [x.shape[0], 1])  # (samples, output_dim)
        #initial_states = [initial_state for _ in range(len(self.states))]
        initial_states = [initial_state]
        return initial_states
Exemplo n.º 42
0
    def call(self, inputs, training=None):
        # inputs.shape=[None, input_num_capsule, input_dim_capsule]
        # inputs_expand.shape=[None, 1, input_num_capsule, input_dim_capsule]
        inputs_expand = K.expand_dims(inputs, 1)

        # Replicate num_capsule dimension to prepare being multiplied by W
        # inputs_tiled.shape=[None, num_capsule, input_num_capsule, input_dim_capsule]
        inputs_tiled = K.tile(inputs_expand, [1, self.num_capsule, 1, 1])

        # Compute `inputs * W` by scanning inputs_tiled on dimension 0.
        # x.shape=[num_capsule, input_num_capsule, input_dim_capsule]
        # W.shape=[num_capsule, input_num_capsule, dim_capsule, input_dim_capsule]
        # Regard the first two dimensions as `batch` dimension,
        # then matmul: [input_dim_capsule] x [dim_capsule, input_dim_capsule]^T -> [dim_capsule].
        # inputs_hat.shape = [None, num_capsule, input_num_capsule, dim_capsule]
        inputs_hat = K.map_fn(lambda x: K.batch_dot(x, self.W, [2, 3]),
                              elems=inputs_tiled)
        """
        # Begin: routing algorithm V1, dynamic ------------------------------------------------------------#
        # The prior for coupling coefficient, initialized as zeros.
        b = K.zeros(shape=[self.batch_size, self.num_capsule, self.input_num_capsule])

        def body(i, b, outputs):
            c = tf.nn.softmax(b, dim=1)  # dim=1 is the num_capsule dimension
            outputs = squash(K.batch_dot(c, inputs_hat, [2, 2]))
            if i != 1:
                b = b + K.batch_dot(outputs, inputs_hat, [2, 3])
            return [i-1, b, outputs]

        cond = lambda i, b, inputs_hat: i > 0
        loop_vars = [K.constant(self.num_routing), b, K.sum(inputs_hat, 2, keepdims=False)]
        shape_invariants = [tf.TensorShape([]),
                            tf.TensorShape([None, self.num_capsule, self.input_num_capsule]),
                            tf.TensorShape([None, self.num_capsule, self.dim_capsule])]
        _, _, outputs = tf.while_loop(cond, body, loop_vars, shape_invariants)
        # End: routing algorithm V1, dynamic ------------------------------------------------------------#
        """
        # Begin: Routing algorithm ---------------------------------------------------------------------#
        # In forward pass, `inputs_hat_stopped` = `inputs_hat`;
        # In backward, no gradient can flow from `inputs_hat_stopped` back to `inputs_hat`.
        inputs_hat_stopped = K.stop_gradient(inputs_hat)

        # The prior for coupling coefficient, initialized as zeros.
        # b.shape = [None, self.num_capsule, self.input_num_capsule].
        b = tf.zeros(shape=[
            K.shape(inputs_hat)[0], self.num_capsule, self.input_num_capsule
        ])

        assert self.num_routing > 0, 'The num_routing should be > 0.'
        for i in range(self.num_routing):
            # c.shape=[batch_size, num_capsule, input_num_capsule]
            c = tf.nn.softmax(b, dim=1)

            # At last iteration, use `inputs_hat` to compute `outputs` in order to backpropagate gradient
            if i == self.num_routing - 1:
                # c.shape =  [batch_size, num_capsule, input_num_capsule]
                # inputs_hat.shape=[None, num_capsule, input_num_capsule, dim_capsule]
                # The first two dimensions as `batch` dimension,
                # then matmul: [input_num_capsule] x [input_num_capsule, dim_capsule] -> [dim_capsule].
                # outputs.shape=[None, num_capsule, dim_capsule]
                outputs = squash(K.batch_dot(c, inputs_hat,
                                             [2, 2]))  # [None, 10, 16]
            else:  # Otherwise, use `inputs_hat_stopped` to update `b`. No gradients flow on this path.
                outputs = squash(K.batch_dot(c, inputs_hat_stopped, [2, 2]))

                # outputs.shape =  [None, num_capsule, dim_capsule]
                # inputs_hat.shape=[None, num_capsule, input_num_capsule, dim_capsule]
                # The first two dimensions as `batch` dimension,
                # then matmul: [dim_capsule] x [input_num_capsule, dim_capsule]^T -> [input_num_capsule].
                # b.shape=[batch_size, num_capsule, input_num_capsule]
                b += K.batch_dot(outputs, inputs_hat_stopped, [2, 3])
        # End: Routing algorithm -----------------------------------------------------------------------#

        return outputs
Exemplo n.º 43
0
def seq_and_vec(x):
    x, v = x
    v = K.expand_dims(v, 1)
    v = K.tile(v, [1, K.shape(x)[1], 1])
    return K.concatenate([x, v], 2)
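
`seq_and_vec` is intended to be wrapped in a `Lambda` layer; a minimal usage sketch (hypothetical shapes) where a per-sample vector `v` is broadcast along the time axis of a sequence `x` and concatenated onto it:

from keras.layers import Input, Lambda

x = Input(shape=(None, 128))      # (batch, time, 128)
v = Input(shape=(64,))            # (batch, 64)
xv = Lambda(seq_and_vec)([x, v])  # (batch, time, 128 + 64)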
Exemplo n.º 44
0
    def call(self, inputs, states, training=None):
        """We need to reimplement `call` entirely rather than reusing the one
        from `GRUCell`, since there are lots of differences.

        Args:
            inputs: One tensor that stacks 3 inputs (x, m, s);
                x and m are of shape (n_batch, input_dim),
                s is of shape (n_batch, 1).
            states: states and other values from the previous step:
                (h_tm1, x_keep_tm1, s_prev_tm1)
        """
        # Get inputs and states
        input_x = inputs[:, :self.true_input_dim]   # inputs x, m, s
        input_m = inputs[:, self.true_input_dim:-1]
        input_s = inputs[:, -1:]
        # Need to add broadcast for time_stamp if using theano backend.
        if K.backend() == 'theano':
            input_s = K.pattern_broadcast(input_s, [False, True])
        h_tm1, x_keep_tm1, s_prev_tm1 = states
        # previous memory ([n_batch * self.units])
        # previous input x ([n_batch * input_dim])
        # and the subtraction term (of delta_t^d in Equation (2))
        # ([n_batch * input_dim])
        input_1m = K.cast_to_floatx(1.) - input_m
        input_d = input_s - s_prev_tm1

        # Get dropout
        if 0. < self.dropout < 1. and self._dropout_mask is None:
            self._dropout_mask = _generate_dropout_mask(
                K.ones_like(input_x),
                self.dropout,
                training=training,
                count=3)
        if (0. < self.recurrent_dropout < 1. and
                self._recurrent_dropout_mask is None):
            self._recurrent_dropout_mask = _generate_dropout_mask(
                K.ones_like(h_tm1),
                self.recurrent_dropout,
                training=training,
                count=3)
        dp_mask = self._dropout_mask
        rec_dp_mask = self._recurrent_dropout_mask

        if self.feed_masking:
            if 0. < self.dropout < 1. and self._masking_dropout_mask is None:
                self._masking_dropout_mask = _generate_dropout_mask(
                    K.ones_like(input_m),
                    self.dropout,
                    training=training,
                    count=3)
            m_dp_mask = self._masking_dropout_mask

        # Compute decay if any
        if self.input_decay is not None:
            gamma_di = input_d * self.input_decay_kernel
            if self.use_decay_bias:
                gamma_di = K.bias_add(gamma_di, self.input_decay_bias)
            gamma_di = self.input_decay(gamma_di)
        if self.hidden_decay is not None:
            gamma_dh = K.dot(input_d, self.hidden_decay_kernel)
            if self.use_decay_bias:
                gamma_dh = K.bias_add(gamma_dh, self.hidden_decay_bias)
            gamma_dh = self.hidden_decay(gamma_dh)
        if self.feed_masking and self.masking_decay is not None:
            gamma_dm = input_d * self.masking_decay_kernel
            if self.use_decay_bias:
                gamma_dm = K.bias_add(gamma_dm, self.masking_decay_bias)
            gamma_dm = self.masking_decay(gamma_dm)

        # Get the imputed or decayed input if needed
        # and `x_keep_t` for the next time step

        if self.input_decay is not None:
            x_keep_t = K.switch(input_m, input_x, x_keep_tm1)
            x_t = K.switch(input_m, input_x, gamma_di * x_keep_t)
        elif self.x_imputation == 'forward':
            x_t = K.switch(input_m, input_x, x_keep_tm1)
            x_keep_t = x_t
        elif self.x_imputation == 'zero':
            x_t = K.switch(input_m, input_x, K.zeros_like(input_x))
            x_keep_t = x_t
        elif self.x_imputation == 'raw':
            x_t = input_x
            x_keep_t = x_t
        else:
            raise ValueError('No input decay or invalid x_imputation '
                             '{}.'.format(self.x_imputation))

        # Get decayed hidden if needed
        if self.hidden_decay is not None:
            h_tm1d = gamma_dh * h_tm1
        else:
            h_tm1d = h_tm1

        # Get decayed masking if needed
        if self.feed_masking:
            m_t = input_1m
            if self.masking_decay is not None:
                m_t = gamma_dm * m_t

        # Apply the dropout
        if 0. < self.dropout < 1.:
            x_z, x_r, x_h = x_t * dp_mask[0], x_t * dp_mask[1], x_t * dp_mask[2]
            if self.feed_masking:
                m_z, m_r, m_h = (m_t * m_dp_mask[0],
                                 m_t * m_dp_mask[1],
                                 m_t * m_dp_mask[2]
                                )
        else:
            x_z, x_r, x_h = x_t, x_t, x_t
            if self.feed_masking:
                m_z, m_r, m_h = m_t, m_t, m_t
        if 0. < self.recurrent_dropout < 1.:
            h_tm1_z, h_tm1_r = (h_tm1d * rec_dp_mask[0],
                                h_tm1d * rec_dp_mask[1])
        else:
            h_tm1_z, h_tm1_r = h_tm1d, h_tm1d

        # Get z_t, r_t, hh_t
        z_t = K.dot(x_z, self.kernel_z) + K.dot(h_tm1_z, self.recurrent_kernel_z)
        r_t = K.dot(x_r, self.kernel_r) + K.dot(h_tm1_r, self.recurrent_kernel_r)
        hh_t = K.dot(x_h, self.kernel_h)
        if self.feed_masking:
            z_t += K.dot(m_z, self.masking_kernel_z)
            r_t += K.dot(m_r, self.masking_kernel_r)
            hh_t += K.dot(m_h, self.masking_kernel_h)
        if self.use_bias:
            z_t = K.bias_add(z_t, self.input_bias_z)
            r_t = K.bias_add(r_t, self.input_bias_r)
            hh_t = K.bias_add(hh_t, self.input_bias_h)
        z_t = self.recurrent_activation(z_t)
        r_t = self.recurrent_activation(r_t)
        
        if 0. < self.recurrent_dropout < 1.:
            h_tm1_h = r_t * h_tm1d * rec_dp_mask[2]
        else:
            h_tm1_h = r_t * h_tm1d        
        hh_t = self.activation(hh_t + K.dot(h_tm1_h, self.recurrent_kernel_h))

        # get h_t
        h_t = z_t * h_tm1 + (1 - z_t) * hh_t
        if 0. < self.dropout + self.recurrent_dropout:
            if training is None:
                h_t._uses_learning_phase = True

        # get s_prev_t
        s_prev_t = K.switch(input_m, 
                            K.tile(input_s, [1, self.state_size[-1]]),
                            s_prev_tm1)
        return h_t, [h_t, x_keep_t, s_prev_t]
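
The decay terms above follow GRU-D (Che et al., 2018): a decay factor shrinks stale inputs and hidden states as the time since the last observation grows. A minimal NumPy sketch of the paper's canonical `exp(-relu(.))` decay (the snippet leaves the actual activation configurable via `self.input_decay` / `self.hidden_decay`):

import numpy as np

def grud_decay(delta, w, b=0.0):
    # gamma = exp(-max(0, w * delta + b)): equals 1 at delta = 0
    # and decays toward 0 as the gap delta grows.
    return np.exp(-np.maximum(0.0, w * delta + b))

print(grud_decay(np.array([0., 1., 5.]), w=0.5))  # [1.     0.6065 0.0821]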
Exemplo n.º 45
0
    def _build_layers_v2(self, input_dict, num_outputs, options):

        TRANSFORMER_SIMPLICIAL_DIM = options["custom_options"][
            "transformer_simplicial_model_dim"]
        TRANSFORMER_MODEL_DIM = options["custom_options"][
            "transformer_model_dim"]
        TRANSFORMER_NUM_HEADS = options["custom_options"][
            "transformer_num_heads"]
        TRANSFORMER_DEPTH = options["custom_options"]["transformer_depth"]
        CONV_PADDING = options["custom_options"]["conv_padding"]
        NUM_VIRTUAL_ENTITIES = options["custom_options"][
            "num_virtual_entities"]

        # For detailed comments see the base agent

        inputs = input_dict["obs"]

        sess = tf.get_default_session()
        K.set_session(sess)

        attention_layer = MultiHeadSelfAttentionZambaldi(
            name='self_attention',
            num_heads=TRANSFORMER_NUM_HEADS,
            use_masking=False,
            dropout=0,
            compression_window_size=None,
            num_virtual_entities=NUM_VIRTUAL_ENTITIES)
        attention_layer_2simplex = MultiHeadSelfAttentionSimplicial(
            name='self_2attention',
            num_heads=1,
            d_simp_model=TRANSFORMER_SIMPLICIAL_DIM,
            use_masking=False,
            dropout=0,
            compression_window_size=None,
            num_virtual_entities=NUM_VIRTUAL_ENTITIES)
        dense_layer1 = layers.Dense(TRANSFORMER_MODEL_DIM, activation='relu')
        dense_layer2 = layers.Dense(TRANSFORMER_MODEL_DIM)

        def transformer_block(input):
            a = LayerNormalization()(input)

            a1 = attention_layer(
                a
            )  # a1 = attention(h) has shape -1, seq_len, TRANSFORMER_MODEL_DIM
            a2 = attention_layer_2simplex(
                a)  # shape -1, seq_len, TRANSFORMER_SIMPLICIAL_DIM

            a2 = LayerNormalization()(a2)

            ac = Concatenate()(
                [a1, a2]
            )  # shape -1, seq_len, TRANSFORMER_MODEL_DIM + TRANSFORMER_SIMPLICIAL_DIM
            b = dense_layer1(ac)
            b2 = dense_layer2(b)  # b = ff(ac)
            r = layers.Add()([input, b2])
            Hprime = LayerNormalization()(r)

            return Hprime

        # CONVOLUTIONS ------
        #
        x = layers.Lambda(lambda x: x / 255)(inputs)  # rescale RGB to [0,1]
        x = layers.Conv2D(12, (2, 2), activation='relu',
                          padding=CONV_PADDING)(x)
        x = layers.Conv2D(24, (2, 2), activation='relu', padding=CONV_PADDING)(
            x)  # output shape -1, num_rows, num_cols, 24
        x = layers.Dense(
            TRANSFORMER_MODEL_DIM - 2, activation=None, use_bias=False
        )(x)  # output shape -1, num_rows, num_cols, TRANSFORMER_MODEL_DIM-2

        # POSITION EMBEDDING -----
        #
        num_rows, num_cols, d_model = x.get_shape().as_list()[-3:]

        ps = np.zeros([num_rows, num_cols, 2],
                      dtype=K.floatx())  # shape (12,13,2)
        for ty in range(num_rows):
            for tx in range(num_cols):
                ps[ty, tx, :] = [(2 / (num_rows - 1)) * ty - 1,
                                 (2 / (num_cols - 1)) * tx - 1]

        ps_expand = K.expand_dims(K.constant(ps),
                                  axis=0)  # shape (1,num_rows,num_cols,2)
        ps_tiled = K.tile(
            ps_expand,
            [K.shape(x)[0], 1, 1, 1])  # shape (None,num_rows,num_cols,2)

        # (None,num_rows,num_cols,TRANSFORMER_MODEL_DIM-2) concatenated with (None,num_rows,num_cols,2)
        # to get (None,num_rows,num_cols,TRANSFORMER_MODEL_DIM)
        x = Concatenate(axis=3)([x, ps_tiled])
        x = layers.Reshape(
            (num_rows * num_cols,
             d_model + 2))(x)  # shape (None, num_rows*num_cols,d_model+2)

        # NOTE: the batch dimension is preserved by reshape, see https://www.tensorflow.org/api_docs/python/tf/keras/layers/Reshape

        # We now add some virtual entities, which are initialised randomly
        tokens = np.arange(NUM_VIRTUAL_ENTITIES).reshape(
            (1, NUM_VIRTUAL_ENTITIES))  # [[0,1,2,...,NUM_VIRTUAL_ENTITIES-1]]
        tokens = K.constant(tokens)
        ve = layers.Embedding(
            input_dim=NUM_VIRTUAL_ENTITIES, output_dim=d_model + 2)(
                tokens)  # shape (1,NUM_VIRTUAL_ENTITIES,d_model+2)
        ve_tiled = K.tile(ve, [K.shape(x)[0], 1, 1])
        x = Concatenate(axis=1)([x, ve_tiled])

        # TRANSFORMER -----
        for i in range(TRANSFORMER_DEPTH):
            x = transformer_block(x)

        # The output of the simplicial Transformer includes the virtual entities,
        # which we now want to remove. The current tensor is of shape
        # (None,num_rows*num_cols+NUM_VIRTUAL_ENTITIES,TRANSFORMER_MODEL_DIM)
        x = x[:, :-NUM_VIRTUAL_ENTITIES, :]

        # MAX-POOLING -----
        # from p.4 "The E~ matrix, with shape Nxf is reduced to an f-dimensional vector by max-pooling
        # over the entity dimension. This pooled vector is then passed to a small MLP..."
        num_entities, d_model = x.get_shape().as_list()[-2:]
        x = layers.MaxPooling1D(pool_size=num_entities)(x)
        x = layers.Flatten()(x)

        # FULLY-CONNECTED LAYERS ----
        x = layers.Dense(256, activation='relu')(x)
        x = layers.Dense(256, activation='relu')(x)
        x = layers.Dense(256, activation='relu')(x)
        x = layers.Dense(256, activation='relu')(x)
        output_tensor = layers.Dense(4)(x)  # final output is logits

        return output_tensor, x
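
A small aside on the max-pooling step above: `MaxPooling1D(pool_size=num_entities)` followed by `Flatten()` is equivalent to a global max over the entity axis, so the same reduction could be expressed more directly as:

x = layers.GlobalMaxPooling1D()(x)  # (None, d_model): max over the entity axis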
Exemplo n.º 46
0
    def call(self, inputs, training=None, mask=None):
        input_shape = K.shape(inputs)

        if self.rank == 1:
            input_shape = [input_shape[i] for i in range(3)]
            batch_shape, dim, channels = input_shape

            xx_range = K.tile(K.expand_dims(K.arange(0, dim), axis=0),
                              K.stack([batch_shape, 1]))
            xx_range = K.expand_dims(xx_range, axis=-1)

            xx_channels = K.cast(xx_range, K.floatx())
            xx_channels = xx_channels / K.cast(dim - 1, K.floatx())
            xx_channels = (xx_channels * 2) - 1.

            outputs = K.concatenate([inputs, xx_channels], axis=self.axis)

        if self.rank == 2:
            if self.data_format == 'channels_first':
                inputs = K.permute_dimensions(inputs, [0, 2, 3, 1])

            input_shape = [input_shape[i] for i in range(4)]
            batch_shape, dim1, dim2, channels = input_shape

            xx_ones = K.ones(K.stack([batch_shape, dim2]), dtype='int32')
            xx_ones = K.expand_dims(xx_ones, axis=-1)

            xx_range = K.tile(K.expand_dims(K.arange(0, dim1), axis=0),
                              K.stack([batch_shape, 1]))
            xx_range = K.expand_dims(xx_range, axis=1)
            xx_channels = K.batch_dot(xx_ones, xx_range, axes=[2, 1])
            xx_channels = K.expand_dims(xx_channels, axis=-1)
            xx_channels = K.permute_dimensions(xx_channels, [0, 2, 1, 3])

            yy_ones = K.ones(K.stack([batch_shape, dim1]), dtype='int32')
            yy_ones = K.expand_dims(yy_ones, axis=1)

            yy_range = K.tile(K.expand_dims(K.arange(0, dim2), axis=0),
                              K.stack([batch_shape, 1]))
            yy_range = K.expand_dims(yy_range, axis=-1)

            yy_channels = K.batch_dot(yy_range, yy_ones, axes=[2, 1])
            yy_channels = K.expand_dims(yy_channels, axis=-1)
            yy_channels = K.permute_dimensions(yy_channels, [0, 2, 1, 3])

            xx_channels = K.cast(xx_channels, K.floatx())
            xx_channels = xx_channels / K.cast(dim1 - 1, K.floatx())
            xx_channels = (xx_channels * 2) - 1.

            yy_channels = K.cast(yy_channels, K.floatx())
            yy_channels = yy_channels / K.cast(dim2 - 1, K.floatx())
            yy_channels = (yy_channels * 2) - 1.

            outputs = K.concatenate([inputs, xx_channels, yy_channels], axis=self.axis)

            if self.use_radius:
                rr = K.sqrt(K.square(xx_channels - 0.5) +
                            K.square(yy_channels - 0.5))
                outputs = K.concatenate([outputs, rr], axis=-1)

            if self.data_format == 'channels_first':
                outputs = K.permute_dimensions(outputs, [0, 3, 1, 2])

        if self.rank == 3:
            if self.data_format == 'channels_first':
                inputs = K.permute_dimensions(inputs, [0, 2, 3, 4, 1])

            input_shape = [input_shape[i] for i in range(5)]
            batch_shape, dim1, dim2, dim3, channels = input_shape

            xx_ones = K.ones(K.stack([batch_shape, dim3]), dtype='int32')
            xx_ones = K.expand_dims(xx_ones, axis=-1)

            xx_range = K.tile(K.expand_dims(K.arange(0, dim2), axis=0),
                              K.stack([batch_shape, 1]))
            xx_range = K.expand_dims(xx_range, axis=1)

            xx_channels = K.batch_dot(xx_ones, xx_range, axes=[2, 1])
            xx_channels = K.expand_dims(xx_channels, axis=-1)
            xx_channels = K.permute_dimensions(xx_channels, [0, 2, 1, 3])

            xx_channels = K.expand_dims(xx_channels, axis=1)
            xx_channels = K.tile(xx_channels,
                                 [1, dim1, 1, 1, 1])

            yy_ones = K.ones(K.stack([batch_shape, dim2]), dtype='int32')
            yy_ones = K.expand_dims(yy_ones, axis=1)

            yy_range = K.tile(K.expand_dims(K.arange(0, dim3), axis=0),
                              K.stack([batch_shape, 1]))
            yy_range = K.expand_dims(yy_range, axis=-1)

            yy_channels = K.batch_dot(yy_range, yy_ones, axes=[2, 1])
            yy_channels = K.expand_dims(yy_channels, axis=-1)
            yy_channels = K.permute_dimensions(yy_channels, [0, 2, 1, 3])

            yy_channels = K.expand_dims(yy_channels, axis=1)
            yy_channels = K.tile(yy_channels,
                                 [1, dim1, 1, 1, 1])

            zz_range = K.tile(K.expand_dims(K.arange(0, dim1), axis=0),
                              K.stack([batch_shape, 1]))
            zz_range = K.expand_dims(zz_range, axis=-1)
            zz_range = K.expand_dims(zz_range, axis=-1)

            zz_channels = K.tile(zz_range,
                                 [1, 1, dim2, dim3])
            zz_channels = K.expand_dims(zz_channels, axis=-1)

            xx_channels = K.cast(xx_channels, K.floatx())
            xx_channels = xx_channels / K.cast(dim2 - 1, K.floatx())
            xx_channels = xx_channels * 2 - 1.

            yy_channels = K.cast(yy_channels, K.floatx())
            yy_channels = yy_channels / K.cast(dim3 - 1, K.floatx())
            yy_channels = yy_channels * 2 - 1.

            zz_channels = K.cast(zz_channels, K.floatx())
            zz_channels = zz_channels / K.cast(dim1 - 1, K.floatx())
            zz_channels = zz_channels * 2 - 1.

            outputs = K.concatenate([inputs, zz_channels, xx_channels, yy_channels],
                                    axis=self.axis)

            if self.data_format == 'channels_first':
                outputs = K.permute_dimensions(outputs, [0, 4, 1, 2, 3])

        return outputs
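
The coordinate channels built above are just grid indices normalized to [-1, 1]; a standalone NumPy sketch for the rank-2 case (hypothetical 4x3 spatial size):

import numpy as np

dim1, dim2 = 4, 3  # height, width
xx = np.tile(np.arange(dim1).reshape(-1, 1), (1, dim2)) / (dim1 - 1) * 2 - 1
yy = np.tile(np.arange(dim2).reshape(1, -1), (dim1, 1)) / (dim2 - 1) * 2 - 1
print(xx[:, 0])  # [-1.   -0.33  0.33  1.  ]  row-index channel
print(yy[0, :])  # [-1.  0.  1.]              column-index channel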
Exemplo n.º 47
0
def load_generator_network(batch_size,
                           sequence_class,
                           n_classes=1,
                           seq_length=205,
                           supply_inputs=False):

    sequence_class_onehots = np.eye(n_classes)

    #Generator network parameters
    latent_size = 100

    #Generator inputs
    latent_input_1, latent_input_2, latent_input_1_out, latent_input_2_out = None, None, None, None
    if not supply_inputs:
        latent_input_1 = Input(tensor=K.ones((batch_size, latent_size)),
                               name='noise_input_1')
        latent_input_2 = Input(tensor=K.ones((batch_size, latent_size)),
                               name='noise_input_2')
        latent_input_1_out = Lambda(lambda inp: inp * K.random_uniform(
            (batch_size, latent_size), minval=-1.0, maxval=1.0),
                                    name='lambda_rand_input_1')(latent_input_1)
        latent_input_2_out = Lambda(lambda inp: inp * K.random_uniform(
            (batch_size, latent_size), minval=-1.0, maxval=1.0),
                                    name='lambda_rand_input_2')(latent_input_2)
    else:
        latent_input_1 = Input(batch_shape=(batch_size, latent_size),
                               name='noise_input_1')
        latent_input_2 = Input(batch_shape=(batch_size, latent_size),
                               name='noise_input_2')
        latent_input_1_out = Lambda(lambda inp: inp,
                                    name='lambda_rand_input_1')(latent_input_1)
        latent_input_2_out = Lambda(lambda inp: inp,
                                    name='lambda_rand_input_2')(latent_input_2)

    class_embedding = Lambda(lambda x: K.gather(
        K.constant(sequence_class_onehots), K.cast(x[:, 0], dtype='int32')))(
            sequence_class)

    seed_input_1 = Concatenate(axis=-1)([latent_input_1_out, class_embedding])
    seed_input_2 = Concatenate(axis=-1)([latent_input_2_out, class_embedding])

    #Policy network definition
    policy_dense_1 = Dense(21 * 384,
                           activation='relu',
                           kernel_initializer='glorot_uniform',
                           name='policy_dense_1')

    policy_dense_1_reshape = Reshape((21, 1, 384))

    policy_deconv_0 = Conv2DTranspose(256, (7, 1),
                                      strides=(2, 1),
                                      padding='valid',
                                      activation='linear',
                                      kernel_initializer='glorot_normal',
                                      name='policy_deconv_0')

    policy_deconv_1 = Conv2DTranspose(192, (8, 1),
                                      strides=(2, 1),
                                      padding='valid',
                                      activation='linear',
                                      kernel_initializer='glorot_normal',
                                      name='policy_deconv_1')

    policy_deconv_2 = Conv2DTranspose(128, (7, 1),
                                      strides=(2, 1),
                                      padding='valid',
                                      activation='linear',
                                      kernel_initializer='glorot_normal',
                                      name='policy_deconv_2')

    policy_conv_3 = Conv2D(128, (8, 1),
                           strides=(1, 1),
                           padding='same',
                           activation='linear',
                           kernel_initializer='glorot_normal',
                           name='policy_conv_3')

    policy_conv_4 = Conv2D(64, (8, 1),
                           strides=(1, 1),
                           padding='same',
                           activation='linear',
                           kernel_initializer='glorot_normal',
                           name='policy_conv_4')

    policy_conv_5 = Conv2D(4, (8, 1),
                           strides=(1, 1),
                           padding='same',
                           activation='linear',
                           kernel_initializer='glorot_normal',
                           name='policy_conv_5')

    #policy_deconv_3 = Conv2DTranspose(4, (7, 1), strides=(1, 1), padding='valid', activation='linear', kernel_initializer='glorot_normal', name='policy_deconv_3')

    concat_cond_dense_1 = Lambda(lambda x: K.concatenate([
        x[0],
        K.tile(K.expand_dims(K.expand_dims(x[1], axis=1), axis=1),
               (1, K.shape(x[0])[1], K.shape(x[0])[2], 1))
    ],
                                                         axis=-1),
                                 name='concat_cond_dense_1')

    batch_norm_0 = BatchNormalization(name='policy_batch_norm_0')
    relu_0 = Lambda(lambda x: K.relu(x))
    concat_cond_0 = Lambda(lambda x: K.concatenate([
        x[0],
        K.tile(K.expand_dims(K.expand_dims(x[1], axis=1), axis=1),
               (1, K.shape(x[0])[1], K.shape(x[0])[2], 1))
    ],
                                                   axis=-1),
                           name='concat_cond_0')
    batch_norm_1 = BatchNormalization(name='policy_batch_norm_1')
    relu_1 = Lambda(lambda x: K.relu(x))
    concat_cond_1 = Lambda(lambda x: K.concatenate([
        x[0],
        K.tile(K.expand_dims(K.expand_dims(x[1], axis=1), axis=1),
               (1, K.shape(x[0])[1], K.shape(x[0])[2], 1))
    ],
                                                   axis=-1),
                           name='concat_cond_1')
    batch_norm_2 = BatchNormalization(name='policy_batch_norm_2')
    relu_2 = Lambda(lambda x: K.relu(x))
    concat_cond_2 = Lambda(lambda x: K.concatenate([
        x[0],
        K.tile(K.expand_dims(K.expand_dims(x[1], axis=1), axis=1),
               (1, K.shape(x[0])[1], K.shape(x[0])[2], 1))
    ],
                                                   axis=-1),
                           name='concat_cond_2')

    batch_norm_3 = BatchNormalization(name='policy_batch_norm_3')
    relu_3 = Lambda(lambda x: K.relu(x))
    concat_cond_3 = Lambda(lambda x: K.concatenate([
        x[0],
        K.tile(K.expand_dims(K.expand_dims(x[1], axis=1), axis=1),
               (1, K.shape(x[0])[1], K.shape(x[0])[2], 1))
    ],
                                                   axis=-1),
                           name='concat_cond_3')

    batch_norm_4 = BatchNormalization(name='policy_batch_norm_4')
    relu_4 = Lambda(lambda x: K.relu(x))
    concat_cond_4 = Lambda(lambda x: K.concatenate([
        x[0],
        K.tile(K.expand_dims(K.expand_dims(x[1], axis=1), axis=1),
               (1, K.shape(x[0])[1], K.shape(x[0])[2], 1))
    ],
                                                   axis=-1),
                           name='concat_cond_4')

    dense_1_out_1 = concat_cond_dense_1([
        policy_dense_1_reshape(policy_dense_1(seed_input_1)), class_embedding
    ])

    relu_deconv_0_out_1 = concat_cond_0([
        relu_0(batch_norm_0(policy_deconv_0(dense_1_out_1), training=True)),
        class_embedding
    ])
    relu_deconv_1_out_1 = concat_cond_1([
        relu_1(
            batch_norm_1(policy_deconv_1(relu_deconv_0_out_1), training=True)),
        class_embedding
    ])
    relu_deconv_2_out_1 = concat_cond_2([
        relu_2(
            batch_norm_2(policy_deconv_2(relu_deconv_1_out_1), training=True)),
        class_embedding
    ])
    relu_deconv_3_out_1 = concat_cond_3([
        relu_3(batch_norm_3(policy_conv_3(relu_deconv_2_out_1),
                            training=True)), class_embedding
    ])
    relu_deconv_4_out_1 = concat_cond_4([
        relu_4(batch_norm_4(policy_conv_4(relu_deconv_3_out_1),
                            training=True)), class_embedding
    ])

    policy_out_1 = Reshape(
        (seq_length, 4, 1))(policy_conv_5(relu_deconv_4_out_1))

    dense_1_out_2 = concat_cond_dense_1([
        policy_dense_1_reshape(policy_dense_1(seed_input_2)), class_embedding
    ])

    relu_deconv_0_out_2 = concat_cond_0([
        relu_0(batch_norm_0(policy_deconv_0(dense_1_out_2), training=True)),
        class_embedding
    ])
    relu_deconv_1_out_2 = concat_cond_1([
        relu_1(
            batch_norm_1(policy_deconv_1(relu_deconv_0_out_2), training=True)),
        class_embedding
    ])
    relu_deconv_2_out_2 = concat_cond_2([
        relu_2(
            batch_norm_2(policy_deconv_2(relu_deconv_1_out_2), training=True)),
        class_embedding
    ])
    relu_deconv_3_out_2 = concat_cond_3([
        relu_3(batch_norm_3(policy_conv_3(relu_deconv_2_out_2),
                            training=True)), class_embedding
    ])
    relu_deconv_4_out_2 = concat_cond_4([
        relu_4(batch_norm_4(policy_conv_4(relu_deconv_3_out_2),
                            training=True)), class_embedding
    ])

    policy_out_2 = Reshape(
        (seq_length, 4, 1))(policy_conv_5(relu_deconv_4_out_2))

    return [latent_input_1, latent_input_2], [policy_out_1, policy_out_2], []
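
A standalone sketch of the conditioning pattern the Lambda layers above all
repeat: a per-example class embedding of shape (batch, channels) is expanded
to (batch, 1, 1, channels), tiled across the spatial dimensions of the
feature map, and concatenated on the channel axis. Shapes and names below are
illustrative assumptions, not taken from the original model.

from keras import backend as K
from keras.layers import Input, Lambda
from keras.models import Model

def tile_and_concat(tensors):
    # Broadcast a (batch, channels) condition over a (batch, h, w, c) map.
    feature_map, condition = tensors
    cond = K.expand_dims(K.expand_dims(condition, axis=1), axis=1)
    cond = K.tile(cond,
                  (1, K.shape(feature_map)[1], K.shape(feature_map)[2], 1))
    return K.concatenate([feature_map, cond], axis=-1)

fmap_in = Input(shape=(21, 1, 384))   # illustrative feature map
cond_in = Input(shape=(16,))          # illustrative class embedding
merged = Lambda(tile_and_concat)([fmap_in, cond_in])  # -> (batch, 21, 1, 400)
toy_model = Model([fmap_in, cond_in], merged)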

###########################################
# Build Model
###########################################

inp = Input(batch_shape=(batch_size, timesteps))
x = Embedding(vocab_size + 1, embedding_dim, input_length=timesteps)(inp)
x = Dropout(input_drop)(x)

# Create a dropout mask for variational dropout: build a (batch, hidden_dim)
# tensor of ones and apply Dropout to it once, so the same mask is reused at
# every timestep of the recurrence.
drop_mask = Lambda(lambda x: x[:, 0, :1] * 0.,
                   output_shape=lambda s: (s[0], 1))(x)  # (batch, 1) zeros
drop_mask = Lambda(lambda x, dim: K.tile(x, (1, dim)),
                   arguments={'dim': hidden_dim},
                   output_shape=(hidden_dim, ))(drop_mask)  # (batch, hidden_dim)
drop_mask = Lambda(K.ones_like, output_shape=lambda s: s)(drop_mask)
drop_mask = Dropout(hidden_drop)(drop_mask)
# All-zero initial state with the same shape as the dropout mask.
zero_init = Lambda(K.zeros_like, output_shape=lambda s: s)(drop_mask)

x = RHN(embedding_dim, hidden_dim,
        recurrence_depth)(x, initial_state=[zero_init, drop_mask])
x = Dropout(output_drop)(x)
out = Dense(vocab_size + 1, activation='softmax')(x)

model = Model(inputs=[inp], outputs=[out])

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy')
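
Training then follows the standard Keras pattern. A minimal sketch, assuming
the RHN returns the full output sequence and that X/y are integer-encoded
token arrays (the array names and target layout are assumptions):

import numpy as np

X = np.random.randint(1, vocab_size + 1, size=(batch_size * 8, timesteps))
y = np.expand_dims(np.roll(X, -1, axis=1), axis=-1)  # shifted next-token ids
model.fit(X, y, batch_size=batch_size, epochs=1, shuffle=False)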
Exemplo n.º 49
0
def expand_label_input(x):
    """Broadcast a flat label tensor to shape (batch, 32, 32, channels)."""
    x = K.expand_dims(x, axis=1)
    x = K.expand_dims(x, axis=1)
    x = K.tile(x, [1, 32, 32, 1])
    return x
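
A hedged usage sketch: the broadcast label map is concatenated with a 32x32
feature map inside a conditional generator (layer names and the 6-way label
are illustrative assumptions).

from keras.layers import Input, Lambda, Concatenate

label_in = Input(shape=(6,))
feat_in = Input(shape=(32, 32, 64))
label_map = Lambda(expand_label_input)(label_in)          # (batch, 32, 32, 6)
conditioned = Concatenate(axis=-1)([feat_in, label_map])  # (batch, 32, 32, 70)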
Exemplo n.º 50
0
def ms_ssim_loss(y_true, y_pred):
    """ Keras ms_ssim loss """
    msssim = K.expand_dims(K.expand_dims(1.0 - ms_ssim(y_true, y_pred), axis=-1), axis=-1)
    # Expand the per-image scalar to (height, width) so Keras sees a per-pixel
    # loss. TODO: derive the 64x64 tile from the input shape instead of
    # hard-coding it.
    return K.tile(msssim, [1, 64, 64])
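
A hedged usage sketch, assuming `autoencoder` is a compiled-elsewhere Keras
model with 64x64 outputs so the hard-coded tile matches:

autoencoder.compile(optimizer='adam', loss=ms_ssim_loss)  # model assumed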
Exemplo n.º 51
0
    def call(self, x, mask=None):
        '''
        Return an anchor box tensor based on the shape of the input tensor.

        The logic implemented here is identical to the logic in the module `ssd_box_encode_decode_utils.py`.

        Note that this tensor does not participate in any graph computations at runtime. It is being created
        as a constant once during graph creation and is just being output along with the rest of the model output
        during runtime. Because of this, all logic is implemented as Numpy array operations and it is sufficient
        to convert the resulting Numpy array into a Keras tensor at the very end before outputting it.

        Arguments:
            x (tensor): 4D tensor of shape `(batch, channels, height, width)` if `dim_ordering = 'th'`
                or `(batch, height, width, channels)` if `dim_ordering = 'tf'`. The input for this
                layer must be the output of the localization predictor layer.
        '''

        # Compute box width and height for each aspect ratio
        # The shorter side of the image will be used to compute `w` and `h` using `scale` and `aspect_ratios`.
        size = min(self.img_height, self.img_width)
        # Compute the box widths and heights for all aspect ratios
        wh_list = []
        for ar in self.aspect_ratios:
            if (ar == 1):
                # Compute the regular anchor box for aspect ratio 1.
                box_height = box_width = self.this_scale * size
                wh_list.append((box_width, box_height))
                if self.two_boxes_for_ar1:
                    # Compute one slightly larger version using the geometric mean of this scale value and the next.
                    box_height = box_width = np.sqrt(self.this_scale * self.next_scale) * size
                    wh_list.append((box_width, box_height))
            else:
                box_height = self.this_scale * size / np.sqrt(ar)
                box_width = self.this_scale * size * np.sqrt(ar)
                wh_list.append((box_width, box_height))
        wh_list = np.array(wh_list)

        # We need the shape of the input tensor
        if K.image_dim_ordering() == 'tf':
            batch_size, feature_map_height, feature_map_width, feature_map_channels = x._keras_shape
        else:  # Not yet relevant since TensorFlow is the only supported backend right now, but it can't hurt to have this in here for the future
            batch_size, feature_map_channels, feature_map_height, feature_map_width = x._keras_shape

        # Compute the grid of box center points. They are identical for all aspect ratios.

        # Compute the step sizes, i.e. how far apart the anchor box center points will be vertically and horizontally.
        if (self.this_steps is None):
            step_height = self.img_height / feature_map_height
            step_width = self.img_width / feature_map_width
        else:
            if isinstance(self.this_steps, (list, tuple)) and (len(self.this_steps) == 2):
                step_height = self.this_steps[0]
                step_width = self.this_steps[1]
            elif isinstance(self.this_steps, (int, float)):
                step_height = self.this_steps
                step_width = self.this_steps
        # Compute the offsets, i.e. at what pixel values the first anchor box center point will be from the top and from the left of the image.
        if (self.this_offsets is None):
            offset_height = 0.5
            offset_width = 0.5
        else:
            if isinstance(self.this_offsets, (list, tuple)) and (len(self.this_offsets) == 2):
                offset_height = self.this_offsets[0]
                offset_width = self.this_offsets[1]
            elif isinstance(self.this_offsets, (int, float)):
                offset_height = self.this_offsets
                offset_width = self.this_offsets
        # Now that we have the offsets and step sizes, compute the grid of anchor box center points.
        cy = np.linspace(offset_height * step_height, (offset_height + feature_map_height - 1) * step_height,
                         feature_map_height)
        cx = np.linspace(offset_width * step_width, (offset_width + feature_map_width - 1) * step_width,
                         feature_map_width)
        cx_grid, cy_grid = np.meshgrid(cx, cy)
        cx_grid = np.expand_dims(cx_grid, -1)  # This is necessary for np.tile() to do what we want further down
        cy_grid = np.expand_dims(cy_grid, -1)  # This is necessary for np.tile() to do what we want further down

        # Create a 4D tensor template of shape `(feature_map_height, feature_map_width, n_boxes, 4)`
        # where the last dimension will contain `(cx, cy, w, h)`
        boxes_tensor = np.zeros((feature_map_height, feature_map_width, self.n_boxes, 4))

        boxes_tensor[:, :, :, 0] = np.tile(cx_grid, (1, 1, self.n_boxes))  # Set cx
        boxes_tensor[:, :, :, 1] = np.tile(cy_grid, (1, 1, self.n_boxes))  # Set cy
        boxes_tensor[:, :, :, 2] = wh_list[:, 0]  # Set w
        boxes_tensor[:, :, :, 3] = wh_list[:, 1]  # Set h

        # Convert `(cx, cy, w, h)` to `(xmin, ymin, xmax, ymax)`
        boxes_tensor = convert_coordinates(boxes_tensor, start_index=0, conversion='centroids2corners')

        # If `clip_boxes` is enabled, clip the coordinates to lie within the image boundaries
        if self.clip_boxes:
            x_coords = boxes_tensor[:, :, :, [0, 2]]
            x_coords[x_coords >= self.img_width] = self.img_width - 1
            x_coords[x_coords < 0] = 0
            boxes_tensor[:, :, :, [0, 2]] = x_coords
            y_coords = boxes_tensor[:, :, :, [1, 3]]
            y_coords[y_coords >= self.img_height] = self.img_height - 1
            y_coords[y_coords < 0] = 0
            boxes_tensor[:, :, :, [1, 3]] = y_coords

        # If `normalize_coords` is enabled, normalize the coordinates to be within [0,1]
        if self.normalize_coords:
            boxes_tensor[:, :, :, [0, 2]] /= self.img_width
            boxes_tensor[:, :, :, [1, 3]] /= self.img_height

        # TODO: Implement box limiting directly for `(cx, cy, w, h)` so that we don't have to unnecessarily convert back and forth.
        if self.coords == 'centroids':
            # Convert `(xmin, ymin, xmax, ymax)` back to `(cx, cy, w, h)`.
            boxes_tensor = convert_coordinates(boxes_tensor, start_index=0, conversion='corners2centroids')
        elif self.coords == 'minmax':
            # Convert `(xmin, ymin, xmax, ymax)` to `(xmin, xmax, ymin, ymax)`.
            boxes_tensor = convert_coordinates(boxes_tensor, start_index=0, conversion='corners2minmax')

        # Create a tensor to contain the variances and append it to `boxes_tensor`. This tensor has the same shape
        # as `boxes_tensor` and simply contains the same 4 variance values for every position in the last axis.
        variances_tensor = np.zeros_like(
            boxes_tensor)  # Has shape `(feature_map_height, feature_map_width, n_boxes, 4)`
        variances_tensor += self.variances  # Long live broadcasting
        # Now `boxes_tensor` becomes a tensor of shape `(feature_map_height, feature_map_width, n_boxes, 8)`
        boxes_tensor = np.concatenate((boxes_tensor, variances_tensor), axis=-1)

        # Now prepend one dimension to `boxes_tensor` to account for the batch size and tile it along the batch dimension.
        # The result will be a 5D tensor of shape `(batch_size, feature_map_height, feature_map_width, n_boxes, 8)`
        boxes_tensor = np.expand_dims(boxes_tensor, axis=0)
        boxes_tensor = K.tile(K.constant(boxes_tensor, dtype='float32'), (K.shape(x)[0], 1, 1, 1, 1))

        return boxes_tensor
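
This `call` belongs to an SSD-style `AnchorBoxes` layer that rides along with
each localization predictor and contributes no gradients. A hedged wiring
sketch; `feature_map` is assumed to be in scope and the constructor arguments
mirror the attributes used above, though the exact signature is an assumption:

from keras.layers import Conv2D

# n_boxes = 4 here: aspect ratios {0.5, 1.0, 2.0} plus the extra box for ar 1.
loc4 = Conv2D(4 * 4, (3, 3), padding='same', name='loc4')(feature_map)
anchors4 = AnchorBoxes(img_height=300, img_width=300,
                       this_scale=0.2, next_scale=0.37,
                       aspect_ratios=[0.5, 1.0, 2.0],
                       two_boxes_for_ar1=True,
                       variances=[0.1, 0.1, 0.2, 0.2],
                       coords='centroids',
                       normalize_coords=True,
                       name='anchors4')(loc4)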
Exemplo n.º 52
0
def yolo_head(feats, anchors, num_classes):
    """Convert final layer features to bounding box parameters.

    Parameters
    ----------
    feats : tensor
        Final convolutional layer features.
    anchors : array-like
        Anchor box widths and heights.
    num_classes : int
        Number of target classes.

    Returns
    -------
    box_xy : tensor
        x, y box predictions adjusted by spatial location in conv layer.
    box_wh : tensor
        w, h box predictions adjusted by anchors and conv spatial resolution.
    box_conf : tensor
        Probability estimate for whether each box contains any object.
    box_class_pred : tensor
        Probability distribution estimate for each box over class labels.
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.variable(anchors), [1, 1, 1, num_anchors, 2])

    # Static implementation for fixed models.
    # TODO: Remove or add option for static implementation.
    # _, conv_height, conv_width, _ = K.int_shape(feats)
    # conv_dims = K.variable([conv_width, conv_height])

    # Dynamic implementation of conv dims for fully convolutional model.
    conv_dims = K.shape(feats)[1:3]  # assuming channels last
    # In YOLO the height index is the innermost iteration.
    conv_height_index = K.arange(0, stop=conv_dims[0])
    conv_width_index = K.arange(0, stop=conv_dims[1])
    conv_height_index = K.tile(conv_height_index, [conv_dims[1]])

    # TODO: `K.repeat_elements` and `tf.split` don't support dynamic splits.
    # conv_width_index = K.repeat_elements(conv_width_index, conv_dims[1], axis=0)
    conv_width_index = K.tile(
        K.expand_dims(conv_width_index, 0), [conv_dims[0], 1])
    conv_width_index = K.flatten(K.transpose(conv_width_index))
    conv_index = K.transpose(K.stack([conv_height_index, conv_width_index]))
    conv_index = K.reshape(conv_index, [1, conv_dims[0], conv_dims[1], 1, 2])
    conv_index = K.cast(conv_index, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, conv_dims[0], conv_dims[1], num_anchors, num_classes + 5])
    conv_dims = K.cast(K.reshape(conv_dims, [1, 1, 1, 1, 2]), K.dtype(feats))

    # Static generation of conv_index:
    # conv_index = np.array([_ for _ in np.ndindex(conv_width, conv_height)])
    # conv_index = conv_index[:, [1, 0]]  # swap columns for YOLO ordering.
    # conv_index = K.variable(
    #     conv_index.reshape(1, conv_height, conv_width, 1, 2))
    # feats = Reshape(
    #     (conv_dims[0], conv_dims[1], num_anchors, num_classes + 5))(feats)

    box_xy = K.sigmoid(feats[..., :2])
    box_wh = K.exp(feats[..., 2:4])
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.softmax(feats[..., 5:])

    # Adjust predictions to each spatial grid point and anchor size.
    # Note: YOLO iterates over height index before width index.
    box_xy = (box_xy + conv_index) / conv_dims
    box_wh = box_wh * anchors_tensor / conv_dims

    return box_xy, box_wh, box_confidence, box_class_probs
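
A hedged usage sketch for this head; the output tensor and anchor values are
illustrative, with `yolo_output` assumed to be the last conv layer of a
YOLOv2-style model:

import numpy as np

anchors = np.array([[0.57, 0.67], [1.87, 2.06], [3.34, 5.47],
                    [7.88, 3.53], [9.77, 9.17]])
# yolo_output: (batch, conv_h, conv_w, len(anchors) * (num_classes + 5))
box_xy, box_wh, box_conf, box_class_probs = yolo_head(yolo_output, anchors, 80)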
Exemplo n.º 53
0
def build_generator(seq_length,
                    n_sequences=1,
                    n_samples=None,
                    batch_normalize_pwm=False,
                    pwm_transform_func=None,
                    validation_sample_mode='max',
                    master_generator=None):

    use_samples = True
    if n_samples is None:
        use_samples = False
        n_samples = 1

    #Seed input for all dense/embedding layers
    ones_input = Input(tensor=K.ones((1, 1)), name='seed_input')

    #Initialize a Lambda layer to reshape flat matrices into PWM tensors
    reshape_layer = Lambda(
        lambda x: K.reshape(x, (n_sequences, seq_length, 20, 1)),
        name='onehot_reshape')

    #Initialize Template, Masking and Trainable PWMs
    dense_seq_layer = Dense(n_sequences * seq_length * 20,
                            use_bias=False,
                            kernel_initializer='glorot_uniform',
                            name='policy_pwm')

    if master_generator is not None:
        dense_seq_layer = master_generator.get_layer('policy_pwm')

    #Get Template, Mask and Trainable PWM logits
    onehot_logits = reshape_layer(dense_seq_layer(ones_input))

    #Batch Normalize PWM Logits
    if batch_normalize_pwm:
        pwm_norm_layer = InstanceNormalization(axis=-2,
                                               name='policy_batch_norm')
        if master_generator is not None:
            pwm_norm_layer = master_generator.get_layer('policy_batch_norm')
        onehot_logits = pwm_norm_layer(onehot_logits)

    #Template/mask application point (pass-through in this stripped-down variant)
    pwm_logits = onehot_logits

    #Get PWM from logits
    pwm = Softmax(axis=-2, name='pwm')(pwm_logits)

    #Optionally tile each PWM to sample from
    if use_samples:
        pwm_logits = Lambda(lambda x: K.tile(x, [n_samples, 1, 1, 1]))(
            pwm_logits)

    #Sample proper One-hot coded sequences from PWMs
    if validation_sample_mode == 'max':
        sampled_pwm = Lambda(sample_pwm, name='pwm_sampler')(pwm_logits)
    elif validation_sample_mode == 'gumbel':
        sampled_pwm = Lambda(sample_gumbel, name='pwm_sampler')(pwm_logits)
    elif validation_sample_mode == 'simple_sample':
        sampled_pwm = Lambda(sample_pwm_simple, name='pwm_sampler')(pwm_logits)
    else:
        sampled_pwm = Lambda(sample_pwm_only, name='pwm_sampler')(pwm_logits)

    #PWM & Sampled One-hot custom transform function
    if pwm_transform_func is not None:
        pwm = Lambda(lambda pwm_seq: pwm_transform_func(pwm_seq))(pwm)
        sampled_pwm = Lambda(lambda pwm_seq: pwm_transform_func(pwm_seq))(
            sampled_pwm)

    #Optionally create sample axis
    if use_samples:
        sampled_pwm = Lambda(lambda x: K.reshape(x, (
            n_samples, n_sequences, seq_length, 20, 1)))(sampled_pwm)

    generator_model = Model(
        inputs=[
            ones_input  #Dummy Seed Input
        ],
        outputs=[
            pwm_logits,  #Logits of the Templated and Masked PWMs
            pwm,  #Templated and Masked PWMs
            sampled_pwm  #Sampled One-hot sequences (n_samples per trainable PWM)
        ])

    #Lock all generator layers except policy layers
    for generator_layer in generator_model.layers:
        generator_layer.trainable = False

        if 'policy' in generator_layer.name:
            generator_layer.trainable = True

    return 'seqprop_generator', generator_model
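
A hedged usage sketch, assuming the sampling helpers referenced above
(`sample_pwm`, `sample_gumbel`, ...) are in scope; with the defaults, no
sample axis is created and every output should have shape
(n_sequences, seq_length, 20, 1):

from keras import backend as K

name, generator = build_generator(seq_length=50, n_sequences=10)
pwm_logits, pwm, sampled_onehot = generator.outputs
print([K.int_shape(t) for t in generator.outputs])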