Example #1
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.activations import sigmoid, tanh


def GRU(dim, x):
    # Learnable weights in the cell
    Wzx = layers.Dense(dim)
    Wzh = layers.Dense(dim, use_bias=False)

    Wrx = layers.Dense(dim)
    Wrh = layers.Dense(dim, use_bias=False)

    Wx = layers.Dense(dim)
    Wh = layers.Dense(dim, use_bias=False)

    # unstacking the time axis
    x = tf.unstack(x, axis=1)

    H = []

    h = tf.zeros_like(Wx(x[0]))

    for i in range(len(x)):
        # update gate, reset gate, candidate state, then the new hidden state
        z = sigmoid(Wzx(x[i]) + Wzh(h))
        r = sigmoid(Wrx(x[i]) + Wrh(h))
        ht = tanh(Wx(x[i]) + Wh(h) * r)
        h = (1 - z) * h + z * ht

        H.append(h)

    H = tf.stack(H, axis=1)

    return h, H
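
A minimal usage sketch for the function above, assuming the imports added at the top of the example; the batch size, sequence length, and feature width below are made-up values.

x_demo = tf.random.normal((4, 10, 8))       # hypothetical (batch, timesteps, features)
last_h, all_h = GRU(dim=16, x=x_demo)
print(last_h.shape)                          # (4, 16)     -- hidden state after the final timestep
print(all_h.shape)                           # (4, 10, 16) -- hidden states re-stacked along the time axis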
Example #2
    def call(self, inputs, **kwargs):
        """
        Run the raw network output through the per-anchor activation layout.

        :param inputs: tensor whose last axis holds anchor_boxes * output_channels channels
        :return: the same tensor with sigmoid applied to every channel except the width/height slots
        """

        tensors_to_concat = []

        # go through all the anchor boxes
        for i in range(constants.anchor_boxes):

            # compute the base of this anchor box
            anchor_base = i * self.output_channels

            start = sigmoid(inputs[:, :, :, anchor_base:anchor_base + 3])
            middle = inputs[:, :, :, anchor_base + 3:anchor_base + 5]
            end = sigmoid(inputs[:, :, :, anchor_base + 5:anchor_base +
                                 self.output_channels])

            # apply sigmoid activation to all but the width and height (items 3 and 4)
            tensors_to_concat.append(start)
            tensors_to_concat.append(middle)
            tensors_to_concat.append(end)

        # concatenate the three
        return concat(tensors_to_concat, axis=-1)
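
A small sketch of the per-anchor channel layout the loop above walks over; anchor_boxes = 3 and output_channels = 85 are hypothetical stand-ins, not values taken from the snippet's constants module.

anchor_boxes = 3        # hypothetical
output_channels = 85    # hypothetical: 3 sigmoid channels + 2 raw (width, height) + 80 more sigmoid channels
for i in range(anchor_boxes):
    base = i * output_channels
    # [base, base + 3)                    -> sigmoid
    # [base + 3, base + 5)                -> left raw (width, height)
    # [base + 5, base + output_channels)  -> sigmoid
    print(base, base + 3, base + 5, base + output_channels)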
Example #3
    def call(self, x):
        raw_m, raw_l = x

        sig_m = activations.sigmoid(raw_m)
        sig_l = activations.sigmoid(raw_l)

        # Dim(batch, grid, grid, 5 + num_classes)
        sig_m = tf.split(sig_m, 3, axis=-1)
        raw_m = tf.split(raw_m, 3, axis=-1)
        sig_l = tf.split(sig_l, 3, axis=-1)
        raw_l = tf.split(raw_l, 3, axis=-1)

        for i in range(3):
            txty_m, _, conf_prob_m = tf.split(sig_m[i], (2, 2, -1), axis=-1)
            _, twth_m, _ = tf.split(raw_m[i], (2, 2, -1), axis=-1)
            txty_m = (txty_m - 0.5) * self.scales[0] + 0.5
            bxby_m = (txty_m + self.grid_coord[0]) / self.grid_size[0]
            bwbh_m = (self.anchors[0][i] /
                      self.image_size) * backend.exp(twth_m)
            sig_m[i] = tf.concat([bxby_m, bwbh_m, conf_prob_m], axis=-1)

            txty_l, _, conf_prob_l = tf.split(sig_l[i], (2, 2, -1), axis=-1)
            _, twth_l, _ = tf.split(raw_l[i], (2, 2, -1), axis=-1)
            txty_l = (txty_l - 0.5) * self.scales[1] + 0.5
            bxby_l = (txty_l + self.grid_coord[1]) / self.grid_size[1]
            bwbh_l = (self.anchors[1][i] /
                      self.image_size) * backend.exp(twth_l)
            sig_l[i] = tf.concat([bxby_l, bwbh_l, conf_prob_l], axis=-1)

        # Dim(batch, grid, grid, 3 * (5 + num_classes))
        pred_m = tf.concat(sig_m, axis=-1)
        pred_l = tf.concat(sig_l, axis=-1)

        return pred_m, pred_l
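
The loop above applies the usual YOLO-style box decode; a minimal standalone sketch of that mapping, with the grid coordinate, grid size, anchor, scale, and image size all made up:

import tensorflow as tf

def decode_box(txty_raw, twth_raw, grid_coord, grid_size, anchor, image_size, scale=1.05):
    # box centre: scaled sigmoid offset plus the cell coordinate, normalised by the grid size
    txty = (tf.sigmoid(txty_raw) - 0.5) * scale + 0.5
    bxby = (txty + grid_coord) / grid_size
    # box size: anchor prior (relative to the image) scaled by exp of the raw prediction
    bwbh = (anchor / image_size) * tf.exp(twth_raw)
    return bxby, bwbh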
Example #4
    def step(self, x, states):

        ytm, stm = states

        # repeat the hidden state to the length of the sequence
        _stm = K.repeat(stm, self.timesteps)

        # now multiply the weight matrix with the repeated hidden state
        _Wxstm = K.dot(_stm, self.W_a)

        # calculate the attention probabilities
        # this relates how much other timesteps contributed to this one.
        et = K.dot(activations.tanh(_Wxstm + self._uxpb),
                   K.expand_dims(self.V_a))
        at = K.exp(et)
        at_sum = K.sum(at, axis=1)
        at_sum_repeated = K.repeat(at_sum, self.timesteps)
        at /= at_sum_repeated  # vector of size (batchsize, timesteps, 1)

        # calculate the context vector
        context = K.squeeze(K.batch_dot(at, self.x_seq, axes=1), axis=1)
        # ~~~> calculate new hidden state
        # first calculate the "r" gate:

        rt = activations.sigmoid(
            K.dot(ytm, self.W_r)
            + K.dot(stm, self.U_r)
            + K.dot(context, self.C_r)
            + self.b_r)

        # now calculate the "z" gate
        zt = activations.sigmoid(
            K.dot(ytm, self.W_z)
            + K.dot(stm, self.U_z)
            + K.dot(context, self.C_z)
            + self.b_z)

        # calculate the proposal hidden state:
        s_tp = activations.tanh(
            K.dot(ytm, self.W_p)
            + K.dot((rt * stm), self.U_p)
            + K.dot(context, self.C_p)
            + self.b_p)

        # new hidden state:
        st = (1-zt)*stm + zt * s_tp

        yt = activations.softmax(
            K.dot(ytm, self.W_o)
            + K.dot(stm, self.U_o)
            + K.dot(context, self.C_o)
            + self.b_o)

        if self.return_probabilities:
            return at, [yt, st]
        else:
            return yt, [yt, st]
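
The et -> at block above is a softmax over the time axis written out by hand; a small check, with hypothetical shapes, that the exp / repeated-sum normalisation matches a softmax along axis 1:

import tensorflow as tf

et_demo = tf.random.normal((2, 7, 1))     # hypothetical (batch, timesteps, 1) attention scores
at_manual = tf.exp(et_demo) / tf.reduce_sum(tf.exp(et_demo), axis=1, keepdims=True)
at_builtin = tf.nn.softmax(et_demo, axis=1)
print(float(tf.reduce_max(tf.abs(at_manual - at_builtin))))   # ~0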
Example #5
    def call(self, y):
        yTime = self.time(y[:, 0:1])
        yPlaceA = self.placeA(y[:, 1:3]) * y[:, 3:4]
        yPlaceB = self.placeB(y[:, 1:3]) * y[:, 4:5]
        yPlaceC = self.placeC(y[:, 1:3]) * y[:, 5:6]
        yPlace = activations.sigmoid(
            (yPlaceA + yPlaceB + yPlaceC - 0.5) * 10)  # OR
        yHeadGear = self.headGear(y[:, 6:8])

        return activations.sigmoid(
            (yTime + yPlace + yHeadGear - 2.5) * 10)  # AND
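
A quick numeric check of the soft-logic trick used above: sigmoid((sum - 0.5) * 10) acts like OR over roughly-binary inputs, and sigmoid((sum - (n - 0.5)) * 10) like AND over n inputs; the values below are only illustrative.

import numpy as np

def soft_or(*vals):
    return 1.0 / (1.0 + np.exp(-(sum(vals) - 0.5) * 10))

def soft_and(*vals):
    return 1.0 / (1.0 + np.exp(-(sum(vals) - (len(vals) - 0.5)) * 10))

print(soft_or(0.0, 0.0, 1.0))    # ~1: any input "on" switches the OR on
print(soft_or(0.0, 0.0, 0.0))    # ~0
print(soft_and(1.0, 1.0, 1.0))   # ~1: all inputs must be "on"
print(soft_and(1.0, 0.0, 1.0))   # ~0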
Example #6
    def call(self, x, states):
        
        # states: list of 3 tensors (left, top, diagonal), each [BATCH, RECURRENT_DIM]
        
        left_state = states[0] #(BATCH, RECURRENT_DIM)
        top_state = states[1] #(BATCH, RECURRENT_DIM)
        diagonal_state = states[2] #(BATCH, RECURRENT_DIM)
        
        q_vec = tf.concat([left_state, top_state, diagonal_state, x], axis=1) # [BATCH, 3*RECURRENT_DIM + INPUT_DIM]
        
                     # [BATCH, RECURRENT_DIM]  [1, RECURRENT_DIM]
        reset_left = K.bias_add(K.dot(q_vec, self.w_rl), self.b_rl)
        reset_left = sigmoid(reset_left) # [BATCH, RECURRENT_DIM]
        
        reset_top = K.bias_add(K.dot(q_vec, self.w_rt), self.b_rt)
        reset_top = sigmoid(reset_top) # [BATCH, RECURRENT_DIM]
        
        reset_diagonal = K.bias_add(K.dot(q_vec, self.w_rd), self.b_rd)
        reset_diagonal = sigmoid(reset_diagonal) # [BATCH, RECURRENT_DIM]

        reset = tf.concat([reset_left, reset_top, reset_diagonal], axis=1) # [BATCH, 3*RECURRENT_DIM]
        
        _z_input = K.bias_add(K.dot(q_vec, self.w_zi), self.b_zi) # [BATCH, RECURRENT_DIM]
        _z_left = K.bias_add(K.dot(q_vec, self.w_zl), self.b_zl) # [BATCH, RECURRENT_DIM]
        _z_top = K.bias_add(K.dot(q_vec, self.w_zt), self.b_zt) # [BATCH, RECURRENT_DIM]
        _z_diagonal = K.bias_add(K.dot(q_vec, self.w_zd), self.b_zd) # [BATCH, RECURRENT_DIM]
        
        _z_input = tf.expand_dims(_z_input, axis=-1)
        _z_left = tf.expand_dims(_z_left, axis=-1)
        _z_top = tf.expand_dims(_z_top, axis=-1)
        _z_diagonal = tf.expand_dims(_z_diagonal, axis=-1)
        
        _z = tf.concat([_z_input, _z_left, _z_top, _z_diagonal], axis=-1)
        _z = K.softmax(_z, axis=-1)
        
        # each will have dims # [BATCH, RECURRENT_DIM]
        z = tf.split(_z, num_or_size_splits=4, axis=-1) 
        z_input = K.squeeze(z[0], axis=-1)
        z_left = K.squeeze(z[1], axis=-1)
        z_top = K.squeeze(z[2], axis=-1)
        z_diagonal = K.squeeze(z[3], axis=-1)
        
        # compute the candidate hidden state
        _states = tf.concat([left_state, top_state, diagonal_state], axis=1) # [BATCH, 3*RECURRENT_DIM]
        reset_states = reset * _states # reset the hidden states # [BATCH, 3*RECURRENT_DIM]
        _h_reset_states = K.dot(reset_states, self.u) # [BATCH, RECURRENT_DIM]
        _h = K.bias_add(K.dot(x, self.w_i), self.b_i) # [BATCH, RECURRENT_DIM]
        _h = _h + _h_reset_states
        _h = self.activation(_h)

        h = z_left * left_state + z_top * top_state + z_diagonal * diagonal_state + z_input * _h

        # OUTPUT [BATCH, RECURRENT_DIM]
        return h
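
A small check of the gating used above: the four z values come from a softmax over the last axis, so they sum to 1 elementwise and h is a convex combination of the three neighbouring states and the candidate; the sizes below are hypothetical.

import tensorflow as tf

logits_demo = tf.random.normal((2, 5, 4))        # hypothetical (batch, recurrent_dim, 4 gates)
z_demo = tf.nn.softmax(logits_demo, axis=-1)     # gates for input, left, top and diagonal
print(tf.reduce_sum(z_demo, axis=-1))            # all ones -> convex combination weights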
Example #7
 def call(self, c1, c2, c3, x):  # x channels:256 aspp output
     fourth_feature = self.first_conv(c1)  # 1/4
     eighth_feature = self.second_conv(c2)  # 1/8
     sixteenth_feature = self.third_conv(c3)  # 1/16
     x = self.conv(layers.concatenate((x, sixteenth_feature), axis=1))
     x = layers.UpSampling2D(size=2, interpolation='bilinear')(x)  # upsample to 1/8 (.size() is a PyTorch-ism; 1/16 -> 1/8 is a fixed factor of 2)
     x = self.gamma * activations.sigmoid(self.att(x)) * x
     x = self.conv(layers.concatenate((x, eighth_feature), axis=1))
     x = layers.UpSampling2D(size=2, interpolation='bilinear')(x)  # upsample to 1/4 (factor of 2 again)
     x = self.gamma * activations.sigmoid(self.att(x)) * x
     x = self.last_conv(layers.concatenate((x, fourth_feature), axis=1))  # default channel last, may change
     return x
Example #8
 def forward_pass(self):
     self.list_activations = []
     self.z = []
     self.z.append(np.dot(self.weights[0], self.X) + self.biases[0])
     print(self.z[0].shape)
     self.list_activations.append(sigmoid(self.z[0]))
     for i in range(1, len(self.weights)):
         z_out = np.dot(self.weights[i], self.list_activations[-1]) + self.biases[i]
         self.z.append(z_out)
         a_out = sigmoid(z_out)
         self.list_activations.append(a_out)
     return [n.shape for n in self.z], [n.shape for n in self.list_activations]
Example #9
 def call(self, inputs, training=False):
     """ Implements the feed forward part of the network """
     x = inputs
     for layer in self.layers_[:-1]:
         x = layer(x)
         x = sigmoid(x)
     x = self.layers_[-1](x)
     if self.decoder:
         x = tanh(x)
     else:
         x = sigmoid(x)
     return x
Example #10
    def call(self, inputs, training=False):
        x, a, i = inputs
        glob_avg = tf.math.segment_mean(x, i)
        glob_var = abs(
            tf.math.subtract(tf.math.segment_mean(multiply([x, x]), i),
                             multiply([glob_avg, glob_avg])))
        glob_max = tf.math.segment_max(x, i)
        glob_min = tf.math.segment_min(x, i)
        xglob = tf.concat([glob_avg, glob_var, glob_max, glob_min], axis=1)
        a, e = self.generate_edge_features(x, a)
        for MP in self.MPs:
            x = MP([x, a, e])
        for conv in self.GCNs:
            x = conv([x, a])
        x1 = self.Pool1([x, i])
        x2 = self.Pool2([x, i])
        x3 = self.Pool3([x, i])
        x = tf.concat([x1, x2, x3], axis=1)
        x = tf.concat([x, xglob], axis=1)
        for decode_layer, dropout_layer, norm_layer in zip(
                self.decode, self.dropout_layers, self.norm_layers):
            x = dropout_layer(x, training=training)
            x = self.decode_activation(decode_layer(x))
            x = norm_layer(x, training=training)

        x_loge = self.loge[0](x)
        x_loge = self.loge[1](x_loge)
        x_loge = self.loge_out(x_loge)

        x_zeni = self.zeni[0](x)
        x_zeni = self.zeni[1](x_zeni)
        x_zeni = self.zeni_out(x_zeni)
        zeni = sigmoid(self.zeni_scale(x_zeni))

        x_azi = self.azi[0](x)
        x_azi = self.azi[1](x_azi)
        x_azi = self.azi_out(x_azi)
        azi = sigmoid(self.azi_scale(x_azi))

        x_sigz = self.sigz[0](x)
        x_sigz = self.sigz[1](x_sigz)
        x_sigz = tf.math.add(tf.math.abs(self.sigz_out(x_sigz)), eps)

        x_sigaz = self.sigaz[0](x)
        x_sigaz = self.sigaz[1](x_sigaz)
        x_sigaz = tf.math.add(tf.math.abs(self.sigaz_out(x_sigaz)), eps)

        #could add correlation here

        xs = tf.stack([x_loge, zeni * np.pi, azi * 2 * np.pi, x_sigz, x_sigaz],
                      axis=1)

        return xs[:, :, 0]
Example #11
    def call(self, inputs, training=False):
        x, a, i = inputs
        if self.edgeconv:
            a, e = self.generate_edge_features(x, a)
            x = self.ECC1([x, a, e])
        for GCN_layer in self.GCNs:
            x = GCN_layer([x, a])
        x1 = self.Pool1([x, i])
        x2 = self.Pool2([x, i])
        x3 = self.Pool3([x, i])
        # tf.print(x1,x2,x3, x1.shape, x2.shape, x3)
        x = tf.concat([x1, x2, x3], axis=1)
        for decode_layer, dropout_layer, norm_layer in zip(
                self.decode, self.dropout_layers, self.norm_layers):
            x = dropout_layer(x, training=training)
            x = self.decode_activation(decode_layer(x))
            x = norm_layer(x, training=training)
        x = self.final(x)

        zeniazi = x[:, 1:3]
        zeniazi = sigmoid(self.angle_scale(zeniazi))
        # tf.print(x)
        x1 = tf.stack(
            [x[:, 0], zeniazi[:, 0] * np.pi, zeniazi[:, 1] * 2 * np.pi],
            axis=1)
        x = tf.concat([x1, x[:, 3:]], axis=1)
        return x
Example #12
def ACM(x, blockname, groups=32):
    b, w, h, c = K.int_shape(x)
    mu = tf.reduce_mean(x, axis=[1, 2], name=blockname + '_mu')
    mu = tf.expand_dims(mu, axis=1)
    mu = tf.expand_dims(mu, axis=1)
    P = Conv2D(c // 2,
               1,
               padding='same',
               groups=groups,
               name=blockname + '_P1')(mu)
    P = relu(P)
    P = Conv2D(c, 1, padding='same', groups=groups, name=blockname + '_P2')(P)
    P = sigmoid(P)

    x_mu = x - mu
    k = Conv2D(c, 1, padding='same', groups=groups,
               name=blockname + '_K')(x_mu)
    q = Conv2D(c, 1, padding='same', groups=groups,
               name=blockname + '_Q')(x_mu)
    k = softmax(k)
    q = softmax(q)
    k = x_mu * k
    q = x_mu * q
    k = K.sum(k, axis=[1, 2], keepdims=True)  # keep dims so the context vectors broadcast over H and W
    q = K.sum(q, axis=[1, 2], keepdims=True)
    k_q = k - q
    y = x + k_q
    y = y * P

    return y
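
A minimal smoke test for the module above, assuming TensorFlow >= 2.3 (grouped Conv2D) and that the Keras names used in the snippet are imported; the input shape is made up, and the channel count has to be divisible by 2 * groups for the grouped pointwise convolutions to build.

x_demo = tf.random.normal((2, 16, 16, 128))      # hypothetical (batch, H, W, C) with C = 128, groups = 32
y_demo = ACM(x_demo, blockname='acm_demo', groups=32)
print(y_demo.shape)                              # (2, 16, 16, 128) -- same shape as the input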
Example #13
def plt_layer(X, Y, W1, b1, norm_l):
    Y = Y.reshape(-1, )
    fig, ax = plt.subplots(1, W1.shape[1], figsize=(16, 4))
    for i in range(W1.shape[1]):
        layerf = lambda x: sigmoid(np.dot(norm_l(x), W1[:, i]) + b1[i])
        plt_prob(ax[i], layerf)
        ax[i].scatter(X[Y == 1, 0],
                      X[Y == 1, 1],
                      s=70,
                      marker='x',
                      c='red',
                      label="Good Roast")
        ax[i].scatter(X[Y == 0, 0],
                      X[Y == 0, 1],
                      s=100,
                      marker='o',
                      facecolors='none',
                      edgecolors=dlc["dldarkblue"],
                      linewidth=1,
                      label="Bad Roast")
        tr = np.linspace(175, 260, 50)
        ax[i].plot(tr, (-3 / 85) * tr + 21, color=dlc["dlpurple"], linewidth=2)
        ax[i].axhline(y=12, color=dlc["dlpurple"], linewidth=2)
        ax[i].axvline(x=175, color=dlc["dlpurple"], linewidth=2)
        ax[i].set_title(f"Layer 1, unit {i}")
        ax[i].set_xlabel("Temperature \n(Celsius)", size=12)
    ax[0].set_ylabel("Duration \n(minutes)", size=12)
    plt.show()
Example #14
def predict(model, input_model):

    log_reg_weights = model.get_layer("output").get_weights()[0]
    log_reg_bias = model.get_layer("output").get_weights()[1][0]

    outputs = []
    shapes = []
    weights = []

    layers_names = [layer.name for layer in model.layers]

    consumed = 0

    for name in ["input_bool", "reshape_num_output", "reshape_cat_output"]:
        if name not in layers_names:
            continue
        layer = model.get_layer(name)
        outputs.append(layer.output)
        input_shape = layer.input_shape
        if isinstance(input_shape, list):
            input_shape = input_shape[0]

        nb_channel = input_shape[-1] if len(input_shape) > 2 else 1
        nb_features = input_shape[-2] if len(input_shape) > 2 else input_shape[-1]
        nb_weights = nb_channel * nb_features
        weights.append(
            log_reg_weights[consumed : consumed + nb_weights].reshape(
                nb_features, nb_channel
            )
        )
        shapes.append((nb_features, nb_channel))
        consumed += nb_weights

    explainable_model = Model(inputs=[model.input], outputs=[model.output, *outputs],)

    predictions = explainable_model.predict(input_model)
    probas = predictions[0]
    aggregated_explanation = []

    for weight_slice, shape_feat, raw_explanation in zip(
        weights, shapes, predictions[1:]
    ):
        reshaped_expl = raw_explanation.reshape(-1, shape_feat[0], shape_feat[1])
        reshaped_weights = weight_slice.reshape(1, *weight_slice.shape)
        feature_explanation = (
            (reshaped_expl * reshaped_weights).sum(axis=-1).reshape(-1, shape_feat[0])
        )
        aggregated_explanation.append(feature_explanation)

    aggregated_explanation = np.hstack(aggregated_explanation)

    results = np.zeros(aggregated_explanation.shape)
    for idx in range(aggregated_explanation.shape[1]):
        expla_cpy = np.copy(aggregated_explanation)
        expla_cpy[:, idx] = 0
        results[:, idx] = probas.reshape(-1) - sigmoid(
            expla_cpy.sum(axis=-1) + log_reg_bias
        ).numpy().reshape(-1)

    return probas, results
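
A tiny NumPy illustration of the attribution step at the end of predict: each feature's contribution is read off as the drop in the sigmoid'd score when its column is zeroed out; the numbers below are made up.

import numpy as np

def np_sigmoid(v):
    return 1.0 / (1.0 + np.exp(-v))

agg = np.array([[2.0, -1.0, 0.5]])               # hypothetical per-feature contributions, one sample
bias = 0.1
proba = np_sigmoid(agg.sum(axis=-1) + bias)

attributions = np.zeros_like(agg)
for idx in range(agg.shape[1]):
    masked = agg.copy()
    masked[:, idx] = 0
    attributions[:, idx] = proba - np_sigmoid(masked.sum(axis=-1) + bias)

print(attributions)                              # positive -> the feature pushed the score up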
Example #15
    def __call__(self, tf_X):
        h1 = LeakyReLU(alpha=self.leakiness)(self.Bnorm1(self.conv1(tf_X)))
        print(h1.shape)
        h2 = LeakyReLU(alpha=self.leakiness)(self.Bnorm2(self.conv2(h1)))
        print(h2.shape)
        h3 = LeakyReLU(alpha=self.leakiness)(self.Bnorm3(self.conv3(h2)))
        print(h3.shape)
        h4 = LeakyReLU(alpha=self.leakiness)(self.Bnorm4(self.conv4(h3)))
        print(h4.shape)
        h5 = LeakyReLU(alpha=self.leakiness)(self.Bnorm5(self.conv5(h4)))
        print(h5.shape)
        h6 = LeakyReLU(alpha=self.leakiness)(self.Bnorm6(self.conv6(h5)))
        print(h6.shape)
        dh1 = ReLU()(self.Dropout1(self.deBnorm1(self.deconv1(h6))))
        print(dh1.shape)
        dh2 = ReLU()(self.Dropout2(
            self.deBnorm2(self.deconv2(concatenate([dh1, h5])))))
        print(dh2.shape)
        dh3 = ReLU()(self.Dropout3(
            self.deBnorm3(self.deconv3(concatenate([dh2, h4])))))
        print(dh3.shape)
        dh4 = ReLU()(self.deBnorm4(self.deconv4(concatenate([dh3, h3]))))
        print(dh4.shape)
        dh5 = ReLU()(self.deBnorm5(self.deconv5(concatenate([dh4, h2]))))
        print(dh5.shape)
        dh6 = sigmoid(self.deconv6(concatenate([dh5, h1])))
        print(dh6.shape)

        return dh6
Example #16
 def call(self, y):
     y = tf.expand_dims(y, axis=1)
     y = tf.exp(-tf.reduce_sum(tf.square(self.refPoints - y), axis=2) /
                self.twiceSigmaSquare)
     y = self.dense(y)
     y = activations.sigmoid(y)
     return y
Example #17
def channel_attention(inputs):
    """Channel Attention Map calculation

    The function aims to implement a simple
    version of a Channel Attention, whose output
    is a tensor that will weight each channel of the
    input.

    Args:
        inputs: Input tensor, above which the channel
        map will be calculated

    Returns:
        Channel Attention map
    """

    max_features = GlobalMaxPool2D()(inputs)
    avg_features = GlobalAvgPool2D()(inputs)

    extracted_max = extraction_network(max_features)
    extracted_avg = extraction_network(avg_features)

    merge = Add()([extracted_avg, extracted_max])

    return reshape(sigmoid(merge), (-1, 1, 1, inputs.shape[3]))
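
A sketch of how the map above would be used, assuming the Keras/TF names inside channel_attention are imported as in its source file and that extraction_network is the shared two-layer MLP this style of channel attention typically employs; the stand-in below and all shapes are assumptions, not taken from the snippet itself.

import tensorflow as tf
from tensorflow.keras.layers import Dense

channels = 64                                               # hypothetical channel count
_shared_mlp = tf.keras.Sequential([Dense(channels // 8, activation='relu'), Dense(channels)])

def extraction_network(features):                           # stand-in for the shared MLP used above
    return _shared_mlp(features)

feature_map = tf.random.normal((2, 32, 32, channels))
attention_map = channel_attention(feature_map)              # (2, 1, 1, 64) channel weights
print((feature_map * attention_map).shape)                  # (2, 32, 32, 64)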
Example #18
  def __call__(self, tf_X, tf_mr1_X, tf_mr2_X):
      mask_list = []
      h1 = LeakyReLU(alpha = self.leakiness)(self.Bnorm1(self.conv1(tf_X)))
      mr1_h1 = LeakyReLU(alpha=self.leakiness)(self.mr1_Bnorm1(self.mr1_conv1(tf_mr1_X)))
      mr2_h1 = LeakyReLU(alpha=self.leakiness)(self.mr2_Bnorm1(self.mr2_conv1(tf_mr2_X)))
      
      h2 = LeakyReLU(alpha=self.leakiness)(self.Bnorm2(self.conv2(concatenate([h1, mr1_h1]))))
                                           
      h3 = self.to_input_shape(concatenate([h2, mr2_h1]))
      print("h3:", h3.shape)
      
      for batch_num in range(h3.shape[0]):
          h4 = self.dense1(h3[batch_num, :, :])
          h5 = self.dense2(h4)
          h6 = self.dense3(h5)
          h7 = self.dense4(h6)
          h8 = self.zero_pad(h7)
          mask_list.append(h8)
      h9 = tf.convert_to_tensor(mask_list, dtype=tf.float32)
      h10 = tf.expand_dims(h9, -1)
      dh1 = sigmoid(self.deconv1(h10))
      print("dh1:", dh1.shape)
      return dh1
 
      
Example #19
 def call(self, x, training=True):
     if self.embedding_to_occur: x = self.incorporate_embeddings(x)
     x = self.process_hidden_layers(x, training)
     out = self.process_output_layers(x)
     if self.y_range:
         out = self.y_range[0] + (
             self.y_range[1] - self.y_range[0]) * activations.sigmoid(out)
     return out
Example #20
 def __call__(self, inputs):
     y = self.contrast(inputs) + self.avg_pool(inputs)
     y = self.conv1(y)
     y = self.bn1(y)
     y = self.relu1(y)
     y = self.conv2(y)
     y = sigmoid(y)
     return inputs * y
Example #21
def Squeeze_excitation_layer_simple(input_x, out_dim, ratio, layer_name):

    squeeze = GlobalAveragePooling2D()(input_x)
    excitation = sigmoid(squeeze)
    excitation = tf.reshape(excitation, [-1, 1, 1, out_dim])
    scale = input_x * excitation

    return scale
Example #22
    def CA(self, X):
        c = list(X.shape)[-1]
        gap = GlobalAveragePooling2D()(X)
        d = tf.reshape(gap, shape=(-1,1,1,c))
        d1 = ReLU()(Conv2D(filters=c//8, kernel_size=(1,1))(d))
        d_bid = sigmoid(Conv2D(filters=c, kernel_size=(1,1))(d1))

        return X*d_bid
Example #23
 def call(self, inputs, training=False):
     """ Implements the feed forward part of the network """
     x = inputs
     for layer in self.layers_[:-1]:
         x = layer(x)
         x = relu(x, alpha=self.leaky_alpha)
         x = self.dropout(x, training)
     x = self.layers_[-1](x)
     return sigmoid(x)
Example #24
    def call(self, inputs, training=False):
        x, a, i = inputs
        ##global params, move to default non-option
        if self.glob:
            glob_avg = tf.math.segment_mean(x, i)
            glob_var = abs(
                tf.math.subtract(tf.math.segment_mean(multiply([x, x]), i),
                                 multiply([glob_avg, glob_avg])))
            glob_max = tf.math.segment_max(x, i)
            glob_min = tf.math.segment_min(x, i)
            xglob = tf.concat([glob_avg, glob_var, glob_max, glob_min], axis=1)
        a, e = self.generate_edge_features(x, a)
        ##this norm should maybe be further down ahead of edgeconv
        if self.edgenorm:
            e = self.norm_edge(e)
        x = self.MP([x, a, e])
        if self.edgeconv:
            a, e = self.generate_edge_features(x, a)
            x = self.ECC1([x, a, e])
        for conv in self.GCNs:
            x = conv([x, a])
        x1 = self.Pool1([x, i])
        x2 = self.Pool2([x, i])
        x3 = self.Pool3([x, i])
        xpool = tf.concat([x1, x2, x3], axis=1)
        if self.glob:
            x = tf.concat([xpool, xglob], axis=1)
        else:
            x = xpool
        for decode_layer, dropout_layer, norm_layer in zip(
                self.decode, self.dropout_layers, self.norm_layers):
            x = dropout_layer(x, training=training)
            x = self.decode_activation(decode_layer(x))
            x = norm_layer(x, training=training)

        x_loge = self.loge[0](x)
        x_loge = self.loge[1](x_loge)
        x_loge = self.loge_out(x_loge)

        x_angles = self.angles[0](x)
        x_angles = self.angles[1](x_angles)
        x_angles = self.angles_out(x_angles)
        zeniazi = sigmoid(self.angle_scale(x_angles))

        if self.n_sigs > 0:
            x_sigs = self.sigs[0](x)
            x_sigs = self.sigs[1](x_sigs)
            x_sigs = tf.abs(self.sigs_out(x_sigs)) + eps
        #could add correlation here
        xs = tf.stack(
            [x_loge[:, 0], zeniazi[:, 0] * np.pi, zeniazi[:, 1] * 2 * np.pi],
            axis=1)
        if self.n_sigs > 0:
            return tf.concat([xs, x_sigs], axis=1)
        else:
            return xs
Example #25
 def __call__(self, inputs):
     temp1 = self.contrast(inputs)
     temp2 = self.avg_pool(inputs)
     y = temp1 + temp2
     y = self.conv1(y)
     y = self.bn1(y)
     y = self.relu1(y)
     y = self.conv2(y)
     y = sigmoid(y)
     return inputs * y
Example #26
 def call(self, x, training=True):
     """Forward pass for the network. Note that it expects input data in the form (batch, seq length, features)"""
     if self.embedding_to_occur: x = self.incorporate_embeddings(x)
     training = training or training is None
     x, restricted_to_final_seq = self.process_hidden_layers(x, training)
     out = self.process_output_layers(x, restricted_to_final_seq)
     if self.y_range:
         out = self.y_range[0] + (
             self.y_range[1] - self.y_range[0]) * activations.sigmoid(out)
     return out
Example #27
def Squeeze_excitation_layer(input_x, out_dim, ratio):

    squeeze = GlobalAveragePooling2D()(input_x)
    excitation = Dense(units=out_dim // ratio)(squeeze)
    excitation = ReLU()(excitation)
    excitation = Dense(units=out_dim)(excitation)
    excitation = sigmoid(excitation)
    excitation = tf.reshape(excitation, [-1, 1, 1, out_dim])
    scale = input_x * excitation

    return scale
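
A quick usage sketch for the block above, assuming the Keras names it uses are imported; the feature-map shape and ratio are made up, and out_dim must match the input's channel count for the final rescaling to broadcast.

feature_map = tf.random.normal((2, 14, 14, 64))              # hypothetical (batch, H, W, C)
recalibrated = Squeeze_excitation_layer(feature_map, out_dim=64, ratio=4)
print(recalibrated.shape)                                     # (2, 14, 14, 64)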
Example #28
 def call(self, inputs):
     for i in np.arange(len(self.Dens)):
         if i == 0:
             x = self.Dens[i](inputs)
         else:
             x = self.Dens[i](x)
         x = self.BN[i](x)
         x = self.Drop[i](x)
     x = self.dens_last(x)
     # x = self.BN_last(x)
     return activations.sigmoid(x)
Example #29
    def call(self, y):
        yTimeLow = tf.cast(y[:, 0] > self.fromTime, dtype=tf.float32)
        yTimeHigh = tf.cast(y[:, 0] <= self.toTime, dtype=tf.float32)

        yPlaceA = tf.cast(tf.logical_and(self.atGate(0, y[:, 1], y[:, 2]),
                                         y[:, 3] == 1),
                          dtype=tf.float32)
        yPlaceB = tf.cast(tf.logical_and(self.atGate(1, y[:, 1], y[:, 2]),
                                         y[:, 4] == 1),
                          dtype=tf.float32)
        yPlaceC = tf.cast(tf.logical_and(self.atGate(2, y[:, 1], y[:, 2]),
                                         y[:, 5] == 1),
                          dtype=tf.float32)
        yPlace = activations.sigmoid(
            (yPlaceA + yPlaceB + yPlaceC - 0.5) * 10)  # OR

        yHeadGear = tf.cast(tf.logical_and(y[:, 6] == 1, y[:, 7] == 0),
                            dtype=tf.float32)
        return activations.sigmoid(
            (yTimeLow + yTimeHigh + yPlace + yHeadGear - 3.5) * 10)  # AND
Example #30
    def call(self, x):
        # the three detection heads arrive in x as (small, medium, large)
        raw_s, raw_m, raw_l = x

        raw_s = self.reshape0(raw_s)
        raw_m = self.reshape1(raw_m)
        raw_l = self.reshape2(raw_l)

        ## extract 
        txty_s, twth_s, conf_s, prob_s = tf.split(
            raw_s, (2, 2, 1, self.num_classes), axis=-1
        )
        txty_m, twth_m, conf_m, prob_m = tf.split(
            raw_m, (2, 2, 1, self.num_classes), axis=-1
        )
        txty_l, twth_l, conf_l, prob_l = tf.split(
            raw_l, (2, 2, 1, self.num_classes), axis=-1
        )

        txty_s = activations.sigmoid(txty_s)
        txty_s = (txty_s - self.a_half[0]) * self.scales[0] + self.a_half[0]
        bxby_s = (txty_s + self.grid_coord[0]) / self.grid_size[0]
        txty_m = activations.sigmoid(txty_m)
        txty_m = (txty_m - self.a_half[1]) * self.scales[1] + self.a_half[1]
        bxby_m = (txty_m + self.grid_coord[1]) / self.grid_size[1]
        txty_l = activations.sigmoid(txty_l)
        txty_l = (txty_l - self.a_half[2]) * self.scales[2] + self.a_half[2]
        bxby_l = (txty_l + self.grid_coord[2]) / self.grid_size[2]

        conf_s = activations.sigmoid(conf_s)
        conf_m = activations.sigmoid(conf_m)
        conf_l = activations.sigmoid(conf_l)

        prob_s = activations.sigmoid(prob_s)
        prob_m = activations.sigmoid(prob_m)
        prob_l = activations.sigmoid(prob_l)

        bwbh_s = (self.anchors[0] / self.image_width) * backend.exp(twth_s)
        bwbh_m = (self.anchors[1] / self.image_width) * backend.exp(twth_m)
        bwbh_l = (self.anchors[2] / self.image_width) * backend.exp(twth_l)

        pred_s = self.concat0([bxby_s, bwbh_s, conf_s, prob_s])
        pred_m = self.concat1([bxby_m, bwbh_m, conf_m, prob_m])
        pred_l = self.concat2([bxby_l, bwbh_l, conf_l, prob_l])

        return pred_s, pred_m, pred_l