Example #1
def competition_coef(y_true, y_pred, smooth=1):
    y_pred = K.argmax(y_pred, axis=-1)
    y_true = K.argmax(y_true, axis=-1)
    # Compute tumor + kidney Dice
    tk_pd = K.greater(y_pred, 0)
    tk_gt = K.greater(y_true, 0)
    intersection = K.all(K.stack([tk_gt, tk_pd], axis=3), axis=3)
    tk_dice = (2 * K.sum(K.cast(intersection, K.floatx())) + smooth) / (
            K.sum(K.cast(tk_pd, K.floatx())) + K.sum(K.cast(tk_gt, K.floatx())) + smooth
    )

    # Compute tumor Dice
    tu_pd = K.greater(y_pred, 1)
    tu_gt = K.greater(y_true, 1)
    intersection = K.all(K.stack([tu_pd, tu_gt], axis=3), axis=3)
    tu_dice = (2 * K.sum(K.cast(intersection, K.floatx())) + smooth) / (
            K.sum(K.cast(tu_pd, K.floatx())) + K.sum(K.cast(tu_gt, K.floatx())) + smooth
    )

    return (tk_dice + tu_dice) / 2.0
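A minimal sketch of exercising the metric directly, assuming TF 2.x eager mode and `from tensorflow.keras import backend as K`; the toy one-hot masks (2 samples, 8x8, 3 classes: background / kidney / tumor) are made up for illustration:

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

y_true = tf.one_hot(np.random.randint(0, 3, size=(2, 8, 8)), depth=3)
y_pred = tf.one_hot(np.random.randint(0, 3, size=(2, 8, 8)), depth=3)
print(float(competition_coef(y_true, y_pred)))  # average of kidney+tumor Dice and tumor-only Dice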
Example #2
def summarize(x):
    x = K.permute_dimensions(tf.convert_to_tensor(x), (1, 0, 2))
    confidences, bounding_boxes = x[..., 0], x[..., 1:]

    positive_pred_mask = K.greater(confidences, 0.5)
    positive_pred_mask = K.cast(positive_pred_mask, 'float32')

    sum_confidences = K.sum(confidences, axis=-1)
    sum_positive_pred_mask = K.cast(
        K.greater(sum_confidences, CLASSIFICATION_THRESHOLD), 'float32')

    n_positive_pred = K.sum(positive_pred_mask, axis=-1)

    positive_boxes = bounding_boxes * K.expand_dims(positive_pred_mask,
                                                    axis=-1)
    denominator = n_positive_pred + (
        1 - K.cast(K.greater(n_positive_pred, 0), 'float32'))
    avg_boxes = K.sum(positive_boxes, axis=-2) / K.expand_dims(denominator)

    boxes = avg_boxes * K.expand_dims(sum_positive_pred_mask)

    result = K.concatenate((K.expand_dims(sum_confidences / 3), boxes),
                           axis=-1)

    return result
Example #3
    def call(self, inputs):

        PWM_Score = tf.nn.conv2d(inputs,
                                 self.PWM,
                                 strides=[1, 1, 1, 1],
                                 padding='VALID')

        PWMrc_Score = tf.nn.conv2d(inputs,
                                   self.PWMrc,
                                   strides=[1, 1, 1, 1],
                                   padding='VALID')

        Indicator_f = K.cast(K.greater(PWM_Score, self.score_cut),
                             self.dtype_now)

        Indicator_r = K.cast(K.greater(PWMrc_Score, self.score_cut),
                             self.dtype_now)

        Indicator = Maximum()([Indicator_r, Indicator_f])

        S_relu = Maximum()([PWM_Score, PWMrc_Score])

        S_i_S_max = S_relu - self.max_s

        S_i_S_max_lam = S_i_S_max * self.kernel_1

        K_i_m_n = tf.math.exp(S_i_S_max_lam)

        K_relu = K_i_m_n * Indicator

        Ko_relu = tf.pad(K_relu, self.paddings, 'CONSTANT') * self.kernel_2

        return Ko_relu
Example #4
        def BetheBlochGeant(
                lnbg, kp0, kp1, kp2, kp3,
                kp4):  #kp0=2.33,kp1=0.20,kp2=3.00,kp3=173.0e-9,kp4=0.49848
            bg = K.exp(lnbg)
            mK = 0.307075e-3
            me = 0.511e-3
            rho = kp0
            x0 = kp1 * 2.303
            x1 = kp2 * 2.303
            mI = kp3
            mZA = kp4
            bg2 = bg * bg
            maxT = 2 * me * bg2

            x = lnbg
            lhwI = K.log(28.816e-9 * K.sqrt(K.cast(rho * mZA, dtype=float)) /
                         mI)

            d2 = K.switch(
                K.greater(x, x1), lhwI + x - 0.5,
                K.switch(
                    K.greater(x, x0), lhwI + x - 0.5 + (0.5 - lhwI - x0) *
                    (((x1 - x) / (x1 - x0))**3), 0. * bg))

            return mK * mZA * (1 + bg2) / bg2 * (
                0.5 * K.log(2 * me * bg2 * maxT / (mI * mI)) - bg2 / (1 + bg2) - d2)
Example #5
    def attn(self, embedding):
        print(embedding.shape)
        sent_input = Input(shape=(self.sent_lenth,))
        sent = Embedding(input_dim=embedding.shape[0], output_dim=embedding.shape[1], weights=[embedding], trainable=False,name="sent")(sent_input)
        ent1_input = Input(shape=(self.sent_lenth,))
        ent1 = Embedding(input_dim=embedding.shape[0], output_dim=embedding.shape[1], weights=[embedding], trainable=False,name="ent1")(ent1_input)
        ent2_input = Input(shape=(self.sent_lenth,))
        ent2 = Embedding(input_dim=embedding.shape[0], output_dim=embedding.shape[1], weights=[embedding], trainable=False,name="ent2")(ent2_input)

        # sent_input = Input(shape=(self.sent_lenth,))
        # sent = Embedding(input_dim=20000, output_dim=200, trainable=False, name="word")(sent_input)
        # ent1_input = Input(shape=(self.sent_lenth,))
        # ent1 = Embedding(input_dim=20000, output_dim=200, trainable=False, name="ent1")(ent1_input)
        # ent2_input = Input(shape=(self.sent_lenth,))
        # ent2 = Embedding(input_dim=20000, output_dim=200, trainable=False, name="ent2")(ent2_input)
        #

        mask_s = Lambda(lambda x: K.cast(K.greater(K.expand_dims(x, 2), 0), 'float32'))(sent_input)
        mask_e1 = Lambda(lambda x: K.cast(K.greater(K.expand_dims(x, 2), 0), 'float32'))(ent1_input)
        mask_e2 = Lambda(lambda x: K.cast(K.greater(K.expand_dims(x, 2), 0), 'float32'))(ent2_input)


        # sent = OurBidirectional(LSTM(100, recurrent_dropout=0.25, activation='relu', return_sequences=True))([sent,mask_s])
        s1 = SparseSelfAttention(20, 10)([sent,mask_s])
        s2 = Capsule(num_capsule=self.sent_lenth, dim_capsule=200, routings=3)(sent)
        s2 = Dropout(0.25)(s2)
        e1 = OurBidirectional(LSTM(50, return_sequences=True))([ent1, mask_e1])
        e2 = OurBidirectional(LSTM(50, return_sequences=True))([ent2, mask_e2])  # activation='relu',
        e = Concatenate(axis=2)([e1,e2])
        sent_ent1 = Add()([s1, e])
        sent_ent2 = Add()([s2, e])
        output = Concatenate()([sent_ent1,sent_ent2])
        output = KMaxPooling(k=3)(output)
        # output = Flatten()(output)
        output = Dense(units=128,  # 64
                       kernel_regularizer=regularizers.l2(0.002)  # (L1)
                       )(output)  # (RDD not applicable)
        output = Activation("relu")(output)
        # at2
        # sent =  OurBidirectional(LSTM(100, recurrent_dropout=0.25, activation='relu', return_sequences=True))([sent,mask_s])
        # s1 = SparseSelfAttention(20, 10)([sent,mask_s])
        # s2 = Capsule(num_capsule=140, dim_capsule=200, routings=3)(s1)
        # s3 = Add()([s1,s2])
        # s3 = Dropout(0.2)(s3)
        # output = KMaxPooling(k=3)(s3)
        # output = Dense(units=64,
        #                kernel_regularizer=regularizers.l2(0.001)  # (L1)
        #                )(output)  # (RDD not applicable)
        # output = Activation("relu")(output)



        output = Dense(units=self.Lclass,kernel_regularizer=regularizers.l2(0.002))(output)
        output = Activation("softmax")(output)
        model = Model(inputs=[sent_input, ent1_input, ent2_input], outputs=[output])
        model.compile(loss='categorical_crossentropy', optimizer='adam',
                      metrics=['acc',f1])
        model.summary()
        self.model = model
Example #6
def iou(y_true, y_pred):
    y_true = K.cast(K.greater(y_true, 0.5), dtype='float32')
    y_pred = K.cast(K.greater(y_pred, 0.5), dtype='float32')
    inter = K.sum(K.sum(K.squeeze(y_true * y_pred, axis=3), axis=2), axis=1)
    union = K.sum(K.sum(K.squeeze(K.clip(y_true + y_pred, 0, 1), axis=3),
                        axis=2),
                  axis=1)
    return K.mean((inter + K.epsilon()) / (union + K.epsilon()))
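A quick sanity check of `iou` (a sketch, assuming TF 2.x eager mode), with a prediction covering half of an all-ones mask:

import tensorflow as tf

y_true = tf.ones((1, 4, 4, 1))
y_pred = tf.concat([tf.ones((1, 4, 2, 1)), tf.zeros((1, 4, 2, 1))], axis=2)
print(float(iou(y_true, y_pred)))  # ~0.5: intersection is 8 pixels, union is 16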
Example #7
def metric_bit_error_rate(ground_truth, observed):
    threshold = np.ones(observed.shape[-1:]) * 0.5
    threshold = K.constant(threshold.tolist())
    observed = K.greater(observed, threshold)
    ground_truth = K.greater(ground_truth, threshold)
    compare = K.equal(ground_truth, observed)
    result = 1 - K.mean(compare)
    return result
Example #8
    def update_state(self, y_true, y_pred, sample_weight=None):

        y_true = K.greater(y_true, self.threshold)
        y_pred = K.greater(y_pred, self.threshold)
        pos = tf.math.logical_and(y_true, y_pred)
        # print(pos)
        # print(K.equal(y_true, y_pred))
        true_pos = K.sum(K.cast(pos, dtype=tf.float32))
        self.cat_true_pos.assign_add(true_pos)
Example #9
 def _get_semihard_anchor_negative_triplet_mask(self, negative_dist: Tensor,
                                                hardest_positive_dist: Tensor,
                                                mask_negative: Tensor) -> Tensor:
     # mask max(dist(a,p)) < dist(a,n)
     mask = K.greater(negative_dist, hardest_positive_dist)
     mask = K.cast(mask, K.dtype(negative_dist))
     mask_semihard = K.cast(K.expand_dims(K.greater(K.sum(mask, 1), 0.0), 1), K.dtype(negative_dist))
     mask = mask_negative * (1 - mask_semihard) + mask * mask_semihard
     return mask
Example #10
    def call(self, inputs, **kwargs):
        input_shape = K.int_shape(inputs)
        sequence_length, d_model = input_shape[-2:]
        # output of the "sigmoid halting unit" (not the probability yet)
        halting = K.sigmoid(
            K.reshape(
                K.bias_add(K.dot(K.reshape(inputs, [-1, d_model]),
                                 self.halting_kernel),
                           self.halting_biases,
                           data_format='channels_last'),
                [-1, sequence_length]))
        if self.zeros_like_halting is None:
            self.initialize_control_tensors(halting)
        # useful flags
        step_is_active = K.greater(self.halt_budget, 0)
        no_further_steps = K.less_equal(self.halt_budget - halting, 0)
        # halting probability is equal to
        # a. halting output if this isn't the last step (we have some budget)
        # b. to remainder if it is,
        # c. and zero for the steps that shouldn't be executed at all
        #    (out of budget for them)
        halting_prob = K.switch(
            step_is_active, K.switch(no_further_steps, self.remainder,
                                     halting), self.zeros_like_halting)
        self.active_steps += K.switch(step_is_active, self.ones_like_halting,
                                      self.zeros_like_halting)
        # We don't know which step is the last, so we keep updating
        # expression for the loss with each call of the layer
        self.ponder_cost = (self.time_penalty_t *
                            K.mean(self.remainder + self.active_steps))
        # Updating "the remaining probability" and the halt budget
        self.remainder = K.switch(no_further_steps, self.remainder,
                                  self.remainder - halting)
        self.halt_budget -= halting  # OK to become negative

        # If none of the inputs are active at this step, then instead
        # of zeroing them out by multiplying to all-zeroes halting_prob,
        # we can simply use a constant tensor of zeroes, which means that
        # we won't even calculate the output of those steps, saving
        # some real computational time.
        if self.zeros_like_input is None:
            self.zeros_like_input = K.zeros_like(inputs,
                                                 name='zeros_like_input')
        # just because K.any(step_is_active) doesn't work in PlaidML
        any_step_is_active = K.greater(K.sum(K.cast(step_is_active, 'int32')),
                                       0)
        step_weighted_output = K.switch(
            any_step_is_active,
            K.expand_dims(halting_prob, -1) * inputs, self.zeros_like_input)
        if self.weighted_output is None:
            self.weighted_output = step_weighted_output
        else:
            self.weighted_output += step_weighted_output
        return [inputs, self.weighted_output]
Example #11
    def wrapper(y_true, y_pred):
        if (usesoftmax):
            y_pred = tf.keras.activations.softmax(y_pred)
        y_pred = backend.clip(y_pred, _EPSILON, 1.0 - _EPSILON)
        """ here all calculations will be based on the class greater than 0, except accuracy"""
        avgIOU = 0.0

        for i in range(batch_size):
            numUnion = 1.0
            recall = 0.0
            numClass = 0.0
            IOU = 0.0
            mask = backend.argmax(y_true[i], -1)
            pred = backend.argmax(y_pred[i], -1)

            for c in np.arange(1, num_classes, 1):
                msk_equal = backend.cast(backend.equal(mask, c),
                                         dtype='float32')

                masks_sum = backend.sum(msk_equal)

                predictions_sum = backend.sum(
                    backend.cast(backend.equal(pred, c), 'float32'))

                numTrue = backend.sum(
                    backend.cast(backend.equal(pred, c), 'float32') *
                    backend.cast(backend.equal(mask, c), 'float32'))
                unionSize = masks_sum + predictions_sum - numTrue
                maskhaslabel = backend.greater(masks_sum, 0)
                predhaslabel = backend.greater(predictions_sum, 0)

                predormaskexistlabel = backend.any(backend.stack(
                    [maskhaslabel, predhaslabel], axis=0),
                                                   axis=0)

                IOU = backend.switch(predormaskexistlabel,
                                     lambda: IOU + numTrue / unionSize,
                                     lambda: IOU)
                numUnion = backend.switch(predormaskexistlabel,
                                          lambda: numUnion + 1,
                                          lambda: numUnion)
                recall = backend.switch(maskhaslabel,
                                        lambda: recall + numTrue / masks_sum,
                                        lambda: recall)
                numClass = backend.switch(maskhaslabel, lambda: numClass + 1,
                                          lambda: numClass)
            IOU = IOU / numUnion
            avgIOU = avgIOU + IOU
        avgIOU = avgIOU / batch_size
        iou_loss = 1.0 - avgIOU
        # print(np.shape(y_true), np.shape(y_pred))
        main_loss = backend.mean(weighted_loss_fn(y_true, y_pred))
        # dice_loss = soft_dice_loss(y_true, y_pred)
        return main_loss + 0.1 * iou_loss
Example #12
def weighted_sum(first,
                 second,
                 sigma,
                 first_threshold=-np.inf,
                 second_threshold=np.inf):
    logit_probs = first * sigma + second * (1.0 - sigma)
    infty_tensor = K.ones_like(logit_probs) * INFTY
    logit_probs = K.switch(K.greater(first, first_threshold), logit_probs,
                           infty_tensor)
    logit_probs = K.switch(K.greater(second, second_threshold), logit_probs,
                           infty_tensor)
    return logit_probs
Example #13
def mask_func(input, mask, mode='add'):
    if K.ndim(mask) == 2:
        # mask: [batch, steps] -> [batch, steps, 1]
        mask = K.expand_dims(
            K.expand_dims(K.cast(K.greater(mask, 0), tf.float32), 2), 0)
    else:
        mask = K.expand_dims(K.cast(K.greater(mask, 0), tf.float32), 0)

    if mode == 'add':
        mask = (1 - mask) * 1e10
        return input + mask
    else:
        return input * mask
Example #14
    def get_updates(self, params, loss):
        grads = self.get_gradients(loss, params)
        shapes = [K.shape(p) for p in params]
        alphas = [
            K.variable(K.ones(shape) * self.init_alpha) for shape in shapes
        ]
        old_grads = [K.zeros(shape) for shape in shapes]
        prev_weight_deltas = [K.zeros(shape) for shape in shapes]
        self.weights = alphas + old_grads
        self.updates = []

        for param, grad, old_grad, prev_weight_delta, alpha in zip(
                params, grads, old_grads, prev_weight_deltas, alphas):
            # equation 4
            new_alpha = K.switch(
                K.greater(grad * old_grad, 0),
                K.minimum(alpha * self.scale_up, self.max_alpha),
                K.switch(K.less(grad * old_grad, 0),
                         K.maximum(alpha * self.scale_down, self.min_alpha),
                         alpha))

            # equation 5
            new_delta = K.switch(
                K.greater(grad, 0), -new_alpha,
                K.switch(K.less(grad, 0), new_alpha, K.zeros_like(new_alpha)))

            # equation 7
            weight_delta = K.switch(K.less(grad * old_grad, 0),
                                    -prev_weight_delta, new_delta)

            # equation 6
            new_param = param + weight_delta

            # reset gradient_{t-1} to 0 if gradient sign changed (so that we do
            # not "double punish", see paragraph after equation 7)
            grad = K.switch(K.less(grad * old_grad, 0), K.zeros_like(grad),
                            grad)

            # Apply constraints
            #if param in constraints:
            #    c = constraints[param]
            #    new_param = c(new_param)

            self.updates.append(K.update(param, new_param))
            self.updates.append(K.update(alpha, new_alpha))
            self.updates.append(K.update(old_grad, grad))
            self.updates.append(K.update(prev_weight_delta, weight_delta))

        return self.updates
Example #15
 def get_psp(self, output_spikes):
     new_spiketimes = tf.where(k.greater(output_spikes, 0),
                               k.ones_like(output_spikes) * self.time,
                               self.last_spiketimes)
     new_spiketimes = tf.where(k.less(output_spikes, 0),
                               k.zeros_like(output_spikes) * self.time,
                               new_spiketimes)
     assign_new_spiketimes = tf.assign(self.last_spiketimes, new_spiketimes)
     with tf.control_dependencies([assign_new_spiketimes]):
         last_spiketimes = self.last_spiketimes + 0  # Dummy op
         # psp = k.maximum(0., tf.divide(self.dt, last_spiketimes))
         psp = tf.where(k.greater(last_spiketimes, 0),
                        k.ones_like(output_spikes) * self.dt,
                        k.zeros_like(output_spikes))
     return psp
Example #16
    def cindex(y_true, y_pred):
        y = y_true[:, 0]
        e = y_true[:, 1]
        ydiff = y[tf.newaxis, :] - y[:, tf.newaxis]
        yij = K.cast(
            K.greater(ydiff, 0),
            K.floatx()) + K.cast(K.equal(ydiff, 0), K.floatx()) * K.cast(
                e[:, tf.newaxis] != e[tf.newaxis, :], K.floatx())  # yi > yj
        is_valid_pair = yij * e[:, tf.newaxis]

        ypdiff = tf.transpose(y_pred) - y_pred
        ypij = K.cast(K.greater(ypdiff, 0), K.floatx()) + 0.5 * K.cast(
            K.equal(ypdiff, 0), K.floatx())  # yi > yj
        cidx = (K.sum(ypij * is_valid_pair)) / K.sum(is_valid_pair)
        return cidx
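`cindex` appears to be defined as a nested closure (note the indentation); a minimal sketch of calling it directly, assuming it is reachable at module scope and that a higher prediction means longer predicted survival (each y_true row packs [time, event]):

import tensorflow as tf

y_true = tf.constant([[2., 1.], [5., 1.], [7., 0.]])  # [survival time, event indicator]
y_pred = tf.constant([[0.1], [0.4], [0.9]])           # monotone in survival time
print(float(cindex(y_true, y_pred)))                  # -> 1.0, every comparable pair is concordant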
Example #17
 def add_boundary_energy(self, energy, mask, start, end):
     start = K.expand_dims(K.expand_dims(start, 0), 0)
     end = K.expand_dims(K.expand_dims(end, 0), 0)
     if mask is None:
         energy = K.concatenate([energy[:, :1, :] + start, energy[:, 1:, :]],
                                axis=1)
         energy = K.concatenate([energy[:, :-1, :], energy[:, -1:, :] + end],
                                axis=1)
     else:
         mask = K.expand_dims(K.cast(mask, K.floatx()))
         start_mask = K.cast(K.greater(mask, self.shift_right(mask)), K.floatx())
         end_mask = K.cast(K.greater(self.shift_left(mask), mask), K.floatx())
         energy = energy + start_mask * start
         energy = energy + end_mask * end
     return energy
Example #18
    def get_updates(self, params, loss):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        lr = self.lr
        if self.inital_decay > 0:
            lr *= (1. / (1. + self.decay * self.iterations))

        t = self.iterations + 1
        lr_t = lr * K.sqrt(1. - K.pow(self.beta_2, t)) / (
            1. - K.pow(self.beta_1, t))

        shapes = [K.get_variable_shape(p) for p in params]
        ms = [K.zeros(shape) for shape in shapes]
        vs = [K.zeros(shape) for shape in shapes]
        f = K.variable(0)
        d = K.variable(1)
        self.weights = [self.iterations] + ms + vs + [f, d]

        cond = K.greater(t, K.variable(1))
        small_delta_t = K.switch(K.greater(loss, f), self.small_k + 1,
                                 1. / (self.big_K + 1))
        big_delta_t = K.switch(K.greater(loss, f), self.big_K + 1,
                               1. / (self.small_k + 1))

        c_t = K.minimum(K.maximum(small_delta_t, loss / (f + self.epsilon)),
                        big_delta_t)
        f_t = c_t * f
        r_t = K.abs(f_t - f) / (K.minimum(f_t, f))
        d_t = self.beta_3 * d + (1 - self.beta_3) * r_t

        f_t = K.switch(cond, f_t, loss)
        d_t = K.switch(cond, d_t, K.variable(1.))

        self.updates.append(K.update(f, f_t))
        self.updates.append(K.update(d, d_t))

        for p, g, m, v in zip(params, grads, ms, vs):
            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
            p_t = p - lr_t * m_t / (d_t * K.sqrt(v_t) + self.epsilon)

            self.updates.append(K.update(m, m_t))
            self.updates.append(K.update(v, v_t))

            new_p = p_t
            self.updates.append(K.update(p, new_p))
        return self.updates
Example #19
 def score(y_true, y_pred):
     y_error = y_pred - y_true
     bool_idx = K.greater(y_error, 0)
     loss1 = K.exp(-y_error / 13) - 1  # y_error < 0 (early prediction)
     loss2 = K.exp(y_error / 10) - 1   # y_error > 0 (late prediction)
     loss = K.switch(bool_idx, loss2, loss1)
     return K.sum(loss)
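A small numeric check (a sketch, assuming the function is accessible at module level and `K` is the Keras backend): the asymmetric exponentials penalise late predictions (y_pred > y_true) more heavily than early ones of the same magnitude:

import tensorflow as tf

y_true = tf.constant([[10.0], [10.0]])
y_pred = tf.constant([[5.0], [15.0]])   # one early, one late by the same 5 units
print(float(score(y_true, y_pred)))     # exp(5/13)-1 + exp(5/10)-1 ≈ 1.12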
Example #20
def dice_coef(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred = K.cast(y_pred, 'float32')
    y_pred_f = K.cast(K.greater(K.flatten(y_pred), 0.5), 'float32')
    intersection = y_true_f * y_pred_f
    score = 2. * K.sum(intersection) / (K.sum(y_true_f) + K.sum(y_pred_f))
    return score
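Since the prediction is hard-thresholded at 0.5, this Dice coefficient is only useful as a monitoring metric, not as a differentiable loss. A minimal sketch (TF 2.x eager mode assumed):

import tensorflow as tf

y_true = tf.constant([[0., 1., 1., 0.]])
y_pred = tf.constant([[0.1, 0.9, 0.7, 0.2]])
print(float(dice_coef(y_true, y_pred)))  # -> 1.0: the thresholded prediction matches exactly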
Example #21
def weighted_bce_dice_loss(y_true, y_pred):
    y_true = K.cast(y_true, 'float32')
    y_pred = K.cast(y_pred, 'float32')
    # if we want to get same size of output, kernel size must be odd number
    if K.int_shape(y_pred)[1] == 128:
        kernel_size = 11
    elif K.int_shape(y_pred)[1] == 256:
        kernel_size = 21
    else:
        raise ValueError('Unexpected image size')
    averaged_mask = K.pool2d(y_true,
                             pool_size=(kernel_size, kernel_size),
                             strides=(1, 1),
                             padding='same',
                             pool_mode='avg')
    border = K.cast(K.greater(averaged_mask, 0.005), 'float32') * K.cast(
        K.less(averaged_mask, 0.995), 'float32')
    weight = K.ones_like(averaged_mask)
    w0 = K.sum(weight)
    weight += border * 2
    w1 = K.sum(weight)
    weight *= (w0 / w1)
    loss = weighted_bce_loss(y_true, y_pred, weight) + (
        1 - weighted_dice_coeff(y_true, y_pred, weight))
    return loss
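The border weighting above is the key trick: an average pool of the binary mask lies strictly between 0 and 1 only where the pooling window straddles an object boundary. A standalone sketch of just that step (assuming a 128x128 single-channel mask, hence kernel_size = 11):

import tensorflow as tf
from tensorflow.keras import backend as K

y_true = tf.cast(tf.random.uniform((1, 128, 128, 1)) > 0.5, 'float32')
averaged_mask = K.pool2d(y_true, pool_size=(11, 11), strides=(1, 1),
                         padding='same', pool_mode='avg')
border = K.cast(K.greater(averaged_mask, 0.005), 'float32') * \
         K.cast(K.less(averaged_mask, 0.995), 'float32')  # 1 near boundaries, 0 elsewhere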
Example #22
 def binary_accuracy(y_true, y_pred):
     validity = K.cast(validity_mask, dtype='float32')
     num_lines = K.sum(validity, axis=1)
     return K.sum(K.cast(K.equal(
         y_true, K.cast(K.greater(y_pred, threshold), dtype='float32')),
                         dtype='float32') * validity,
                  axis=1) / num_lines
Example #23
    def make_readout_decode_model(self, max_output_len=32):
        src_seq_input = Input(shape=(None, ), dtype='int32')
        tgt_start_input = Input(shape=(1, ), dtype='int32')
        src_seq = src_seq_input
        enc_mask = Lambda(lambda x: K.cast(K.greater(x, 0), 'float32'))(
            src_seq)
        src_emb = self.i_word_emb(src_seq)
        if self.pos_emb:
            src_emb = add_layer([src_emb, self.pos_emb(src_seq)])

        src_emb = self.emb_dropout(src_emb)
        enc_output = self.encoder(src_emb, src_seq)

        tgt_emb = self.o_word_emb(tgt_start_input)
        tgt_seq = Lambda(lambda x: K.repeat_elements(x, max_output_len, 1))(
            tgt_start_input)
        rep_input = Lambda(lambda x: K.repeat_elements(x, max_output_len, 1))(
            tgt_emb)

        cell = ReadoutDecoderCell(self.o_word_emb, self.pos_emb, self.decoder,
                                  self.target_layer)
        final_output = InferRNN(cell, return_sequences=True)(rep_input,
          initial_state=[tgt_start_input, K.ones_like(tgt_start_input), K.zeros_like(tgt_seq)] + \
            [rep_input for _ in self.decoder.layers],
          constants=[enc_output, enc_mask])
        final_output = Lambda(lambda x: K.squeeze(x, -1))(final_output)
        self.readout_model = Model([src_seq_input, tgt_start_input],
                                   final_output)
Example #24
def select_threshold(tensor, thresh=None):
    """Returns a tensor with items greater than the provided threshold set to 1, else 0."""
    if thresh is None:  # If threshold is not provided.
        return tensor
    tensor = K.greater(tensor, thresh)
    tensor = tf.cast(tensor, tf.float32)
    return tensor
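Usage sketch (eager mode assumed, with the module-level `tf`/`K` imports in place):

import tensorflow as tf

t = tf.constant([0.2, 0.7, 0.5])
print(select_threshold(t, thresh=0.5).numpy())  # -> [0. 1. 0.]  (strictly greater than 0.5)
print(select_threshold(t).numpy())              # no threshold given: tensor returned unchanged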
Example #25
    def get_new_mem(self):
        """Add input to membrane potential."""

        # Destroy impulse if in refractory period
        masked_impulse = self.impulse if self.tau_refrac == 0 else \
            tf.where(k.greater(self.refrac_until, self.time),
                     k.zeros_like(self.impulse), self.impulse)

        new_mem = self.mem + masked_impulse

        if self.config.getboolean('cell', 'leak'):
            # Todo: Implement more flexible version of leak!
            new_mem = tf.where(k.greater(new_mem, 0), new_mem - 0.1 * self.dt,
                               new_mem)

        return new_mem
Example #26
    def get_updates(self, params, loss):
        grads = self.get_gradients(loss, params)
        shapes = [K.get_variable_shape(p) for p in params]
        alphas = [
            K.variable(K.ones(shape) * self.init_alpha) for shape in shapes
        ]
        old_grads = [K.zeros(shape) for shape in shapes]
        self.weights = alphas + old_grads
        self.updates = []

        for p, grad, old_grad, alpha in zip(params, grads, old_grads, alphas):
            grad = K.sign(grad)
            new_alpha = K.switch(
                K.greater(grad * old_grad, 0),
                K.minimum(alpha * self.scale_up, self.max_alpha),
                K.switch(K.less(grad * old_grad, 0),
                         K.maximum(alpha * self.scale_down, self.min_alpha),
                         alpha))

            grad = K.switch(K.less(grad * old_grad, 0), K.zeros_like(grad),
                            grad)
            new_p = p - grad * new_alpha

            # Apply constraints.
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)
            self.updates.append(K.update(p, new_p))
            self.updates.append(K.update(alpha, new_alpha))
            self.updates.append(K.update(old_grad, grad))

        return self.updates
Example #27
    def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        lr = self.lr
        adam_lr = self.adam_lr
        if self.initial_decay > 0:
            lr = lr * (1. / (1. + self.decay *
                             K.cast(self.iterations, K.dtype(self.decay))))
            adam_lr = adam_lr * (1. / (1. + self.decay * K.cast(
                self.iterations, K.dtype(self.decay))))

        t = K.cast(self.iterations, K.floatx()) + 1
        adam_lr_t = adam_lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /
                               (1. - K.pow(self.beta_1, t)))

        # momentum
        shapes = [K.int_shape(p) for p in params]
        moments = [K.zeros(shape) for shape in shapes]
        if self.amsgrad:
            vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        else:
            vhats = [K.zeros(1) for _ in params]
        self.ms = K.zeros(K.int_shape(params[0]), dtype=K.dtype(params[0]))
        self.vs = K.zeros(K.int_shape(params[0]), dtype=K.dtype(params[0]))
        self.weights = [self.iterations] + moments + vhats + [self.ms
                                                              ] + [self.vs]
        for i, (p, g, m, vhat) in enumerate(zip(params, grads, moments,
                                                vhats)):
            v = self.momentum * m - lr * g  # velocity
            self.updates.append(K.update(m, v))

            if self.nesterov:
                new_p = p + self.momentum * v - lr * g
            else:
                new_p = p + v

            if i == 0 and self.e2efs_layer is not None:
                nnz = K.sum(K.cast(K.greater(p, 0.), K.floatx()))
                m_t = (self.beta_1 * self.ms) + (1. - self.beta_1) * g
                v_t = (self.beta_2 *
                       self.vs) + (1. - self.beta_2) * K.square(g)
                if self.amsgrad:
                    vhat_t = K.maximum(vhat, v_t)
                    p_t = p - adam_lr_t * m_t / (K.sqrt(vhat_t) + K.epsilon())
                    self.updates.append(K.update(vhat, vhat_t))
                else:
                    p_t = p - adam_lr_t * m_t / (K.sqrt(v_t) + K.epsilon())

                self.updates.append(K.update(self.ms, m_t))
                self.updates.append(K.update(self.vs, v_t))
                new_p = K.switch(K.less_equal(nnz, self.e2efs_layer.units),
                                 new_p, p_t)

            # Apply constraints.
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)

            self.updates.append(K.update(p, new_p))
        return self.updates
Example #28
def _custom_loss(y_true, y_pred):
    """Computes loss for single trigger word detection model.
    
    The loss is the sum over samples and timesteps of the binary cross entropy
    between target and prediction.
    
    The only variation is that target values lower than -0.001 are
    interpreted as don't-care values. Where don't-care values are present,
    the loss is forced to zero.
    
    Args:
        y_true (keras.backend.Tensor): target (shape = (#samples, #timesteps, 1))
        y_pred (keras.backend.Tensor): predictions to match against the target,
            same shape of y_true
        
    Returns:
        keras.backend.Tensor: Scalar cost.
    """
    # COMPUTING BINARY CROSS-ENTROPY
    one = K.ones(K.shape(y_true))
    loss = -y_true * K.log(tf.clip_by_value(y_pred, 1e-10, 1.0)) - (
        one - y_true) * K.log(tf.clip_by_value(one - y_pred, 1e-10, 1.0))

    # SETTING TO ZERO WHERE TARGET IS "DON'T CARE"
    thres = tf.fill(K.shape(y_true), -0.001)
    fil = tf.cast(K.greater(y_true, thres), tf.float32)
    loss_filtered = loss * fil

    # SUMMING OVER TIMESTEPS AND SAMPLES
    cost = K.sum(loss_filtered)
    return cost
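A small sketch of the don't-care behaviour (assuming TF 2.x eager mode and that `K`/`tf` are imported as in the snippet); the third timestep is labelled -1 and contributes nothing to the cost:

import tensorflow as tf

y_true = tf.constant([[[1.0], [0.0], [-1.0]]])   # last timestep is "don't care"
y_pred = tf.constant([[[0.9], [0.1], [0.5]]])
print(float(_custom_loss(y_true, y_pred)))       # ≈ 0.21, i.e. -2*log(0.9)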
Example #29
 def selective_acc(y_true, y_pred):
     g = K.cast(K.greater(y_pred[:, -1], 0.5), K.floatx())
     temp1 = K.sum((g) * K.cast(
         K.equal(K.argmax(y_true[:, :-1], axis=-1),
                 K.argmax(y_pred[:, :-1], axis=-1)), K.floatx()))
     temp1 = temp1 / K.sum(g)
     return K.cast(temp1, K.floatx())
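A tiny check of the selective accuracy (a sketch, assuming the function is in scope): the last column of y_pred is the selection head, so only rows with g > 0.5 are scored:

import tensorflow as tf

y_true = tf.constant([[1., 0., 1.], [0., 1., 1.]])   # one-hot labels plus an auxiliary column (unused here)
y_pred = tf.constant([[0.8, 0.2, 0.9], [0.3, 0.7, 0.2]])
print(float(selective_acc(y_true, y_pred)))          # -> 1.0: only the first row is selected, and it is correct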
Example #30
def ActivationCompare():
    """
    Compare activation functions.
    :return:
    """
    inputs = tf.constant(np.linspace(-4, 4, 101))

    # 1. ReLU
    relu = K.relu(inputs)

    # 2. ReLUSwish
    condition = K.greater(inputs, 0)
    relu_swish = tf.where(condition, inputs, 2 * inputs * K.sigmoid(inputs))

    # 3. Swish
    # swish = inputs * K.sigmoid(inputs)
    swish = tf.nn.swish(inputs)

    # 4. Mish
    mish = inputs * K.tanh(K.softplus(inputs))

    plot_dict = {
        'ReLU': [relu.numpy(), 'r'],
        'ReLUSwish': [relu_swish.numpy(), 'b'],
        'Swish': [swish.numpy(), 'g'],
        'Mish': [mish.numpy(), 'y']
    }
    compare_data(plot_dict,
                 inputs.numpy(),
                 save_path='acitvations_pic/activation1.svg')
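The piecewise "ReLUSwish" above can also be reused as a layer activation; a minimal sketch (the function name and layer are my own illustration, not from the source):

import tensorflow as tf
from tensorflow.keras import backend as K

def relu_swish(x):
    # identity for positive inputs, scaled swish for negative ones, as in the comparison above
    return tf.where(K.greater(x, 0), x, 2 * x * K.sigmoid(x))

layer = tf.keras.layers.Dense(16, activation=relu_swish)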