from keras import backend as K

def threshold_accuracy(y_true, y_pred):
    # Fraction of samples whose label matches the thresholded prediction.
    # K.cast works on every backend, so no backend-specific branch is needed.
    threshold = 0.80
    predictions = K.cast(K.lesser(y_pred, threshold), y_true.dtype)
    return K.mean(K.equal(y_true, predictions))
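A quick usage sketch (the model architecture and data sizes below are invented, not from the source); the function plugs into compile like any built-in metric:

from keras.models import Sequential
from keras.layers import Dense

model = Sequential([Dense(1, input_dim=10, activation='sigmoid')])
model.compile(optimizer='sgd', loss='binary_crossentropy',
              metrics=[threshold_accuracy])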
def _l1_smooth_loss(self, y_true, y_pred):
    # Smooth L1 (Huber) loss: 0.5 * x**2 where |x| < 1, |x| - 0.5 elsewhere.
    # The condition must test |x| itself, not |x| - 0.5.
    absolute_value_loss = K.abs(y_true - y_pred)
    square_loss = 0.5 * K.square(y_true - y_pred)
    absolute_value_condition = K.lesser(absolute_value_loss, 1.0)
    # tf.select was renamed tf.where in TensorFlow 1.0.
    l1_smooth_loss = tf.select(absolute_value_condition, square_loss,
                               absolute_value_loss - 0.5)
    return K.sum(l1_smooth_loss, axis=-1)
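As a sanity check, the same piecewise rule in plain NumPy (the residual values are made up): |0.5| < 1 takes the quadratic branch, |2.0| takes the linear one.

import numpy as np

residual = np.array([0.5, 2.0])
loss = np.where(np.abs(residual) < 1.0,
                0.5 * residual ** 2,        # quadratic branch: 0.125
                np.abs(residual) - 0.5)     # linear branch: 1.5
print(loss.sum())                           # 1.625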
Example #3
def accuracy_metric(y, pred):
    # `i` (the column index) and `threshold` are free variables captured
    # from the enclosing scope (see the factory sketch below).
    diff = y[:, i] - pred[:, i]
    abs_diff = K.abs(diff)
    passes = K.cast(K.lesser(abs_diff, threshold), 'float32')
    return K.mean(passes)
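Since `i` and `threshold` are free variables, the function is presumably created inside a loop. A minimal sketch of that pattern (the factory name make_column_accuracy and the default threshold are assumptions, not from the source):

def make_column_accuracy(i, threshold=0.1):
    # Bind the column index and tolerance into a fresh metric function.
    def accuracy_metric(y, pred):
        abs_diff = K.abs(y[:, i] - pred[:, i])
        return K.mean(K.cast(K.lesser(abs_diff, threshold), 'float32'))
    accuracy_metric.__name__ = 'acc_col_%d' % i   # distinct name per column
    return accuracy_metric

metrics = [make_column_accuracy(i) for i in range(3)]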
    def get_split_averages(input_tensor, input_mask, indices):
        # NOTE: `switch` below is this codebase's own element-wise select
        # helper (condition, then-tensor, else-tensor), not K.switch.
        # Splits input tensor into three parts based on the indices and
        # returns average of values prior to index, values at the index and
        # average of values after the index.
        # input_tensor: (batch_size, input_length, input_dim)
        # input_mask: (batch_size, input_length)
        # indices: (batch_size, 1)
        # (1, input_length)
        length_range = K.expand_dims(K.arange(K.shape(input_tensor)[1]), dim=0)
        # (batch_size, input_length)
        batched_range = K.repeat_elements(length_range, K.shape(input_tensor)[0], 0)
        tiled_indices = K.repeat_elements(indices, K.shape(input_tensor)[1], 1)  # (batch_size, input_length)
        greater_mask = K.greater(batched_range, tiled_indices)  # (batch_size, input_length)
        lesser_mask = K.lesser(batched_range, tiled_indices)  # (batch_size, input_length)
        equal_mask = K.equal(batched_range, tiled_indices)  # (batch_size, input_length)

        # We also need to mask these masks using the input mask.
        # (batch_size, input_length)
        if input_mask is not None:
            greater_mask = switch(input_mask, greater_mask, K.zeros_like(greater_mask))
            lesser_mask = switch(input_mask, lesser_mask, K.zeros_like(lesser_mask))

        post_sum = K.sum(switch(K.expand_dims(greater_mask), input_tensor, K.zeros_like(input_tensor)), axis=1)  # (batch_size, input_dim)
        pre_sum = K.sum(switch(K.expand_dims(lesser_mask), input_tensor, K.zeros_like(input_tensor)), axis=1)  # (batch_size, input_dim)
        values_at_indices = K.sum(switch(K.expand_dims(equal_mask), input_tensor, K.zeros_like(input_tensor)), axis=1)  # (batch_size, input_dim)

        # Cast the boolean masks before summing: reducing a bool tensor
        # fails on the TensorFlow backend.
        post_normalizer = K.expand_dims(K.sum(K.cast(greater_mask, 'float32'), axis=1) + K.epsilon(), dim=1)  # (batch_size, 1)
        pre_normalizer = K.expand_dims(K.sum(K.cast(lesser_mask, 'float32'), axis=1) + K.epsilon(), dim=1)  # (batch_size, 1)

        return K.cast(pre_sum / pre_normalizer, 'float32'), values_at_indices, K.cast(post_sum / post_normalizer, 'float32')
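To make the shape bookkeeping concrete, here is a NumPy mirror of the pre-index average (toy sizes invented for illustration):

import numpy as np

batch_size, input_length, input_dim = 2, 5, 3
input_tensor = np.random.rand(batch_size, input_length, input_dim)
indices = np.array([[1], [3]])                     # (batch_size, 1)

positions = np.arange(input_length)[None, :]       # (1, input_length)
lesser_mask = positions < indices                  # (batch_size, input_length)
pre_sum = (input_tensor * lesser_mask[:, :, None]).sum(axis=1)
pre_avg = pre_sum / (lesser_mask.sum(axis=1, keepdims=True) + 1e-7)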
Example #5
    def step(self, x, states):
        h_tm1 = states[0]
        c_tm1 = states[1]
        t_tm1 = states[2]
        B_U = states[3]
        B_W = states[4]

        # Time bookkeeping: for now simply increment t by 1 each step,
        # starting from 0.  Asynchronous/irregular time input would need a
        # better scheme, e.g. slicing a time channel out of the input.
        t = t_tm1 + 1
        self.timegate = K.abs(self.timegate)
        period = self.timegate[0]
        shift = self.timegate[1]
        r_on = self.timegate[2]

        # modulo operation not implemented in Tensorflow backend, so write explicitly.
        # a mod n = a - (n * int(a/n))
        # phi = ((t - shift) % period) / period
        phi = ((t - shift) - (period * ((t - shift) // period))) / period

        # K.switch is not consistent between the Theano and TensorFlow
        # backends, so the piecewise time gate is written out explicitly.
        # Rising half of the open phase:
        up = K.cast(K.lesser(phi, r_on * 0.5), K.floatx()) * 2 * phi / r_on
        # Falling half of the open phase:
        mid = K.cast(K.lesser(phi, r_on), K.floatx()) * \
              K.cast(K.greater(phi, r_on * 0.5), K.floatx()) * (
              2 - (2 * phi / r_on))
        # Leak outside the open phase (phi > r_on, not from the midpoint):
        end = K.cast(K.greater(phi, r_on), K.floatx()) * self.alpha * phi
        k = up + mid + end

        # LSTM calculations
        z = K.dot(x * B_W[0], self.W) + K.dot(h_tm1 * B_U[0], self.U) + self.b

        z0 = z[:, :self.output_dim]
        z1 = z[:, self.output_dim: 2 * self.output_dim]
        z2 = z[:, 2 * self.output_dim: 3 * self.output_dim]
        z3 = z[:, 3 * self.output_dim:]

        i = self.inner_activation(z0)
        f = self.inner_activation(z1)
        # intermediate cell update
        c_hat = f * c_tm1 + i * self.activation(z2)
        c = k * c_hat + (1 - k) * c_tm1
        o = self.inner_activation(z3)
        # intermediate hidden update
        h_hat = o * self.activation(c_hat)
        h = k * h_hat + (1 - k) * h_tm1
        return h, [h, c, t]
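For intuition, the openness gate k can be reproduced in plain NumPy; the period, shift, r_on, and alpha values below are invented for illustration:

import numpy as np

period, shift, r_on, alpha = 50.0, 0.0, 0.1, 0.001
t = np.arange(0.0, 100.0)
phi = ((t - shift) % period) / period

# k ramps 0 -> 1 over the first half of the open phase, 1 -> 0 over the
# second half, and leaks alpha * phi for the rest of the cycle.
k = np.where(phi < 0.5 * r_on, 2 * phi / r_on,
             np.where(phi < r_on, 2 - 2 * phi / r_on, alpha * phi))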
Example #6
    def call(self, x, mask=None):
        """The first half along axis 1 is the conv branch, the second half
        the identity branch."""
        p = K.random_uniform((1,))
        zeros = K.zeros_like(x)

        skip_out = K.concatenate([zeros, x], axis=1)     # x routed to identity
        no_skip_out = K.concatenate([x, zeros], axis=1)  # x routed to conv
        # With probability skip_rate, route x to the identity half during
        # training; otherwise (and always at test time) keep the conv half.
        return K.switch(K.lesser(p[0], self.skip_rate[0]),
                        K.in_train_phase(skip_out, no_skip_out),
                        no_skip_out)
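Worth noting (an inference from the snippet, not stated in the source): K.random_uniform((1,)) draws one scalar per forward pass, so the whole batch shares a single skip/keep decision, in the spirit of stochastic depth; a per-sample gate would need a (batch_size,)-shaped draw.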
Example #7
    def get_updates(self, params, constraints, loss):
        # Adam-style moment updates plus a feedback term d_t that tracks a
        # smoothed relative change of the loss (an Eve-style scheme).
        grads = self.get_gradients(loss, params)

        self.updates = [K.update_add(self.iterations, 1)]
        t = self.iterations + 1

        loss_prev = K.variable(0)
        loss_hat_prev = K.variable(0)
        shapes = [K.get_variable_shape(p) for p in params]
        ms = [K.zeros(shape) for shape in shapes]
        vs = [K.zeros(shape) for shape in shapes]

        ch_fact_lbound = K.switch(K.greater(loss, loss_prev), 1+self.thl, 1/(1+self.thu))
        ch_fact_ubound = K.switch(K.greater(loss, loss_prev), 1+self.thu, 1/(1+self.thl))
        loss_ch_fact = loss / loss_prev
        loss_ch_fact = K.switch(K.lesser(loss_ch_fact, ch_fact_lbound), ch_fact_lbound, loss_ch_fact)
        loss_ch_fact = K.switch(K.greater(loss_ch_fact, ch_fact_ubound), ch_fact_ubound, loss_ch_fact)
        loss_hat = K.switch(K.greater(t, 1), loss_hat_prev * loss_ch_fact, loss)

        d_den = K.switch(K.greater(loss_hat, loss_hat_prev), loss_hat_prev, loss_hat)
        d_t = (self.beta_3 * self.d) + (1. - self.beta_3) * K.abs((loss_hat - loss_hat_prev) / d_den)
        d_t = K.switch(K.greater(t, 1), d_t, 1.)
        self.updates.append(K.update(self.d, d_t))

        # The bias corrections for beta_1 and beta_2 are folded into lr_hat,
        # so the raw m_t and v_t are used directly in the update below.
        lr_hat = self.lr * K.sqrt(1. - K.pow(self.beta_2, t)) / (1. - K.pow(self.beta_1, t)) / (1. + (self.iterations * self.decay))

        for p, g, m, v in zip(params, grads, ms, vs):
            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
            #mhat_t = m_t / (1. - K.pow(self.beta_1, t))
            self.updates.append(K.update(m, m_t))

            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
            #vhat_t = v_t / (1. - K.pow(self.beta_2, t))
            self.updates.append(K.update(v, v_t))

            p_t = p - lr_hat * m_t / ((K.sqrt(v_t) * d_t) + self.epsilon)
            self.updates.append(K.update(p, p_t))

        self.updates.append(K.update(loss_prev, loss))
        self.updates.append(K.update(loss_hat_prev, loss_hat))
        return self.updates
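A usage sketch, assuming this method belongs to a Keras 1.x-style optimizer class; the class name Eve and all hyperparameters below are illustrative, not from the source:

from keras.models import Sequential
from keras.layers import Dense

model = Sequential([Dense(10, input_dim=20, activation='softmax')])
# `Eve` is the assumed name of the class that defines get_updates above.
model.compile(optimizer=Eve(lr=1e-3), loss='categorical_crossentropy')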
Example #8
    def get_updates(self, params, constraints, loss):
        # Variant of the previous update: bias correction is applied to the
        # moments explicitly (mhat_t, vhat_t), and a single loss_prev
        # variable doubles as storage for the smoothed loss_hat (see the
        # final update below).
        grads = self.get_gradients(loss, params)

        self.updates = [K.update_add(self.iterations, 1)]
        t = self.iterations + 1

        loss_prev = K.variable(0)
        shapes = [K.get_variable_shape(p) for p in params]
        ms = [K.zeros(shape) for shape in shapes]
        vs = [K.zeros(shape) for shape in shapes]

        ch_fact_lbound = K.switch(K.greater(loss, loss_prev), 1+self.thl, 1/(1+self.thu))
        ch_fact_ubound = K.switch(K.greater(loss, loss_prev), 1+self.thu, 1/(1+self.thl))
        loss_ch_fact = loss / loss_prev
        loss_ch_fact = K.switch(K.lesser(loss_ch_fact, ch_fact_lbound), ch_fact_lbound, loss_ch_fact)
        loss_ch_fact = K.switch(K.greater(loss_ch_fact, ch_fact_ubound), ch_fact_ubound, loss_ch_fact)
        loss_hat = K.switch(K.greater(t, 1), loss_prev * loss_ch_fact, loss)

        d_den = K.switch(K.greater(loss_hat, loss_prev), loss_prev, loss_hat)
        d_t = (self.beta_3 * self.d) + (1. - self.beta_3) * K.abs((loss_hat - loss_prev) / d_den)
        d_t = K.switch(K.greater(t, 1), d_t, 1.)
        self.updates.append(K.update(self.d, d_t))

        for p, g, m, v in zip(params, grads, ms, vs):
            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
            mhat_t = m_t / (1. - K.pow(self.beta_1, t))
            self.updates.append(K.update(m, m_t))

            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
            vhat_t = v_t / (1. - K.pow(self.beta_2, t))
            self.updates.append(K.update(v, v_t))

            p_t = p - (self.lr / (1. + (self.iterations * self.decay))) * mhat_t / ((K.sqrt(vhat_t) * d_t) + self.epsilon)
            self.updates.append(K.update(p, p_t))

        self.updates.append(K.update(loss_prev, loss_hat))
        return self.updates
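The subtle difference from Example #7: here loss_prev is overwritten with the smoothed loss_hat at the end of each step, so the next step's change-factor bounds compare the new loss against the smoothed value rather than the raw previous loss.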
Example #9
def reliability(y_true, y_pred):
    # Fraction of predictions within 10% relative error of the target.
    par = 0.1
    return K.mean(K.cast(K.lesser(K.abs((y_pred - y_true) / y_true), par),
                         K.floatx()))
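A concrete check with made-up numbers: relative errors of [0.05, 0.25, 0.025] against par = 0.1 give a reliability of 2/3.

import numpy as np

y_true = np.array([1.0, 2.0, 4.0])
y_pred = np.array([1.05, 2.5, 4.1])
rel_err = np.abs((y_pred - y_true) / y_true)   # [0.05, 0.25, 0.025]
print((rel_err < 0.1).mean())                  # 0.666...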
Example #10
def limit(x):
    # Replace entries above 1e5 with 1e6 and entries below -1e5 with -1e6.
    # (tf.select is the pre-1.0 name of tf.where.)
    y = tf.select(K.greater(x, 100000), 1000000. * K.ones_like(x), x)
    z = tf.select(K.lesser(y, -100000), -1000000. * K.ones_like(x), y)
    return z
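One plausible way to use such a clamp inside a model is a Lambda layer; the surrounding model here is an invented example, not from the source:

from keras.models import Sequential
from keras.layers import Dense, Lambda

# Hypothetical placement: clamp extreme activations after a Dense layer.
model = Sequential([Dense(8, input_dim=4), Lambda(limit)])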
Example #11
def get_masked_samples(self, samples):
    # Valid ids lie strictly between 0 and max_items; the mask zeroes out
    # everything else.
    mask = K.cast(
        tf.logical_and(K.greater(samples, 0),
                       K.lesser(samples, self.max_items)), K.floatx())
    # Clip ids to max_items so out-of-range samples still index safely.
    masked_samples = K.flatten(K.minimum(samples, int(self.max_items)))
    return mask, masked_samples
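A concrete trace with toy ids and an assumed max_items of 5 (both invented): id 0 and ids >= 5 are masked out, in-range ids keep mask 1.

import numpy as np

samples = np.array([[0, 3, 7]])
max_items = 5
mask = ((samples > 0) & (samples < max_items)).astype('float32')  # [[0., 1., 0.]]
masked = np.minimum(samples, max_items).flatten()                 # [0 3 5]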