def call(self, x):
        y = x[1]
        # Normalize the embeddings per sample and the kernel per class column (ArcFace).
        x_normalize = tf.math.l2_normalize(x[0], axis=1)
        k_normalize = tf.math.l2_normalize(self.kernel, axis=0)

        cos_m = K.cos(self.m)
        sin_m = K.sin(self.m)
        th = K.cos(np.pi - self.m)
        mm = K.sin(np.pi - self.m) * self.m

        cosine = K.dot(x_normalize, k_normalize)
        # Clip before the square root so tiny negative values cannot produce NaN.
        sine = K.sqrt(K.clip(1.0 - K.square(cosine), 0.0, 1.0))

        # cos(theta + m)
        phi = cosine * cos_m - sine * sin_m

        if self.easy_margin:
            phi = tf.where(cosine > 0, phi, cosine)
        else:
            phi = tf.where(cosine > th, phi, cosine - mm)

        # Apply the angular margin only to the target classes, then rescale by s.
        output = (y * phi) + ((1.0 - y) * cosine)
        output *= self.s

        return output
Example No. 2
def convert_gaze_2d_3d(angles):
    # angles[:, 0] is pitch and angles[:, 1] is yaw, both in radians.
    x = -cos(angles[:, 0]) * sin(angles[:, 1])
    y = -sin(angles[:, 0])
    z = -cos(angles[:, 0]) * cos(angles[:, 1])
    norm = sqrt(x**2 + y**2 + z**2)
    x /= norm
    y /= norm
    z /= norm
    return x, y, z
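A minimal usage sketch (not part of the original source), assuming the bare cos/sin/sqrt above come from numpy and that angles holds (pitch, yaw) pairs in radians:

import numpy as np

angles = np.array([[0.10, -0.20],
                   [0.00, 0.30]])            # shape (batch, 2): pitch, yaw in radians
gx, gy, gz = convert_gaze_2d_3d(angles)
print(np.stack([gx, gy, gz], axis=1))        # one unit-length gaze vector per row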
Example No. 3
 def noised():
     Ni = K.shape(x)[0]  # number of images in the batch
     # Get an angle by which to rotate each image in the batch.
     angles = K.clip(self.amount * K.random_normal((Ni,)), self.lower, self.upper)
     # We post-multiply the vector (x' = xR) rather than the usual x' = Rx,
     # so we use the transpose of the rotation matrix R found in the literature.
     R = K.stack((K.stack((K.cos(angles), K.sin(angles)), axis=1),
                  K.stack((-K.sin(angles), K.cos(angles)), axis=1)),
                 axis=1)
     return tf.matmul(x, R)
Example No. 4
def sinc(band, t_right):
    y_right = K.sin(
        2 * math.pi * band * t_right) / (2 * math.pi * band * t_right)
    # y_left = flip(y_right, 0) TODO remove if useless
    y_left = K.reverse(y_right, 0)
    y = K.concatenate([y_left, K.variable(K.ones(1)), y_right])
    return y
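A short usage sketch under assumed SincNet-style conventions (the sample rate and filter length are illustrative, and math plus the Keras backend K are assumed to be imported in the enclosing module); t_right holds the strictly positive half of the time axis, and the returned kernel is symmetric with odd length:

from tensorflow.keras import backend as K

N = 125                                                        # half-length of the filter
t_right = K.constant([(i + 1) / 16000.0 for i in range(N)])    # assumes 16 kHz audio
kernel = sinc(50.0, t_right)                                   # band parameter in Hz
print(K.int_shape(kernel))                                     # (251,), i.e. 2 * N + 1 taps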
Example No. 5
def positional_signal(hidden_size: int, length: int,
                      min_timescale: float = 1.0, max_timescale: float = 1e4):
    """
    Helper function, constructing positional encodings as described in
    "Attention is All You Need" (https://arxiv.org/abs/1706.03762)
    The implementation was taken from https://github.com/kpot/keras-transformer
    """

    if hidden_size % 2 != 0:
        raise ValueError(
            f"The hidden dimension of the model must be divisible by 2. "
            f"Currently it is {hidden_size}")
    position = K.arange(0, length, dtype=K.floatx())
    num_timescales = hidden_size // 2
    log_timescale_increment = K.constant(
        (np.log(float(max_timescale) / float(min_timescale)) /
         (num_timescales - 1)),
        dtype=K.floatx())
    inv_timescales = (
            min_timescale *
            K.exp(K.arange(num_timescales, dtype=K.floatx()) *
                  -log_timescale_increment))
    scaled_time = K.expand_dims(position, 1) * K.expand_dims(inv_timescales, 0)
    signal = K.concatenate([K.sin(scaled_time), K.cos(scaled_time)], axis=1)
    return K.expand_dims(signal, axis=0)
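A small usage sketch (values are illustrative): the returned tensor has shape (1, length, hidden_size) and is typically broadcast-added onto token embeddings of the same width.

from tensorflow.keras import backend as K

signal = positional_signal(hidden_size=64, length=50)
print(K.int_shape(signal))   # (1, 50, 64)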
Example No. 6
def cartpole_next_state_fn(input, action):
    gravity = 9.8
    masscart = 1.0
    masspole = 0.1
    total_mass = (masspole + masscart)
    length = 0.5  # actually half the pole's length
    polemass_length = (masspole * length)
    force_mag = 10.0
    tau = 0.02  # seconds between state updates
    x = input[:, 0]
    x_dot = input[:, 1]
    theta = input[:, 2]
    theta_dot = input[:, 3]
    costheta = K.cos(theta)
    sintheta = K.sin(theta)
    force = force_mag if action == 1 else -force_mag
    temp = (force +
            polemass_length * theta_dot * theta_dot * sintheta) / total_mass
    thetaacc = (gravity * sintheta - costheta * temp) / (
        length * (4.0 / 3.0 - masspole * costheta * costheta / total_mass))
    xacc = temp - polemass_length * thetaacc * costheta / total_mass
    x = x + tau * x_dot
    x_dot = x_dot + tau * xacc
    theta = theta + tau * theta_dot
    theta_dot = theta_dot + tau * thetaacc
    return K.stack([x, x_dot, theta, theta_dot], axis=1)
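A hedged usage sketch (state values are illustrative): advance a batch of CartPole states one Euler step for a fixed action, each row being [x, x_dot, theta, theta_dot].

from tensorflow.keras import backend as K

states = K.constant([[0.0, 0.0, 0.05, 0.0],
                     [0.1, -0.2, -0.03, 0.1]])
next_states = cartpole_next_state_fn(states, action=1)   # push the cart to the right
print(K.eval(next_states))                               # shape (2, 4)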
Example No. 7
def positional_signal(hidden_size: int,
                      length: int,
                      min_timescale: float = 1.0,
                      max_timescale: float = 1e4):
    """
    Helper function, constructing basic positional encoding.
    The code is partially based on implementation from Tensor2Tensor library
    https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/layers/common_attention.py
    """

    if hidden_size % 2 != 0:
        raise ValueError(
            f"The hidden dimension of the model must be divisible by 2. "
            f"Currently it is {hidden_size}")
    position = K.arange(0, length, dtype=K.floatx())
    num_timescales = hidden_size // 2
    log_timescale_increment = K.constant(
        (np.log(float(max_timescale) / float(min_timescale)) /
         (num_timescales - 1)),
        dtype=K.floatx())
    inv_timescales = (min_timescale * K.exp(
        K.arange(num_timescales, dtype=K.floatx()) * -log_timescale_increment))
    scaled_time = K.expand_dims(position, 1) * K.expand_dims(inv_timescales, 0)
    signal = K.concatenate([K.sin(scaled_time), K.cos(scaled_time)], axis=1)
    return K.expand_dims(signal, axis=0)
Example No. 8
    def mass_layer(self, tau_4vec):
        import tensorflow as tf
        from tensorflow.keras.layers import Concatenate
        from tensorflow.keras import backend as K
        tau_4vec = K.reshape(tau_4vec, (-1, self._njets, self._n_features))
        pt = K.exp(K.clip(tau_4vec[:, :, 0], -7., 7.)) - 0.1
        eta = tau_4vec[:, :, 1]
        phi = tau_4vec[:, :, 2]
        mass = 1.777

        px = pt * K.cos(phi)
        py = pt * K.sin(phi)
        pz = pt * tf.math.sinh(K.clip(eta, -5, 5))
        epsilon = 0.1  # avoid nan when e=0. sqrt(x)^' = -1/2 * 1/sqrt(x)
        e = K.sqrt(epsilon + px**2 + py**2 + pz**2 + mass**2)
        px = K.reshape(px, (-1, self._njets, 1))
        py = K.reshape(py, (-1, self._njets, 1))
        pz = K.reshape(pz, (-1, self._njets, 1))
        e = K.reshape(e, (-1, self._njets, 1))
        tau_4vec = Concatenate(axis=2)([px, py, pz, e])
        tau_4vec = K.sum(tau_4vec, axis=1)
        px = tau_4vec[:, 0]
        py = tau_4vec[:, 1]
        pz = tau_4vec[:, 2]
        e = tau_4vec[:, 3]
        masssq = e**2 - (px**2 + py**2 + pz**2)
        mass = K.sqrt(epsilon + masssq)
        mass = K.reshape(mass, [-1, 1])
        return mass
Example No. 9
    def call(self, inputs):
        """If custom_position_ids is set, the second input holds the custom position ids.
        """
        if self.custom_position_ids:
            seq_len = K.shape(inputs)[1]
            inputs, position_ids = inputs
            if 'float' not in K.dtype(position_ids):
                position_ids = K.cast(position_ids, K.floatx())
        else:
            input_shape = K.shape(inputs)
            batch_size, seq_len = input_shape[0], input_shape[1]
            position_ids = K.arange(0, seq_len, dtype=K.floatx())[None]

        indices = K.arange(0, self.output_dim // 2, dtype=K.floatx())
        indices = K.pow(10000.0, -2 * indices / self.output_dim)
        embeddings = tf.einsum('bn,d->bnd', position_ids, indices)
        embeddings = K.stack([K.sin(embeddings), K.cos(embeddings)], axis=-1)
        embeddings = K.reshape(embeddings, (-1, seq_len, self.output_dim))

        if self.merge_mode == 'add':
            return inputs + embeddings
        elif self.merge_mode == 'mul':
            return inputs * embeddings
        else:
            if not self.custom_position_ids:
                embeddings = K.tile(embeddings, [batch_size, 1, 1])
            return K.concatenate([inputs, embeddings])
Example No. 10
    def call(self, inputs, **kwargs):
        bias = self.wb * inputs + self.bb
        dp = K.dot(inputs, self.wa) + self.ba
        wgts = K.sin(dp)  # or K.cos(.)

        ret = K.concatenate([K.expand_dims(bias, -1), wgts], -1)
        ret = K.reshape(ret, (-1, inputs.shape[1] * (self.k + 1)))
        return ret
Example No. 11
 def call(self, x):
     y_embed = K.cumsum(K.ones_like(x[:, :, :, 0]), 1)
     x_embed = K.cumsum(K.ones_like(x[:, :, :, 0]), 2)
     if self.normalize:
         eps = 1e-6
         y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
         x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
     dim_t = K.arange(self.num_pos_feats, dtype='float')
     # Per-channel temperature scaling (DETR-style sine positional embedding).
     dim = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
     pos_x = x_embed[:, :, :, None] / dim
     pos_y = y_embed[:, :, :, None] / dim
     pos_x = K.stack((K.sin(pos_x[:, :, :, 0::2]), K.cos(pos_x[:, :, :, 1::2])), axis=4)
     b, h, w, d, c = K.int_shape(pos_x)
     pos_x = K.reshape(pos_x, (-1, h, w, d*c))
     pos_y = K.stack((K.sin(pos_y[:, :, :, 0::2]), K.cos(pos_y[:, :, :, 1::2])), axis=4)
     pos_y = K.reshape(pos_y, (-1, h, w, d*c))
     pos = K.permute_dimensions(K.concatenate((pos_y, pos_x), axis=3), (0, 3, 1, 2))
     return pos
Example No. 12
def accuracy_angle(y_true, y_pred):
    import math
    from tensorflow.keras import backend as K
    import tensorflow as tf

    pred_x = -1 * K.cos(y_pred[0]) * K.sin(y_pred[1])
    pred_y = -1 * K.sin(y_pred[0])
    pred_z = -1 * K.cos(y_pred[0]) * K.cos(y_pred[1])
    pred_norm = K.sqrt(pred_x * pred_x + pred_y * pred_y + pred_z * pred_z)

    true_x = -1 * K.cos(y_true[0]) * K.sin(y_true[1])
    true_y = -1 * K.sin(y_true[0])
    true_z = -1 * K.cos(y_true[0]) * K.cos(y_true[1])
    true_norm = K.sqrt(true_x * true_x + true_y * true_y + true_z * true_z)

    angle_value = (pred_x * true_x + pred_y * true_y +
                   pred_z * true_z) / (true_norm * pred_norm)
    # K.clip returns a new tensor, so the result must be assigned back.
    angle_value = K.clip(angle_value, -0.9999999999, 0.999999999)
    return (tf.acos(angle_value) * 180.0) / math.pi
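A hedged usage sketch: since the function indexes y_pred[0] / y_pred[1] along the leading axis, pass single (pitch, yaw) pairs of shape (2,); the result is the angular error in degrees.

from tensorflow.keras import backend as K

y_true = K.constant([0.10, -0.20])
y_pred = K.constant([0.12, -0.18])
print(float(accuracy_angle(y_true, y_pred)))   # angular error in degrees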
Example No. 13
    def build(self, input_shape):
        # Precompute the sinusoidal table with numpy and load it through a constant
        # initializer; slice-assigning into the tf.Variable itself would fail.
        position = np.arange(0, self.max_len, dtype='float32')[:, None]
        div_term = np.exp(np.arange(0, self.d_model, 2, dtype='float32') *
                          (-np.log(10000.0) / self.d_model))
        encoding = np.zeros((self.max_len, self.d_model), dtype='float32')
        encoding[:, 0::2] = np.sin(position * div_term)
        encoding[:, 1::2] = np.cos(position * div_term)

        self.pos_encoding = self.add_weight(shape=(self.max_len, self.d_model),
                                       initializer=tf.keras.initializers.Constant(encoding),
                                       name='pos_encoding',
                                       trainable=False)
        self.pos_encoding = K.transpose(K.expand_dims(self.pos_encoding, 0))
Example No. 14
    def call(self, x, **kwargs):
        mask = K.expand_dims(K.cast(K.arange(start=0, stop=K.shape(x)[1] + 1), 'float32'), axis=-1)
        bins = K.expand_dims(K.cast(K.arange(self.embedding_size // 2) * 2, 'float32'), axis=0)

        evens = K.dot(mask, 1.0 / K.pow(10000.0, bins / self.embedding_size))
        odds = tf.identity(evens)

        evens = K.sin(evens)[1:, :]
        odds = K.cos(odds)[1:, :]

        pos = K.reshape(K.stack([evens, odds], axis=2), (-1, K.shape(x)[1], self.embedding_size))
        return pos
Example No. 15
def seasonality_model(thetas, backcast_length, forecast_length, is_forecast):
    p = thetas.get_shape().as_list()[-1]
    p1, p2 = (p // 2, p // 2) if p % 2 == 0 else (p // 2, p // 2 + 1)
    t = linear_space(backcast_length, forecast_length, fwd_looking=is_forecast)
    s1 = K.stack([K.cos(2 * np.pi * i * t) for i in range(p1)], axis=0)
    s2 = K.stack([K.sin(2 * np.pi * i * t) for i in range(p2)], axis=0)
    if p == 1:
        s = s2
    else:
        s = K.concatenate([s1, s2], axis=0)
    s = K.cast(s, np.float32)
    return K.dot(thetas, s)
Example No. 16
 def idx2pos(self, pid):
     pid = K.cast(pid, 'float32')
     pid = K.expand_dims(pid, 2)
     pj = 1. / K.pow(
         10000.,
         2. / self.v_dim * K.arange(self.v_dim // 2, dtype='float32'))
     pj = K.expand_dims(pj, 0)
     pv = K.dot(pid, pj)
     pv1, pv2 = K.sin(pv), K.cos(pv)
     pv1, pv2 = K.expand_dims(pv1, 3), K.expand_dims(pv2, 3)
     pv = K.concatenate([pv1, pv2], 3)
     return K.reshape(pv, (K.shape(pv)[0], K.shape(pv)[1], self.v_dim))
Example No. 17
 def call(self, x):
     if (self.size is None) or (self.mode == 'sum'):
         self.size = int(x.shape[-1])
     position_j = 1. / K.pow(10000., 2 * K.arange(self.size / 2, dtype='float32') / self.size)
     position_j = K.expand_dims(position_j, 0)
     # Cumulative sum along axis 1 of x: like arange, this generates a position
     # sequence, but one that follows the actual length of x.
     position_i = tf.cumsum(K.ones_like(x[:, :, 0]), 1) - 1
     position_i = K.expand_dims(position_i, 2)
     position_ij = K.dot(position_i, position_j)
     position_ij = K.concatenate([K.cos(position_ij), K.sin(position_ij)], 2)
     if self.mode == 'sum':
         return position_ij + x
     elif self.mode == 'concat':
         return K.concatenate([position_ij, x], 2)
Example No. 18
 def call(self, inputs, **kwargs):
     '''
     
     :param inputs: A Tensor with shape (batch_size, feature_size, 1)
     :param kwargs:
     :return: A Tensor with shape (batch_size, feature_size, length of time vector representation + 1)
     '''
     bias = self.wb * inputs + self.bb
     if self.p_activation.startswith('sin'):
         wgts = K.sin(K.dot(inputs, self.wa) + self.ba)
     elif self.p_activation.startswith('cos'):
         wgts = K.cos(K.dot(inputs, self.wa) + self.ba)
     else:
         raise NotImplementedError(
             'Neither sine nor cosine periodic activation was selected.')
     return K.concatenate([bias, wgts], -1)
Example No. 19
 def call(self, x):
     if (self.size is None) or (self.mode == 'sum'):
         self.size = int(x.shape[-1])
     batch_size, seq_len = K.shape(x)[0], K.shape(x)[1]
     position_j = 1. / K.pow(10000.,
                             2 * K.arange(self.size / 2, dtype='float32') / self.size)
     position_j = K.expand_dims(position_j, 0)
     # K.arange does not support variable lengths, so generate the positions with cumsum instead.
     position_i = K.cumsum(K.ones_like(x[:, :, 0]), 1) - 1
     position_i = K.expand_dims(position_i, 2)
     position_ij = K.dot(position_i, position_j)
     position_ij = K.concatenate([K.cos(position_ij), K.sin(position_ij)], 2)
     if self.mode == 'sum':
         return position_ij + x
     elif self.mode == 'concat':
         return K.concatenate([position_ij, x], 2)
Example No. 20
 def call(self,
          inputs: tensorflow.Tensor,
          mask: Optional[tensorflow.Tensor] = None) -> tensorflow.Tensor:
     input_shape = K.shape(inputs)
     if self.mode == self.MODE_ADD:
         batch_size, seq_len, output_dim = input_shape[0], input_shape[
             1], input_shape[2]
         pos_input = K.tile(K.expand_dims(K.arange(seq_len), axis=0),
                            [batch_size, 1])
     elif self.mode == self.MODE_CONCAT:
         batch_size, seq_len, output_dim = input_shape[0], input_shape[
             1], self.output_dim
         pos_input = K.tile(K.expand_dims(K.arange(seq_len), axis=0),
                            [batch_size, 1])
     else:
         output_dim = self.output_dim
         pos_input = inputs
     if K.dtype(pos_input) != K.floatx():
         pos_input = K.cast(pos_input, K.floatx())
     evens = K.arange(output_dim // 2) * 2
     odds = K.arange(output_dim // 2) * 2 + 1
     even_embd = K.sin(
         K.dot(
             K.expand_dims(pos_input, -1),
             K.expand_dims(
                 1.0 / K.pow(
                     10000.0,
                     K.cast(evens, K.floatx()) /
                     K.cast(output_dim, K.floatx())), 0)))
     odd_embd = K.cos(
         K.dot(
             K.expand_dims(pos_input, -1),
             K.expand_dims(
                 1.0 / K.pow(
                     10000.0,
                     K.cast((odds - 1), K.floatx()) /
                     K.cast(output_dim, K.floatx())), 0)))
     embd = K.stack([even_embd, odd_embd], axis=-1)
     output = K.reshape(embd, [-1, K.shape(inputs)[1], output_dim])
     if self.mode == self.MODE_CONCAT:
         output = K.concatenate([inputs, output], axis=-1)
     if self.mode == self.MODE_ADD:
         output += inputs
     return output
Example No. 21
 def call(self, inputs, **kwargs):
     q_len, m_len = K.shape(inputs[0])[1], K.shape(inputs[1])[1]
     k_len = q_len + m_len
     start, stop = k_len, -1
     if not self.directional:
         stop = -q_len
     inputs = K.tile(
         K.expand_dims(K.arange(start, stop, -1, dtype=K.floatx()), axis=0),
         [K.shape(inputs[0])[0], 1],
     )
     if self.clamp_len is not None:
         inputs = K.clip(inputs, min_value=0, max_value=self.clamp_len)
     inputs = K.expand_dims(inputs, axis=-1)
     output_dim = K.cast(self.output_dim, K.floatx())
     ranges = K.expand_dims(K.arange(0.0, self.output_dim, 2.0),
                            axis=0) / output_dim
     inverse = 1.0 / K.pow(10000.0, ranges)
     positions = inputs * inverse
     return K.concatenate([K.sin(positions), K.cos(positions)], axis=-1)
Example No. 22
    def call(self, x, mask=None):
        if (self.size is None) or (self.mode == 'sum'):
            self.size = int(x.shape[-1])

        position_j = 1. / \
                     K.pow(10000., 2 * K.arange(self.size / 2, dtype='float32') / self.size)
        position_j = K.expand_dims(position_j, 0)

        position_i = tf.cumsum(K.ones_like(x[:, :, 0]), 1) - 1
        position_i = K.expand_dims(position_i, 2)
        position_ij = K.dot(position_i, position_j)
        outputs = K.concatenate([K.cos(position_ij), K.sin(position_ij)], 2)

        if self.mode == 'sum':
            if self.scale:
                outputs = outputs * self.size**0.5
            return x + outputs
        elif self.mode == 'concat':
            return K.concatenate([outputs, x], 2)
Example No. 23
    def call(self, inputs):
        theta_b_output, theta_f_output = super(SeasonalBlock,
                                               self).call(inputs)

        t = K.cast(K.arange(-self.fdw, self.fw, 1) / self.fdw, tf.float32)

        cos_num = self.theta_units // 2
        sin_num = (self.theta_units // 2 if self.theta_units %
                   2 == 0 else self.theta_units // 2 + 1)

        cos = K.stack([K.cos(2 * np.pi * i * t) for i in range(cos_num)],
                      axis=0)
        sin = K.stack([K.sin(2 * np.pi * i * t) for i in range(sin_num)],
                      axis=0)

        s = K.concatenate([cos, sin], axis=0)
        s_b = s[:, :self.fdw]
        s_f = s[:, self.fdw:]

        backcast = K.dot(theta_b_output, s_b)
        forecast = K.dot(theta_f_output, s_f)

        return backcast, forecast
Example No. 24
 def call(self, inputs, mask=None):
     
     inputs, pos_input = inputs
     batch_size, seq_len, output_dim = self._get_shape(inputs)
     
     if self.mode == self.MODE_EXPAND:
         pos_input = inputs
     
     if K.dtype(pos_input) != K.floatx():
         pos_input = K.cast(pos_input, K.floatx())
     
     evens = K.arange(0, output_dim // 2) * 2
     odds = K.arange(0, output_dim // 2) * 2 + 1
     even_embd = K.sin(
         K.dot(
             K.expand_dims(pos_input, -1),
             K.expand_dims(1.0 / K.pow(
                 10000.0,
                 K.cast(evens, K.floatx()) / K.cast(output_dim, K.floatx())
             ), 0)
         )
     )
     odd_embd = K.cos(
         K.dot(
             K.expand_dims(pos_input, -1),
             K.expand_dims(1.0 / K.pow(
                 10000.0, K.cast((odds - 1), K.floatx()) / K.cast(output_dim, K.floatx())
             ), 0)
         )
     )
     embd = K.stack([even_embd, odd_embd], axis=-1)
     output = K.reshape(embd, [-1, seq_len, output_dim])
     if self.mode == self.MODE_CONCAT:
         output = K.concatenate([inputs, output], axis=-1)
     if self.mode == self.MODE_ADD:
         output += inputs
     return output
Example No. 25
    def _rotation_matrix_axis(self, dim, theta):
        # The following use a left-handed system (clockwise rotation).
        # IMPORTANT: unlike the MATLAB version, dim here starts from 0 instead of 1.
        if dim == 0:  # x-axis
            rm = tf.stack([[1.0, 0.0, 0.0], [0.0,
                                             K.cos(theta), -K.sin(theta)],
                           [0.0, K.sin(theta), K.cos(theta)]])
        elif dim == 1:  # y-axis
            rm = tf.stack([[K.cos(theta), 0.0, K.sin(theta)], [0.0, 1.0, 0.0],
                           [-K.sin(theta), 0.0,
                            K.cos(theta)]])
        elif dim == 2:  # z-axis
            rm = tf.stack([[K.cos(theta), -K.sin(theta), 0.0],
                           [K.sin(theta), K.cos(theta), 0.0], [0.0, 0.0, 1.0]])
        else:
            raise ValueError('dim must be 0, 1 or 2')

        return rm
Example No. 26
def build_model(hidden_layers, activation='tanh', alpha=1e-3, penalty_order=3, penalty_loss='l1'):
    inputs = keras.Input(shape=(1,))
    for i,hidden in enumerate(hidden_layers):
        if i == 0:
            h = keras.layers.Dense(hidden,activation='linear', kernel_initializer=keras.initializers.glorot_normal)(inputs)
        else:
            h = keras.layers.Dense(hidden,activation='linear')(h)
        if activation == 'tanh':
            h = K.tanh(h)
        elif activation == 'sine':
            h = K.sin(h)
        elif activation == 'elu':
            h = K.elu(h)
        elif activation == 'sigmoid':
            h = K.sigmoid(h)
        elif activation == 'relu':
            h = K.relu(h)
        #h = keras.layers
        #h = keras.layers.Dropout(rate=0.8)(h)
        #h = keras.layers.BatchNormalization()(h)
    outputs = keras.layers.Dense(1,activation='linear')(h)
    model = keras.Model(inputs, outputs)
    grad1 = K.gradients(model.output, model.input)[0]
    iterate1 = K.function([model.input], [grad1])
    grad2 = K.gradients(grad1, model.input)[0]
    iterate2 = K.function([model.input], [grad2])
    if penalty_order == 2:
        tt = grad2
    elif penalty_order == 3:
        grad3 = K.gradients(grad2, model.input)[0]
        tt = grad3
    if penalty_loss == 'l1':
        model.compile(optimizer='Adam', loss=penalty_l1(tt, alpha=alpha))
    elif penalty_loss == 'l2': 
        model.compile(optimizer='Adam', loss=penalty_l2(tt, alpha=alpha))
    return model,iterate1,iterate2
Example No. 27
 def noised():
     Ni = tf.shape(x)[0]  # number of images in the batch
     # Get angles by which to rotate each image in the batch.
     anglesx = K.clip(self.amount * K.random_normal((Ni,)), self.lower, self.upper)
     anglesy = K.clip(self.amount * K.random_normal((Ni,)), self.lower, self.upper)
     anglesz = K.clip(self.amount * K.random_normal((Ni,)), self.lower, self.upper)
     # We post-multiply the vector (x' = xR) rather than the usual x' = Rx,
     # so we use the transposes of the rotation matrices found in the literature.
     zeros = tf.zeros((Ni,))
     ones = tf.ones((Ni,))
     Rx = K.stack((K.stack((ones, zeros, zeros), axis=1),
                   K.stack((zeros, K.cos(anglesx), K.sin(anglesx)), axis=1),
                   K.stack((zeros, -K.sin(anglesx), K.cos(anglesx)), axis=1)),
                  axis=1)
     Ry = K.stack((K.stack((K.cos(anglesy), zeros, -K.sin(anglesy)), axis=1),
                   K.stack((zeros, ones, zeros), axis=1),
                   K.stack((K.sin(anglesy), zeros, K.cos(anglesy)), axis=1)),
                  axis=1)
     Rz = K.stack((K.stack((K.cos(anglesz), K.sin(anglesz), zeros), axis=1),
                   K.stack((-K.sin(anglesz), K.cos(anglesz), zeros), axis=1),
                   K.stack((zeros, zeros, ones), axis=1)),
                  axis=1)
     return tf.matmul(x, tf.matmul(Rx, tf.matmul(Ry, Rz)))
Example No. 28
 def call(self, Z):
     m = self.epsilon * (K.sin(Z) - K.cos(Z))
     A = K.maximum(m, Z)
     return A
Example No. 29
def custom_activation(x):
    # activation function used in NN
    return K.sin(x)
Example No. 30
def f_model(w, x):
    return w[0] * K.sin(x)
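A tiny usage sketch (values are illustrative): evaluate the one-parameter model w[0] * sin(x) on a few points, with both arguments given as Keras backend tensors.

from tensorflow.keras import backend as K

w = K.constant([2.0])
x = K.constant([0.0, 1.5708, 3.1416])
print(K.eval(f_model(w, x)))   # approximately [0., 2., 0.]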