Example #1
    def time_step(self, x, y1, y2):
        """Take a step through time.

        Parameters
        ----------
        x : Input value(s) at current time step, batched in first dimension
        y1 : Scalar wave field one time step ago (part of the hidden state)
        y2 : Scalar wave field two time steps ago (part of the hidden state)
        """
        dt = self.dt
        c = self.c
        b = self.b

        term_one = 2 * dt ** (-2) * y1 \
            + math.multiply(c ** 2, self.compute_laplacian(y1))

        term_two = math.multiply(-(dt ** (-2) - b * 0.5 * dt ** (-1)), y2)
        denominator = dt ** (-2) + b * 0.5 * dt ** (-1)
        y = math.multiply(denominator ** (-1),
                          math.add(term_one, term_two))

        # Insert the source
        y_out = y[:, self.src_x, self.src_y]
        y_out = y_out + tf.broadcast_to(x, tf.shape(y_out))

        return y_out, y1
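For reference, the update in time_step is the usual second-order finite-difference scheme for a damped scalar wave equation; written out (a reconstruction from the terms in the code, not taken from the original project):

    (y - 2*y1 + y2) / dt**2 + b * (y - y2) / (2*dt) = c**2 * laplacian(y1)

Solving for the new field y gives the term_one / term_two / denominator split used above.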
Example #2
File: solver.py Project: nathannau/DLCube2
    def trainModel(self, infos):

        with self.graph.as_default():
            states = tf.constant([i["state"].tolist() for i in infos],
                                 shape=[infos.size, 24])
            rewards = tf.constant([i["reward"] for i in infos],
                                  dtype=tf.float32,
                                  shape=[infos.size, 1])
            # next_states = tf.constant([i["next_state"].tolist() for i in infos], shape=[infos.size, 24])
            actions = tf.constant([i["action"] for i in infos],
                                  dtype=tf.float32,
                                  shape=[infos.size, 12])

            # Qtargets = tf.constant(self.model.predict(next_states, steps=1), shape=[infos.size, 12])
            # Retrieve the current state
            targets = tf.constant(self.model.predict(states, steps=1),
                                  shape=[infos.size, 12])
            # Compute the negative mask
            mask = tf.ones([infos.size, 12], dtype=tf.float32)
            mask = tfm.subtract(mask, actions)
            # Apply the negative mask
            targets = tfm.multiply(targets, mask)

            # Compute the positive mask
            mask = tfm.multiply(rewards, actions)
            # Apply the positive mask
            targets = tfm.add(targets, mask)

            self.model.fit(states, targets, steps_per_epoch=200)  # 1000
Example #3
        def _loss_fn(y_true, y_pred):
            # Mask the predictions to ignore padded records
            mask = kwargs.get("mask")
            y_true = math.multiply(tf.cast(mask, tf.float32),
                                   tf.cast(y_true, tf.float32))
            y_pred = math.multiply(tf.cast(mask, tf.float32),
                                   tf.cast(y_pred, tf.float32))

            return cce(y_true, y_pred)
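A tiny illustration of the masking step with hypothetical values (mask marks real records with 1 and padded records with 0, as in the loss above):

import tensorflow as tf
from tensorflow import math

mask = tf.constant([[1., 1., 0.]])      # last record is padding
y_true = tf.constant([[1., 0., 1.]])
masked = math.multiply(mask, y_true)    # [[1., 0., 0.]] -- the padded entry no longer contributes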
Example #4
def actor_predictive_clustering_loss(y_true,
                                     y_pred,
                                     cluster_assignment_probs,
                                     y_type='categorical',
                                     name='actor_pred_clus_L'):
    """
    Compute prediction clustering loss between predicted output and true output with probability weights
    from cluster assignments.
    Inputs have shape (batch_size, T, num_classes) for y_pred, (batch_size, num_classes) for y_true,
    and (batch_size, num_cluster) for cluster_assignment_probs.

    There are a variety of different settings, all weighted sample-wise by assignment probability:

    - Binary:  Computes Binary Cross Entropy. Class/Event occurrence is matched with a dimension.
                y_true with entries in [0,1], and y_pred with values in (0,1)
    - Categorical: Computes Cross Entropy Loss. Class assigned by highest value dimension.
                y_true is a one-hot encoding, and y_pred is a probabilistic vector.
    - Continuous: Computes L2 loss. Similar to the Binary case, but class attributes are continuous.
                y_true and y_pred both with real-value entries.

    Returns: Mean loss over the batch between true y and predicted y, according to y_type.
    """
    y_true_temp_ = tf.repeat(tf.expand_dims(y_true, axis=1),
                             repeats=y_pred.shape[1],
                             axis=1,
                             name='true_y_time')

    if y_type == 'binary':
        # Compute Binary Cross Entropy weighted by cluster assignment probabilities.
        sample_loss = multiply(
            tf.reduce_sum(y_true_temp_ * log(y_pred) +
                          (1 - y_true_temp_) * log(1 - y_pred),
                          axis=-1), cluster_assignment_probs)
        batch_loss = -tf.reduce_mean(sample_loss, name=name)

        return batch_loss

    elif y_type == 'categorical':
        # Compute Categorical Cross Entropy weighted by cluster assignment probabilities.
        sample_loss = multiply(
            tf.reduce_sum(y_true_temp_ * log(y_pred), axis=-1),
            cluster_assignment_probs)
        batch_loss = -tf.reduce_mean(sample_loss, name=name)

        return batch_loss

    elif y_type == 'continuous':
        # Compute L2 Loss weighted by cluster assignment probabilities.
        sample_loss = multiply(tf.reduce_sum((y_true_temp_ - y_pred)**2, axis=-1),
                               cluster_assignment_probs)
        batch_loss = tf.reduce_mean(sample_loss, name=name)

        return batch_loss
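A minimal usage sketch for the categorical case (shapes are illustrative only; the element-wise weighting requires cluster_assignment_probs to broadcast against the (batch_size, T) per-time-step losses, so num_cluster == T is assumed here):

import tensorflow as tf

batch_size, T, num_classes = 4, 10, 3
y_true = tf.one_hot(tf.random.uniform([batch_size], maxval=num_classes, dtype=tf.int32),
                    num_classes)                                     # (batch_size, num_classes)
y_pred = tf.nn.softmax(tf.random.normal([batch_size, T, num_classes]), axis=-1)
probs = tf.nn.softmax(tf.random.normal([batch_size, T]), axis=-1)    # assumed num_cluster == T
loss = actor_predictive_clustering_loss(y_true, y_pred, probs, y_type='categorical')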
Example #5
    def call(self, x, beta):
        if self.res == 1:
            h_stack, v_stack = tf.unstack(x, axis=-1)
        else:
            h_stack = x
            v_stack = x
        #Vertical stack is acted on by a vertical convolution,
        #equivalent to a masked one
        v_stack = self.ver_cropping(v_stack)
        v_stack = self.ver_padding(v_stack)
        v_stack = self.ver_conv(v_stack)
        v_stack += self.ver_seq(beta)

        #Horizontal stack is acted on by a horizontal convolution,
        #equivalent to a masked one - h_stack2 is kept for later
        h_stack2 = h_stack
        h_stack = self.hor_cropping(h_stack)
        h_stack = self.hor_padding(h_stack)
        h_stack = self.hor_conv(h_stack)
        h_stack = tfm.add(h_stack, self.hor_seq(beta))

        #Add v_stack to h_stack
        h_stack = tfm.add(h_stack, v_stack)

        #"Gating" performed on horizontal stack
        h_stack0, h_stack1 = tf.split(h_stack, 2, axis=-1)
        h_stack0 = tfk.activations.tanh(h_stack0)
        h_stack1 = tfk.activations.sigmoid(h_stack1)
        h_stack = tfm.multiply(h_stack0, h_stack1)

        #"Gating" and convolving vertical stack
        if not self.last_layer:
            v_stack0, v_stack1 = tf.split(v_stack, 2, axis=-1)
            v_stack0 = tfk.activations.tanh(v_stack0)
            v_stack1 = tfk.activations.sigmoid(v_stack1)
            v_stack = tfm.multiply(v_stack0, v_stack1)

            v_stack = self.ver_conv2(v_stack)

        #Convolve h_stack2, h_stack and connect them
        h_stack = self.hor_conv2(h_stack)
        if self.res:
            h_stack2 = self.res_conv(h_stack2)
            h_stack = tfm.add(h_stack, h_stack2)

        if self.last_layer:
            output = h_stack
        else:
            output = tf.stack([h_stack, v_stack], axis=-1)
        return output
Example #6
    def calculate_protein(self, fbar, k_fbar, Δ):  # Calculate p_i vector
        τ = self.data.τ
        f_i = inverse_positivity(fbar)
        δ_i = tf.reshape(logit(k_fbar), (-1, 1))
        if self.options.delays:
            # Add delay
            Δ = tf.cast(Δ, 'int32')

            for r in range(self.num_replicates):
                f_ir = rotate(f_i[r], -Δ)
                mask = ~tf.sequence_mask(Δ, f_i.shape[2])
                f_ir = tf.where(mask, f_ir, 0)
                mask = np.zeros((self.num_replicates, 1, 1), dtype='float64')
                mask[r] = 1
                f_i = (1 - mask) * f_i + mask * f_ir

        # Approximate integral (trapezoid rule)
        resolution = τ[1] - τ[0]
        sum_term = tfm.multiply(tfm.exp(δ_i * τ), f_i)
        cumsum = 0.5 * resolution * tfm.cumsum(
            sum_term[:, :, :-1] + sum_term[:, :, 1:], axis=2)
        integrals = tf.concat([
            tf.zeros((self.num_replicates, self.num_tfs, 1), dtype='float64'),
            cumsum
        ],
                              axis=2)
        exp_δt = tfm.exp(-δ_i * τ)
        p_i = exp_δt * integrals
        return p_i
Example #7
def augmentation(img, msk):

    # Import TensorFlow math, which will be used to convert the rotation angle below.
    import tensorflow.math as Math

    # Create some random floats, which will be used in augmentation steps.
    tilt = tf.random.uniform(shape=[], minval=-30, maxval=30, dtype=tf.float32)
    dx = tf.random.uniform(shape=[], minval=-5, maxval=5, dtype=tf.float32)
    dy = tf.random.uniform(shape=[], minval=-5, maxval=5, dtype=tf.float32)

    # Use TensorFlow-style conditionals to flip the image and mask.
    img = tf.cond(tilt > 0, lambda: tf.image.flip_left_right(img),
                  lambda: tf.image.flip_up_down(img))
    msk = tf.cond(tilt > 0, lambda: tf.image.flip_left_right(msk),
                  lambda: tf.image.flip_up_down(msk))

    # Rotate the image and mask to some degree.
    # img = rotate(img, angle = tilt, mode = 'reflect')
    # msk = rotate(msk, angle = tilt, mode = 'reflect')
    toRads = Math.multiply(Math.divide(tilt, 180), tf.constant(math.pi))

    img = tfa.image.rotate(img, toRads)
    msk = tfa.image.rotate(msk, toRads)

    # Affine transformation
    img = tfa.image.translate(img, [dx, dy], 'BILINEAR')
    msk = tfa.image.translate(msk, [dx, dy], 'BILINEAR')

    # Pack the augmented image and mask back into a tuple.
    finalTuple = (img, msk)

    return finalTuple
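One possible way to apply this augmentation in an input pipeline (a sketch only; images and masks are hypothetical pre-loaded tensors with matching leading dimensions, and tensorflow_addons is assumed to be importable as tfa, as in the function above):

import tensorflow as tf

# images, masks: hypothetical pre-loaded tensors of identical length
ds = tf.data.Dataset.from_tensor_slices((images, masks))
ds = ds.map(augmentation, num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds = ds.batch(8).prefetch(tf.data.experimental.AUTOTUNE)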
Example #8
        def reloss(y_true, y_pred):
            """
            Custom loss to not penalize when the prediction is negative (dissimilar) and true label is 0.
            """

            loss_filter = maximum(y_true, y_pred)
            loss_filter = divide_no_nan(
                loss_filter, loss_filter)  # normalize any positive value to 1
            return multiply(loss_filter, MAE(y_true, y_pred))
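A small numeric sketch of the filtering idea (hypothetical values; maximum and divide_no_nan are assumed to come from tf.math, as in the snippet above):

import tensorflow as tf

y_true = tf.constant([0.0, 0.0, 1.0])
y_pred = tf.constant([-0.4, 0.3, 0.7])
f = tf.math.maximum(y_true, y_pred)    # [0.0, 0.3, 1.0]
f = tf.math.divide_no_nan(f, f)        # [0.0, 1.0, 1.0] -- the (label 0, negative prediction) pair is masked out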
Example #9
 def call(self, x):
     for i in range(self.net_depth + 1):
         x = self.custom_layers[i](x)
     if self.z2 and x.shape[1] == self.L:
         x_hat = tfm.multiply(x, self.x_hat_mask)
         x_hat = tfm.add(x_hat, self.x_hat_bias)
     else:
         x_hat = x
     return x_hat
Example #10
File: model.py Project: TTomilin/GVizDoom
    def call(self, inputs):
        # Generate random value matrix (newly generated with each call) - vector version
        self.kernel_epsilon = K.random_normal(shape=(self.input_dim,
                                                     self.units))

        w = self.kernel + math.multiply(self.kernel_sigma, self.kernel_epsilon)
        output = K.dot(inputs, w)

        if self.use_bias:
            # Generate random bias vector
            self.bias_epsilon = K.random_normal(shape=(self.units, ))

            bias = self.bias + math.multiply(self.bias_sigma,
                                             self.bias_epsilon)
            output = output + bias
        if self.activation is not None:
            output = self.activation(output)
        return output
Example #11
    def call(self , input):

        shape = list(self.shape[1:])

        shape.append(1)

        x = Reshape(shape)(input)

        return multiply(x , self.kernel)
Example #12
    def call(self, x):
        rank = len(x.shape)
        X = [0] * x.shape[-1]

        for i in range(self.n_activations):
            # Broadcasting is required for the inputs.
            X[i] = tfm.tanh(tfm.multiply(self.alphas[i], x[:, :, i:i + 1]))

        X = K.concatenate(X, axis=2)

        return X
Example #13
    def predict_m(self, kbar, k_fbar, wbar, fbar, w_0bar, Δ):
        # Take relevant parameters out of log-space
        if self.options.kinetic_exponential:
            kin = (tf.reshape(tf.exp(logit(kbar[:, i])), (-1, 1))
                   for i in range(kbar.shape[1]))
        else:
            kin = (tf.reshape(logit(kbar[:, i]), (-1, 1))
                   for i in range(kbar.shape[1]))
        if self.options.initial_conditions:
            a_j, b_j, d_j, s_j = kin
        else:
            b_j, d_j, s_j = kin
        w = (wbar)
        w_0 = tf.reshape((w_0bar), (-1, 1))
        τ = self.data.τ
        N_p = self.data.τ.shape[0]

        p_i = inverse_positivity(fbar)
        if self.options.translation:
            p_i = self.calculate_protein(fbar, k_fbar, Δ)

        # Calculate m_pred
        resolution = τ[1] - τ[0]
        interactions = tf.matmul(w, tfm.log(p_i + 1e-100)) + w_0
        G = tfm.sigmoid(interactions)  # TF Activation Function (sigmoid)
        sum_term = G * tfm.exp(d_j * τ)
        integrals = tf.concat(
            [
                tf.zeros((self.num_replicates, self.num_genes, 1),
                         dtype='float64'),  # Trapezoid rule
                0.5 * resolution *
                tfm.cumsum(sum_term[:, :, :-1] + sum_term[:, :, 1:], axis=2)
            ],
            axis=2)
        exp_dt = tfm.exp(-d_j * τ)
        integrals = tfm.multiply(exp_dt, integrals)

        m_pred = b_j / d_j + s_j * integrals
        if self.options.initial_conditions:
            m_pred += tfm.multiply((a_j - b_j / d_j), exp_dt)
        return m_pred
Example #14
    def call(self, inputs, mask=None):
        # Builds input
        x, y = tf.split(inputs, num_or_size_splits=2, axis=1)
        x2 = tfm.square(x)
        y2 = tfm.square(y)
        xy = tfm.multiply(x, y)

        quad_inputs = tf.stack(
            [x2, xy, y2, x, y, tf.ones((tf.shape(x)))], axis=1)
        quad_outputs = tf.squeeze(tf.matmul(self.coeffs, quad_inputs),
                                  axis=[1])

        return quad_outputs
Example #15
File: model.py Project: TTomilin/GVizDoom
    def call(self, inputs):
        # add noise to kernel
        self.kernel_epsilon = K.random_normal(shape=self.kernel_shape)

        w = self.kernel + math.multiply(self.kernel_sigma, self.kernel_epsilon)

        outputs = K.conv2d(inputs,
                           w,
                           strides=self.strides,
                           padding=self.padding,
                           data_format=self.data_format,
                           dilation_rate=self.dilation_rate)

        if self.use_bias:
            self.bias_epsilon = K.random_normal(shape=(self.filters, ))

            b = self.bias + math.multiply(self.bias_sigma, self.bias_epsilon)
            outputs = K.bias_add(outputs, b, data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
Example #16
    def call(self, inputs, mask=None):
        # Builds input
        x, y = tf.split(inputs, num_or_size_splits=2, axis=1)
        x2 = tfm.square(x)
        y2 = tfm.square(y)
        xy = tfm.multiply(x, y)

        quad_inputs = tf.stack(
            [x2, xy, y2, x, y, tf.ones((tf.shape(x)))], axis=1)
        quad_outputs = tf.reduce_sum(
            tf.multiply(self.coeffs, tf.transpose(quad_inputs,
                                                  perm=(0, 2, 1))), 2)

        return quad_outputs
Example #17
def compute_angle_tensor(pts1, pts2):
    """ Compute the angle between pt1 and pt2 with respect to the origin
        Input:
          pts1: batch_size x 1 x 3 tensor
          pts2: batch_size x 1 x 3 tensor
    """
    b = tf.constant([0.,0.,0.])
    angle_diff = []
    for pt1, pt2 in zip(pts1, pts2):
        ba = tf.subtract(pt1, b)
        bc = tf.subtract(pt2, b)
        cosine_angle = tm.divide(tf.tensordot(ba, bc, 1), tm.multiply(tf.norm(ba), tf.norm(bc)))
        angle = tm.acos(cosine_angle)
        angle_diff.append(tf.cast(angle, tf.float32))
    return tf.stack(angle_diff, axis=0)
Example #18
def psnr(y_label, y_pred):
    """
    PSNR is Peak Signal to Noise Ratio, which is computed from the mean squared error.

    It can be calculated as
    PSNR = 20 * log10(MAXp) - 10 * log10(MSE)

    When providing an unscaled input, MAXp = 255. Therefore 20 * log10(255)== 48.1308036087.
    However, since we are scaling our input, MAXp = 1. Therefore 20 * log10(1) = 0.
    Thus we remove that component completely and only compute the remaining MSE component.
    """
    _result = subtract(y_label, y_pred)
    _result = square(_result)
    _result = tf_mean(_result)
    _result = multiply(-10., log(_result, 10.))
    return _result
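For a quick sanity check, TensorFlow's built-in PSNR can be run on the same scaled inputs; with max_val=1.0 the 20 * log10(MAXp) term is zero, matching the simplification described above:

import tensorflow as tf

a = tf.random.uniform([1, 32, 32, 1])    # inputs scaled to [0, 1]
b = tf.random.uniform([1, 32, 32, 1])
reference = tf.image.psnr(a, b, max_val=1.0)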
Example #19
    def call(self, x, beta):
        if self.res:
            l_stack, m_stack, r_stack = tf.unstack(x, axis=-1)
            m_stack2 = m_stack
        else:
            l_stack = x
            r_stack = x
            m_stack = tf.zeros_like(x)
        #Left stack is cropped, padded followed by convolution
        l_stack = self.l_cropping(l_stack)
        l_stack = self.l_padding(l_stack)
        l_stack = self.l_conv(l_stack)
        #Conditioning on beta
        l_stack += self.l_seq(beta)

        #Right stack is cropped, padded followed by convolution
        r_stack = self.r_cropping(r_stack)
        r_stack = self.r_padding(r_stack)
        r_stack = self.r_conv(r_stack)
        #Conditioning on beta
        r_stack += self.r_seq(beta)

        #Update/initialise m_stack
        m_stack = self.m_conv(m_stack)
        m_stack += tfm.add(l_stack, r_stack)

        #Gating operation on m_stack
        m_stack0, m_stack1 = tf.split(m_stack, 2, axis=-1)
        m_stack0 = tfk.activations.tanh(m_stack0)
        m_stack1 = tfk.activations.sigmoid(m_stack1)
        m_stack = tfm.multiply(m_stack0, m_stack1)

        if not self.last_layer:
            l_stack = self.l_conv2(l_stack)
            r_stack = self.r_conv2(r_stack)

        #Convolve m_stack2, m_stack and connect them
        m_stack = self.m_conv2(m_stack)
        if self.res:
            m_stack2 = self.m_res_conv(m_stack2)
            m_stack = tfm.add(m_stack, m_stack2)

        if self.last_layer:
            output = m_stack
        else:
            output = tf.stack([l_stack, m_stack, r_stack], axis=-1)
        return output
Example #20
def reparameterisation_trick(mu, log_sig_sq):
    '''
    Sample from Gaussian such that it stays differentiable
    INPUTS:
        mu - mean of distribution
        log_sig_sq - log variance of distribution
    OUTPUTS:
        samp - sample from distribution
    '''

    eps = tf.random.normal([tf.shape(mu)[0], tf.shape(mu)[1]],
                           0,
                           1.,
                           dtype=tf.float32)
    samp = tfm.add(mu, tfm.multiply(tfm.sqrt(tfm.exp(log_sig_sq)), eps))

    return samp
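A minimal usage sketch (the shapes and values are illustrative, not from the original project):

import tensorflow as tf

mu = tf.zeros([8, 2])                    # mean of an 8-sample, 2-D latent
log_sig_sq = tf.zeros([8, 2])            # log variance 0 -> unit variance
z = reparameterisation_trick(mu, log_sig_sq)    # (8, 2) sample drawn as mu + sqrt(exp(log_sig_sq)) * eps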
Example #21
File: PST_func.py Project: Adigorla/PCNN
def PST(I, LPF, Phase_strength, Warp_strength, Threshold_min, Threshold_max):
    #inverting Threshold_min to simplify the optimization process, so we can clip all variables between 0 and 1
    LPF = ops.convert_to_tensor_v2(LPF)
    Phase_strength = ops.convert_to_tensor_v2(Phase_strength)
    Warp_strength = ops.convert_to_tensor_v2(Warp_strength)
    I = ops.convert_to_tensor_v2(I)
    Threshold_min = ops.convert_to_tensor_v2(Threshold_min)
    Threshold_max = ops.convert_to_tensor_v2(Threshold_max)

    Threshold_min = -Threshold_min
    L = 0.5
    x = tf.linspace(-L, L, I.shape[0])
    y = tf.linspace(-L, L, I.shape[1])
    [X1, Y1] = (tf.meshgrid(x, y))
    X = tf.transpose(X1)
    Y = tf.transpose(Y1)
    [THETA, RHO] = cart2pol(X, Y)
    # Apply localization kernel to the original image to reduce noise
    Image_orig_f = sig.fft2d(tf.dtypes.cast(I, tf.complex64))

    tmp6 = (LPF**2.0) / tfm.log(2.0)
    tmp5 = tfm.sqrt(tmp6)
    tmp4 = (tfm.divide(RHO, tmp5))
    tmp3 = -tfm.pow(tmp4, 2)
    tmp2 = tfm.exp(tmp3)
    expo = fftshift(tmp2)
    Image_orig_filtered = tfm.real(
        sig.ifft2d((tfm.multiply(tf.dtypes.cast(Image_orig_f, tf.complex64),
                                 tf.dtypes.cast(expo, tf.complex64)))))
    # Constructing the PST Kernel
    tp1 = tfm.multiply(RHO, Warp_strength)
    PST_Kernel_1 = tfm.multiply(
        tp1, tfm.atan(tfm.multiply(RHO, Warp_strength))
    ) - 0.5 * tfm.log(1.0 + tfm.pow(tf.multiply(RHO, Warp_strength), 2.0))
    PST_Kernel = PST_Kernel_1 / tfm.reduce_max(PST_Kernel_1) * Phase_strength
    # Apply the PST Kernel
    temp = tfm.multiply(
        fftshift(
            tfm.exp(
                tfm.multiply(tf.dtypes.complex(0.0, -1.0),
                             tf.dtypes.cast(PST_Kernel,
                                            tf.dtypes.complex64)))),
        sig.fft2d(tf.dtypes.cast(Image_orig_filtered, tf.dtypes.complex64)))
    Image_orig_filtered_PST = sig.ifft2d(temp)

    # Calculate phase of the transformed image
    PHI_features = tfm.angle(Image_orig_filtered_PST)

    out = PHI_features
    out = (out / tfm.reduce_max(out)) * 3

    return out
Example #22
    def call(self, x, beta):
        """Produces the network output for a sample

        Args:
            x (float32): A sample configuration of Ising model
            beta (float32): Inverse temperature

        Returns:
            float32: Probabilities of +1 value at every lattice site
        """
        for i in range(self.net_depth):
            x = self.custom_layers[i](x, beta)
        x = self.custom_layers[self.net_depth](x)
        if self.z2 and x.shape[1] == self.L:
            x_hat = tfm.multiply(x, self.x_hat_mask)
            x_hat = tfm.add(x_hat, self.x_hat_bias)
        else:
            x_hat = x
        return x_hat
Example #23
    def call(self, inputs, mask=None):
        # Saves dimensions to make code nicer
        batch_size = inputs.shape[0]
        height = inputs.shape[1]
        width = inputs.shape[2]
        num_filters = inputs.shape[3]

        # Reshapes so last 2 dimensions are a single filter
        inputs = tf.transpose(inputs, [0, 3, 1, 2])

        # Reshapes into columns for x,y
        inputs = tf.reshape(inputs, [batch_size, num_filters, -1, 2])

        # Transposes to get correct dimensions for matmul
        inputs = tf.transpose(inputs, [0, 1, 3, 2])

        # Splits tensor into x & y
        x, y = tf.split(inputs, 2, 2)

        # Calculates other components of quadratic input
        x2 = tfm.square(x)
        y2 = tfm.square(y)
        xy = tfm.multiply(x, y)

        # Builds quadratic input
        quad_input = tf.concat(
            [x2, xy, y2, x, y, tf.ones((tf.shape(x)))], axis=2)

        # Matmul -> Quadratic output
        quad_output = tf.matmul(self.coeffs, quad_input)

        # Reshapes back to original size with width / 2
        quad_output = tf.reshape(
            quad_output, [batch_size, num_filters, height,
                          int(width / 2)])

        # Rearranges axis to have num_filters last (as CONV2D expects)
        quad_output = tf.transpose(quad_output, [0, 2, 3, 1])

        return quad_output
Example #24
    def call(self, x):

        X = []
        for i in range(self.n_activations):
            self.i = i
            frame = x[:, :, i:i + 1]

            sigma1 = K.variable(
                np.zeros((self.batch, frame.shape[1], frame.shape[2])))
            for j in range(self.v_order):
                j_fact = math.factorial(j)
                p = tfm.divide(K.pow(frame, j), j_fact)
                sigma1 = tfm.add(sigma1, tfm.multiply(self.v[i][j], p))

            sigma2 = K.variable(
                np.zeros((self.batch, frame.shape[1], frame.shape[2])))
            for j in range(1, self.w_order + 1):
                self.k = j
                sigma2 = tfm.add(sigma2, K.map_fn(self.basis2, frame))

            X.append(tfm.add(sigma1, sigma2))

        output = K.concatenate(X, axis=2)
        return output
Example #25
    def basis2(self, x):
        #         b_2_k = K.variable(np.zeros(x.shape[0], ))

        k_1 = tf.constant(self.breakpoints[self.k - 1], dtype=tf.float32)
        #         print(k_1.shape)
        k = tf.constant(self.breakpoints[self.k], dtype=tf.float32)

        def f1():
            return tfm.add(
                tfm.subtract(tfm.divide(tfm.multiply(x, x), 2),
                             tfm.multiply(k_1, x)),
                tfm.divide(tfm.multiply(k_1, k_1), 2))

        def f2():
            val1 = tfm.divide(
                tfm.multiply(tfm.subtract(k, k_1), tfm.subtract(k, k_1)), 2)
            val2 = tfm.multiply(tfm.subtract(k, k_1), tfm.subtract(x, k))
            val = tfm.add(val1, val2)
            return val

        b2ks = [0] * x.shape[0]
        for i in range(x.shape[0]):
            b2ks[i] = tf.cond(
                tfm.logical_and(tfm.greater(x[i, 0], k_1),
                                tfm.less(x[i, 0], k)), lambda: f1(),
                lambda: f2())
            # print(b2ks[i].shape)

        b_2_k = K.concatenate(b2ks)
        #         if tfm.greater(x, k_1) and tfm.less(x, k):

        #         elif tfm.less(k, x):

        return tfm.multiply(self.w[self.i][self.k - 1], b_2_k)
Example #26
 def __buildProducts(self, Labels: TF, Predictions: TF) -> TF:
     return multiply(self.__NormalizedCosts, multiply(
         Labels,
         Predictions,
     ))
Example #27
 def f2():
     val1 = tfm.divide(
         tfm.multiply(tfm.subtract(k, k_1), tfm.subtract(k, k_1)), 2)
     val2 = tfm.multiply(tfm.subtract(k, k_1), tfm.subtract(x, k))
     val = tfm.add(val1, val2)
     return val
Example #28
def tf_ssim(x, y, is_normalized=False):
    """
    k1 = 0.01
    k2 = 0.03
    L = 1.0 if is_normalized else 255.0
    c1 = np.power(k1 * L, 2)
    c2 = np.power(k2 * L, 2)
    c3 = c2 / 2
    """
    k1 = 0.01
    k2 = 0.03
    L = 1.0 if is_normalized else 255.0
    c1 = tf_pow(multiply(k1, L), 2.0)
    c2 = tf_pow(multiply(k2, L), 2.0)
    c3 = divide(c2, 2.0)

    # if type(x) is np.ndarray:
    #      x = tf.convert_to_tensor(x, dtype=tf.float32)
    # if type(y) is np.ndarray:
    #      y = tf.convert_to_tensor(y, dtype=tf.float32)
    """
    ux = x.mean()
    uy = y.mean()
    """
    ux = tf_mean(x)
    uy = tf_mean(y)
    """
    std_x = x.std()
    std_y = y.std()
    """
    std_x = tf_std(x)
    std_y = tf_std(y)
    """
    xy = (x - ux) * (y - uy)
    std_xy = xy.mean()
    """
    xy = multiply(subtract(x, ux), subtract(y, uy))
    std_xy = tf_mean(xy)
    """
    l_xy = (2 * ux * uy + c1) / (np.power(ux, 2) + np.power(uy, 2) + c1)
    """
    l_son = add(multiOperation(multiply, 2.0, ux, uy), c1)
    l_mom = multiOperation(add, tf_pow(ux, 2.0), tf_pow(uy, 2.0), c1)
    l_xy = divide(l_son, l_mom)
    """
    c_xy = (2 * std_x * std_y + c2) / (np.power(std_x, 2) + np.power(std_y, 2) + c2)
    """
    c_son = add(multiOperation(multiply, 2.0, std_x, std_y), c2)
    c_mom = multiOperation(add, tf_pow(std_x, 2.0), tf_pow(std_y, 2.0), c2)
    c_xy = divide(c_son, c_mom)
    """
    s_xy = (std_xy + c3) / (std_x * std_y + c3)
    """
    s_son = add(std_xy, c3)
    s_mom = add(multiply(std_x, std_y), c3)
    s_xy = divide(s_son, s_mom)

    one = tf.constant(1.0)
    _ssim = multiOperation(multiply, l_xy, c_xy, s_xy)
    _result = tf.cond(greater(_ssim, one), lambda: one, lambda: _ssim)

    return _result
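For comparison, TensorFlow also provides a windowed SSIM (tf.image.ssim); the function above uses global image statistics instead, so the two values will generally differ:

import tensorflow as tf

x = tf.random.uniform([1, 64, 64, 1])    # normalized inputs, hence max_val=1.0
y = tf.random.uniform([1, 64, 64, 1])
windowed = tf.image.ssim(x, y, max_val=1.0)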
Example #29
 def call(self, x):
     x_hat = self.net(x)
     if self.z2:
         x_hat = tfm.multiply(x_hat, self.x_hat_mask)
         x_hat = tfm.add(x_hat, self.x_hat_bias)
     return x_hat
Example #30
 def f1():
     return tfm.add(
         tfm.subtract(tfm.divide(tfm.multiply(x, x), 2),
                      tfm.multiply(k_1, x)),
         tfm.divide(tfm.multiply(k_1, k_1), 2))