Example #1
def PST(I, LPF, Phase_strength, Warp_strength, Threshold_min, Threshold_max):
    # Invert Threshold_min to simplify the optimization process, so all variables can be clipped between 0 and 1
    LPF = ops.convert_to_tensor_v2(LPF)
    Phase_strength = ops.convert_to_tensor_v2(Phase_strength)
    Warp_strength = ops.convert_to_tensor_v2(Warp_strength)
    I = ops.convert_to_tensor_v2(I)
    Threshold_min = ops.convert_to_tensor_v2(Threshold_min)
    Threshold_max = ops.convert_to_tensor_v2(Threshold_max)

    Threshold_min = -Threshold_min
    L = 0.5
    x = tf.linspace(-L, L, I.shape[0])
    y = tf.linspace(-L, L, I.shape[1])
    X1, Y1 = tf.meshgrid(x, y)
    X = tf.transpose(X1)
    Y = tf.transpose(Y1)
    THETA, RHO = cart2pol(X, Y)
    # Apply localization kernel to the original image to reduce noise
    Image_orig_f = sig.fft2d(tf.dtypes.cast(I, tf.complex64))

    # Gaussian low-pass kernel in the frequency domain
    sigma = tfm.sqrt((LPF**2.0) / tfm.log(2.0))
    expo = fftshift(tfm.exp(-tfm.pow(tfm.divide(RHO, sigma), 2)))
    Image_orig_filtered = tfm.real(
        sig.ifft2d((tfm.multiply(tf.dtypes.cast(Image_orig_f, tf.complex64),
                                 tf.dtypes.cast(expo, tf.complex64)))))
    # Constructing the PST Kernel
    tp1 = tfm.multiply(RHO, Warp_strength)
    PST_Kernel_1 = tfm.multiply(
        tp1, tfm.atan(tfm.multiply(RHO, Warp_strength))
    ) - 0.5 * tfm.log(1.0 + tfm.pow(tf.multiply(RHO, Warp_strength), 2.0))
    PST_Kernel = PST_Kernel_1 / tfm.reduce_max(PST_Kernel_1) * Phase_strength
    # Apply the PST Kernel
    temp = tfm.multiply(
        fftshift(
            tfm.exp(
                tfm.multiply(tf.dtypes.complex(0.0, -1.0),
                             tf.dtypes.cast(PST_Kernel,
                                            tf.dtypes.complex64)))),
        sig.fft2d(tf.dtypes.cast(Image_orig_filtered, tf.dtypes.complex64)))
    Image_orig_filtered_PST = sig.ifft2d(temp)

    # Calculate phase of the transformed image
    PHI_features = tfm.angle(Image_orig_filtered_PST)

    out = PHI_features
    out = (out / tfm.reduce_max(out)) * 3

    return out
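The snippet above leans on aliases and helpers that are not shown (tf, tfm, sig, ops, fftshift, cart2pol). A minimal sketch of those assumed pieces, with names inferred from how they are used, so the function can actually run:

import tensorflow as tf
from tensorflow.python.framework import ops  # provides convert_to_tensor_v2

tfm = tf.math
sig = tf.signal
fftshift = tf.signal.fftshift

def cart2pol(x, y):
    # Cartesian grid -> polar coordinates (angle, radius), as used by PST above.
    theta = tfm.atan2(y, x)
    rho = tfm.sqrt(tfm.square(x) + tfm.square(y))
    return theta, rho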
Example #2
    def create_model(self):
        """Create RNN model."""
        n_filter = 64

        spec = layers.Input(shape=[None, 40, 1], dtype=np.float32)
        x = layers.Conv2D(n_filter, (3, 3), padding="same",
                          activation=None)(spec)
        x = layers.BatchNormalization(momentum=0.95)(x)
        x = layers.ReLU()(x)
        x = layers.Conv2D(n_filter, (3, 3), padding="same", activation=None)(x)
        x = layers.BatchNormalization(momentum=0.95)(x)
        x = layers.ReLU()(x)
        x = layers.MaxPool2D((1, 2))(x)

        x = layers.Conv2D(n_filter, (3, 3), padding="same", activation=None)(x)
        x = layers.BatchNormalization(momentum=0.95)(x)
        x = layers.ReLU()(x)
        x = layers.Conv2D(n_filter, (3, 3), padding="same", activation=None)(x)
        x = layers.BatchNormalization(momentum=0.95)(x)
        x = layers.ReLU()(x)
        x = layers.MaxPool2D((1, 2))(x)

        x = layers.Conv2D(n_filter, (3, 3), padding="same", activation=None)(x)
        x = layers.BatchNormalization(momentum=0.95)(x)
        x = layers.ReLU()(x)
        x = layers.Conv2D(n_filter, (3, 3), padding="same", activation=None)(x)
        x = layers.BatchNormalization(momentum=0.95)(x)
        x = layers.ReLU()(x)
        x = layers.MaxPool2D((1, 2))(x)

        x = math.reduce_max(x, axis=-2)  # collapse the frequency axis before the GRUs

        x = layers.Bidirectional(
            layers.GRU(64,
                       return_sequences=True,
                       recurrent_activation='sigmoid'))(x)
        x = layers.Bidirectional(
            layers.GRU(64,
                       return_sequences=True,
                       recurrent_activation='sigmoid'))(x)

        x = layers.TimeDistributed(layers.Dense(64, activation="sigmoid"))(x)
        local_pred = layers.TimeDistributed(
            layers.Dense(1, activation="sigmoid"))(x)
        pred = math.reduce_max(local_pred, axis=-2)  # clip-level score: max over time frames
        return keras.Model(inputs=spec, outputs=[pred, local_pred])
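The two reduce_max calls carry the pooling logic of this model: the first collapses the frequency axis before the bidirectional GRUs, the second pools per-frame scores into a single clip-level score. A small self-contained check of those shape transformations (the dummy shapes are assumptions for illustration):

import tensorflow as tf

feat = tf.random.uniform((2, 100, 5, 64))            # (batch, time, freq, channels)
rnn_in = tf.math.reduce_max(feat, axis=-2)           # -> (2, 100, 64), fed to the GRUs

local_pred = tf.random.uniform((2, 100, 1))          # per-frame probabilities
clip_pred = tf.math.reduce_max(local_pred, axis=-2)  # -> (2, 1), clip-level prediction
print(rnn_in.shape, clip_pred.shape)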
Example #3
    def train_step(self, o, r, d, a, sp_batch):
        target_q = self.t_model(sp_batch, training=False)
        q_samp = r + tf.cast(tm.logical_not(d), tf.float32) * \
                     hp.Q_discount * \
                     tm.reduce_max(target_q, axis=1)
        mask = tf.one_hot(a, self.action_n, dtype=tf.float32)
        with tf.GradientTape() as tape:
            q = self.model(o, training=True)
            q_sa = tf.math.reduce_sum(q * mask, axis=1)
            loss = keras.losses.MSE(q_samp, q_sa)
            scaled_loss = self.optimizer.get_scaled_loss(loss)

        trainable_vars = self.model.trainable_variables
        scaled_gradients = tape.gradient(scaled_loss, trainable_vars)
        gradients = self.optimizer.get_unscaled_gradients(scaled_gradients)
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
Example #4
    def train_step(self, data):
        o, r, d, a, target_q = data
        num_actions = target_q.shape[-1]
        q_samp = r + tf.cast(tm.logical_not(d), tf.float32) * \
                     hp.Q_discount * \
                     tm.reduce_max(target_q, axis=1)
        mask = tf.one_hot(a, num_actions, dtype=tf.float32)

        with tf.GradientTape() as tape:
            q = self(o, training=True)
            q_sa = tf.math.reduce_sum(q * mask, axis=1)
            loss = keras.losses.MSE(q_samp, q_sa)

        trainable_vars = self.trainable_variables
        gradients = tape.gradient(loss, trainable_vars)
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
        self.compiled_metrics.update_state(q_sa, q_samp)
        return {m.name: m.result() for m in self.metrics}
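Both train_step variants build the same Q-learning target, r + (1 - done) * gamma * max_a' Q_target(s', a'), and then pick out Q(s, a) for the actions actually taken with a one-hot mask. A standalone numeric sketch of that target computation (gamma and the toy tensors below are assumptions standing in for hp.Q_discount and real batches):

import tensorflow as tf

gamma = 0.99                                     # stands in for hp.Q_discount
r = tf.constant([1.0, 0.0])                      # rewards
d = tf.constant([False, True])                   # done flags
a = tf.constant([2, 0])                          # actions taken
target_q = tf.constant([[0.1, 0.5, 0.3],         # Q(s', .) from the target network
                        [0.2, 0.4, 0.0]])
q = tf.constant([[0.0, 0.1, 0.9],                # Q(s, .) from the online network
                 [0.3, 0.2, 0.1]])

q_samp = r + tf.cast(tf.math.logical_not(d), tf.float32) * gamma \
           * tf.math.reduce_max(target_q, axis=1)    # bootstrapped targets
mask = tf.one_hot(a, target_q.shape[-1], dtype=tf.float32)
q_sa = tf.math.reduce_sum(q * mask, axis=1)          # Q(s, a) for the taken actions
loss = tf.keras.losses.MSE(q_samp, q_sa)
print(q_samp.numpy(), q_sa.numpy(), loss.numpy())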
Example #5
        def proceed():
            num_tfs = current_state.shape[0]
            new_state = current_state
            Δrange = np.arange(self.lower, self.upper + 1, dtype='float64')
            Δrange_tf = tf.range(self.lower, self.upper + 1, dtype='float64')
            for i in range(num_tfs):
                # Generate normalised cumulative distribution
                probs = list()
                mask = np.zeros((num_tfs, ), dtype='float64')
                mask[i] = 1

                for Δ in Δrange:
                    test_state = (1 - mask) * new_state + mask * Δ

                    # if j == 0:
                    #     cumsum.append(tf.reduce_sum(self.likelihood.genes(
                    #         all_states=all_states,
                    #         state_indices=self.state_indices,
                    #         Δ=test_state,
                    #     )) + tf.reduce_sum(self.prior.log_prob(Δ)))
                    # else:

                    probs.append(
                        tf.reduce_sum(
                            self.likelihood.genes(
                                all_states=all_states,
                                state_indices=self.state_indices,
                                Δ=test_state,
                            )) + tf.reduce_sum(self.prior.log_prob(Δ)))
                # curri = tf.cast(current_state[i], 'int64')
                # start_index = tf.reduce_max([self.lower, curri-2])
                # probs = tf.gather(probs, tf.range(start_index,
                #                                   tf.reduce_min([self.upper+1, curri+3])))

                probs = tf.stack(probs) - tfm.reduce_max(probs)
                probs = tfm.exp(probs)
                probs = probs / tfm.reduce_sum(probs)
                cumsum = tfm.cumsum(probs)
                u = tf.random.uniform([], dtype='float64')
                index = tf.where(
                    cumsum == tf.reduce_min(cumsum[(cumsum - u) > 0]))
                chosen = Δrange_tf[index[0][0]]
                new_state = (1 - mask) * new_state + mask * chosen
            return new_state
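The loop above scores every candidate Δ with the likelihood plus prior, then draws one value from the resulting categorical distribution by inverting its cumulative sum. A self-contained sketch of that final sampling step (the log-probabilities and the Δ range below are made-up placeholders):

import tensorflow as tf
tfm = tf.math

log_probs = tf.constant([-3.2, -1.1, -0.7, -2.5], dtype='float64')  # placeholder scores
delta_range = tf.range(1, 5, dtype='float64')                       # placeholder Δ values

probs = tfm.exp(log_probs - tfm.reduce_max(log_probs))  # stabilised exponentiation
probs = probs / tfm.reduce_sum(probs)                   # normalise to a distribution
cumsum = tfm.cumsum(probs)
u = tf.random.uniform([], dtype='float64')
index = tf.where(cumsum == tf.reduce_min(cumsum[(cumsum - u) > 0]))  # inverse-CDF lookup
chosen = delta_range[index[0][0]]
print(chosen.numpy())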
Example #6
    def train_step(self, o, r, d, a, sp_batch, total_step, weights):
        target_q = self.t_model(sp_batch, training=False)
        q_samp = r + tf.cast(tm.logical_not(d), tf.float32) * \
                     hp.Q_discount * \
                     tm.reduce_max(target_q, axis=1)
        mask = tf.one_hot(a, self.action_n, dtype=tf.float32)
        with tf.GradientTape() as tape:
            q = self.model(o, training=True)
            q_sa = tf.math.reduce_sum(q * mask, axis=1)
            # loss = keras.losses.MSE(q_samp, q_sa)
            unweighted_loss = tf.math.square(q_samp - q_sa)
            loss = tf.math.reduce_mean(weights * unweighted_loss)
            tf.summary.scalar('Loss', loss, total_step)
            scaled_loss = self.model.optimizer.get_scaled_loss(loss)

        priority = (tf.math.abs(q_samp - q_sa) + hp.Buf.epsilon)**hp.Buf.alpha
        trainable_vars = self.model.trainable_variables
        scaled_gradients = tape.gradient(scaled_loss, trainable_vars)
        gradients = self.model.optimizer.get_unscaled_gradients(
            scaled_gradients)
        self.model.optimizer.apply_gradients(zip(gradients, trainable_vars))
        return priority
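This variant (like the first train_step in Example #3) relies on mixed-precision loss scaling: the loss is scaled up before backpropagation and the gradients are unscaled before being applied. A minimal self-contained sketch of that pattern, assuming the optimizer is a keras LossScaleOptimizer wrapping plain Adam:

import tensorflow as tf
from tensorflow import keras

opt = keras.mixed_precision.LossScaleOptimizer(keras.optimizers.Adam())
w = tf.Variable(2.0)

with tf.GradientTape() as tape:
    loss = tf.square(w - 1.0)
    scaled_loss = opt.get_scaled_loss(loss)          # scale up before backprop

scaled_grads = tape.gradient(scaled_loss, [w])
grads = opt.get_unscaled_gradients(scaled_grads)     # scale back down
opt.apply_gradients(zip(grads, [w]))
print(w.numpy())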
def toTarget(tensor):
    # Normalise by the global maximum, then upscale the result 8x.
    buf1 = reduce_max(K.flatten(tensor))  # global max over all elements
    print(buf1)
    buf2 = tensor
    return imresize(buf2 / buf1, 8.0)
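imresize here is presumably scipy.misc.imresize (removed in SciPy 1.3), so the snippet will not run on current SciPy. A rough TensorFlow-only equivalent of the same normalise-and-upscale step (the function name and the fixed 8x factor are assumptions taken from the call above):

import tensorflow as tf

def to_target_tf(tensor, scale=8):
    # Normalise by the global maximum, then upscale the 2-D map `scale` times.
    peak = tf.reduce_max(tf.reshape(tensor, [-1]))
    normalised = tensor / peak
    h, w = tensor.shape[0], tensor.shape[1]
    resized = tf.image.resize(normalised[..., tf.newaxis], (h * scale, w * scale))
    return resized[..., 0]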