def edge_weights(flatten_image, rows, cols, std_intensity=3, std_position=1.0):
    """
    flatten_image : 1 dim tf array of the row flattened image
    ( intensity is the average of the three channels)
    std_intensity : standard deviation for intensity
    std_position : standard deviation for position
    rows : rows of the original image (unflattened image)
    cols : cols of the original image (unflattened image)
    Output :
    weights :  2d tf array edge weights in the pixel graph
    Used parameters :
    n : number of pixels
    """
    A = outer_product(flatten_image, tf.ones_like(flatten_image))
    A_T = tf.transpose(A)
    intensity_weight = tf.exp(-1 * tf.square(
        tf.cast((tf.realdiv((A - A_T), std_intensity)), dtype=tf.float32)))

    xx, yy = tf.meshgrid(tf.range(rows), tf.range(cols))
    xx = tf.reshape(xx, (rows * cols, ))
    yy = tf.reshape(yy, (rows * cols, ))
    A_x = outer_product(xx, tf.ones_like(xx))
    A_y = outer_product(yy, tf.ones_like(yy))

    xi_xj = A_x - tf.transpose(A_x)
    yi_yj = A_y - tf.transpose(A_y)

    sq_distance_matrix = tf.square(xi_xj) + tf.square(yi_yj)
    sq_distance_matrix = tf.cast(sq_distance_matrix, tf.float32)
    dist_weight = tf.exp(
        -1 * tf.realdiv(sq_distance_matrix, tf.square(std_position)))
    weight = tf.multiply(intensity_weight, dist_weight)
    return weight
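
The helper outer_product is not shown in this snippet. A minimal stand-in, assuming it simply returns the outer product of two rank-1 tensors (so that A[i, j] = flatten_image[i] above), could look like this:

import tensorflow as tf

def outer_product(a, b):
    # Outer product of two rank-1 tensors: result[i, j] = a[i] * b[j].
    # Broadcasting is used so the same helper works for the float intensities
    # and the integer pixel coordinates that edge_weights passes in.
    return tf.expand_dims(a, 1) * tf.expand_dims(b, 0)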
 def tf_preprocess(self,
                   image_size=84,
                   crop_size=92,
                   random_crop=True,
                   random_flip=True,
                   random_color=True,
                   whiten=False):
     inp = tf.placeholder(tf.uint8, [None, None, 3])
     image = tf.realdiv(tf.cast(inp, tf.float32), 255.0)
     # image = debug_identity(image)
     if random_crop:
         log.info("Apply random cropping")
         image = tf.image.resize_image_with_crop_or_pad(
             image, crop_size, crop_size)
         image = tf.random_crop(image, [image_size, image_size, 3])
     else:
         image = tf.image.resize_image_with_crop_or_pad(
             image, image_size, image_size)
     if random_flip:
         log.info("Apply random flipping")
         image = tf.image.random_flip_left_right(image)
     # Brightness/saturation/contrast provides small gains of .2%~.5% on CIFAR.
     if random_color:
         image = tf.image.random_brightness(image, max_delta=63. / 255.)
         image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
         image = tf.image.random_contrast(image, lower=0.2, upper=1.8)
     if whiten:
         log.info("Apply whitening")
         image = tf.image.per_image_whitening(image)
     return inp, image
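
A rough usage sketch for the preprocessing graph above, assuming preprocessor is an instance of the owning class and that the module-level logger log used inside the method exists:

import numpy as np
import tensorflow as tf

# Build the preprocessing ops once, then feed raw uint8 images through the placeholder.
inp, image_op = preprocessor.tf_preprocess(image_size=84, crop_size=92)
with tf.Session() as sess:
    raw = np.random.randint(0, 256, size=(100, 100, 3), dtype=np.uint8)
    out = sess.run(image_op, feed_dict={inp: raw})
    print(out.shape)  # (84, 84, 3) after cropping, flipping and color jitter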
Example #3
def generalised_dice_loss(prediction, ground_truth, weight_map=None):
    # prediction: (batch_size, height, width, nchannels)
    # ground_truth: (batch_size, height, width)
    # weight_map: (batch_size, height, width), binary mask including regions to consider

    dice_nclasses = prediction.shape[3].value  # number of classes
    ground_truth = tf.cast(ground_truth, dtype=tf.int64, name='dice_cast_ground_truth')
    hot_labels = tf.one_hot(ground_truth, axis=-1, depth=dice_nclasses, name='dice_hot_labels')
    if weight_map is not None:
        # Tile the binary weight map across the class dimension, then mask
        # both the one-hot labels and the predictions with it.
        weight_map_tile = tf.stack([weight_map, weight_map], axis=3)
        if dice_nclasses > 2:
            for _ in range(dice_nclasses - 2):
                weight_map_tile = tf.concat(
                    [weight_map_tile, tf.expand_dims(weight_map, 3)], axis=3, name='dice_concat_weight_map')
        hot_labels = hot_labels * weight_map_tile
        prediction = prediction * weight_map_tile

    sum_labels = tf.reduce_sum(hot_labels, axis=(1,2), name='dice_sum_labels')
    weights = tf.reciprocal(tf.square(tf.add(sum_labels, 1e-6)), name='dice_weights')
    den_part = tf.add(prediction, hot_labels, name='dice_den_part')
    num_part = tf.multiply(prediction, hot_labels, name='dice_num_part')
    den_part_sum = tf.reduce_sum(den_part, axis=(1,2), name='dice_den_part_sum')
    num_part_sum = tf.reduce_sum(num_part, axis=(1,2), name='dice_num_part_sum')
    gdl_den = tf.reduce_sum(tf.add(tf.multiply(weights, den_part_sum), 1e-6, name='dice_add'), axis=1)
    gdl_num = tf.reduce_sum(tf.multiply(weights, num_part_sum), axis=1, name='dice_gdl_num')
    real_div = tf.realdiv(gdl_num ,gdl_den)
    gdl_compl = tf.scalar_mul(2, real_div)
    gdl_array = tf.subtract(1., gdl_compl)
    gdl = tf.reduce_mean(gdl_array)
    
    return gdl
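
A quick sanity check of the loss above on random data, following the shapes given in the comments (a toy sketch, not part of the original code):

import numpy as np
import tensorflow as tf

batch, h, w, nclasses = 2, 8, 8, 3
prediction = tf.nn.softmax(tf.random_normal([batch, h, w, nclasses]))
ground_truth = tf.constant(np.random.randint(0, nclasses, size=(batch, h, w)))
loss = generalised_dice_loss(prediction, ground_truth)
with tf.Session() as sess:
    print(sess.run(loss))  # a scalar, roughly in [0, 1] for softmax predictions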
Example #4
def matrix_other():
    isess = tf.InteractiveSession()
    X = tf.Variable(tf.eye(3))
    W = tf.Variable(tf.random_normal(shape=(3, 3)))

    X.initializer.run()
    W.initializer.run()
    logger.info("X\n%s" % X.eval())
    logger.info("W\n%s" % W.eval())

    logger.info("tf.div(X,W)\n%s" % tf.div(X, W).eval())
    logger.info("tf.truediv(X,W)\n%s" % tf.truediv(X, W).eval())
    logger.info("tf.floordiv(X,W)\n%s" % tf.floordiv(X, W).eval())
    logger.info("tf.realdiv(X,W)\n%s" % tf.realdiv(X, W).eval())

    # logger.info("tf.truncatediv(X,W)\n%s" % tf.truncatediv(X, W).eval())
    logger.info("tf.floor_div(X,W)\n%s" % tf.floor_div(X, W).eval())
    logger.info("tf.truncatemod(X,W)\n%s" % tf.truncatemod(X, W).eval())
    logger.info("tf.floormod(X,W)\n%s" % tf.floormod(X, W).eval())

    logger.info("tf.cross(X,W)\n%s" % tf.cross(X, W).eval())
    logger.info("tf.add_n(X,W)\n%s" % tf.add_n([X, W]).eval())
    logger.info("tf.squared_difference(X,W)\n%s" %
                tf.squared_difference(X, W).eval())

    isess.close()
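
For context on the example above: with floating-point inputs, tf.realdiv, tf.truediv and tf.div all perform elementwise real division; the differences only show up for integer operands. A small illustrative sketch (not from the original code):

import tensorflow as tf

a = tf.constant([7, 9], dtype=tf.int32)
b = tf.constant([2, 2], dtype=tf.int32)
x = tf.constant([1.0, -3.0])
y = tf.constant([2.0, 2.0])
with tf.Session() as sess:
    print(sess.run(tf.div(a, b)))      # [3 4]   integer division for int inputs
    print(sess.run(tf.truediv(a, b)))  # [3.5 4.5]  integers are promoted to float
    print(sess.run(tf.realdiv(x, y)))  # [ 0.5 -1.5]  same as truediv for float inputs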
Example #5
 def test_div(self):
     x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape(
         (2, 2))
     x = tf.placeholder(tf.float32, [2, 2], name=_TFINPUT)
     x_ = tf.realdiv(x, x)
     _ = tf.identity(x_, name=_TFOUTPUT)
     self._run_test_case([_OUTPUT], {_INPUT: x_val})
Example #6
def to_prob(unnorm):
    """
    :param unnorm: tensor with non-negative entries
    :return: tensor; probability distribution given by normalizing the input on its last dimension
    """
    stable = unnorm + TINY
    return tf.realdiv(stable, tf.reduce_sum(stable, -1, keep_dims=True))
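
The constant TINY is defined elsewhere in that module; assuming it is a small positive value such as 1e-8, the helper can be exercised like this:

import tensorflow as tf

TINY = 1e-8  # assumed value of the module-level constant used by to_prob

unnorm = tf.constant([[1.0, 3.0],
                      [0.0, 2.0]])
probs = to_prob(unnorm)
with tf.Session() as sess:
    print(sess.run(probs))  # each row sums to 1, e.g. [[0.25 0.75] [~0. ~1.]]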
Example #7
def adam(grads, velocity_m, velocity_v, var_list, lr, beta1, beta2, epsilon):
    """ADAM update.

    Args:
        grads: List of gradients of the trainable variables.
        velocity_m: List of first-moment accumulators, one per variable.
        velocity_v: List of second-moment accumulators, one per variable.
        var_list: List of variables to be optimized.
        lr: Learning rate.
        beta1: First-moment decay rate.
        beta2: Second-moment decay rate.
        epsilon: Small constant for numerical stability.

    Returns:
        var_list_new: List of new variables to be assigned.
        velocity_m_new: List of new velocity_m to be assigned.
        velocity_v_new: List of new velocity_v to be assigned.
    """
    velocity_m_new = [
        beta1 * mm + (1 - beta1) * gg
        for gg, mm in list(zip(grads, velocity_m))
    ]
    velocity_v_new = [
        beta2 * vv + (1 - beta2) * gg * gg
        for gg, vv in list(zip(grads, velocity_v))
    ]
    var_list_new = [
        var - tf.realdiv(lr * mm, (tf.sqrt(vv) + epsilon))
        for var, mm, vv in list(zip(var_list, velocity_m_new, velocity_v_new))
    ]
    return var_list_new, velocity_m_new, velocity_v_new
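
A minimal sketch of a single update step with the helper above, on one scalar variable with hypothetical values (note that this helper applies no bias correction; in these examples that is handled by the optimizer's minimize method):

import tensorflow as tf

w = tf.Variable(5.0)
grad = tf.constant(2.0)
m0 = tf.zeros_like(w)
v0 = tf.zeros_like(w)
new_vars, new_m, new_v = adam([grad], [m0], [v0], [w],
                              lr=0.1, beta1=0.9, beta2=0.999, epsilon=1e-8)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(new_vars[0]))  # ~4.68: the variable moves against the gradient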
Example #8
    def minimize(self, cost, var_list=None, global_step=None,
                 gate_gradients=1):
        """See above in class Optimizer."""
        if var_list is None:
            var_list = tf.trainable_variables()
        self._var_list = var_list

        if global_step is None:
            global_step = tf.get_variable(
                'global_step', [],
                dtype=tf.int64,
                initializer=tf.constant_initializer(0, dtype=tf.int64),
                trainable=False)

        grads = tf.gradients(cost, var_list, gate_gradients=gate_gradients)
        self._grads = grads

        self._lr, self._mom = self.reparameterize(self.hyperparams['lr'],
                                                  self.hyperparams['mom'])

        # Learning rate decay.
        decay = self.hyperparams['decay']
        t = tf.cast(global_step, self.dtype)
        time_const_f = tf.constant(self._time_const, dtype=self.dtype)
        self._lr = self._lr * tf.pow(1.0 + tf.realdiv(t, time_const_f), -decay)

        grads = tf.gradients(cost, var_list, gate_gradients=True)
        return self.apply_gradients(
            list(zip(grads, var_list)), global_step=global_step)
Example #9
def soft_n_cut_loss2_multi(seg, weight, radius=5, K=2):
    cropped_seg = []
    sum_weight = tf.reduce_sum(weight)
    padded_seg = tf.pad(
        seg,
        [
            [radius - 1, radius - 1],
            [radius - 1, radius - 1],
            [radius - 1, radius - 1],
            [radius - 1, radius - 1],
        ],
    )
    for m in range((radius - 1) * 2 + 1):
        column = []
        for n in range((radius - 1) * 2 + 1):
            column.append(
                tf.identity(padded_seg[:, :, m:m + seg.shape[2],
                                       n:n + seg.shape[3]]))
        cropped_seg.append(tf.stack(column, 4))
    cropped_seg = tf.stack(cropped_seg, 4)
    cropped_seg = tf.cast(cropped_seg, dtype=tf.float64)
    seg = tf.cast(seg, dtype=tf.float64)
    # t_weight = tf.constant(weight)
    multi1 = tf.multiply(cropped_seg, weight)
    multi2 = tf.multiply(tf.reduce_sum(multi1), seg)
    multi3 = tf.multiply(sum_weight, seg)
    assocA = tf.reduce_sum(
        tf.reshape(multi2, (multi2.shape[1], multi2.shape[2], -1)))
    assocV = tf.reduce_sum(
        tf.reshape(multi3, (multi3.shape[1], multi3.shape[2], -1)))
    assoc = tf.reduce_sum(tf.realdiv(assocA, assocV))
    return tf.add(assoc, K)  # originally -assoc, but the loss went negative, so assoc instead
Example #10
    def minimize(self, cost, var_list=None, global_step=None,
                 gate_gradients=1):
        """See above in class Optimizer."""
        if var_list is None:
            var_list = tf.trainable_variables()
        self._var_list = var_list

        if global_step is None:
            global_step = tf.get_variable(
                'global_step', [],
                dtype=tf.int64,
                initializer=tf.constant_initializer(0, dtype=tf.int64),
                trainable=False)

        grads = tf.gradients(cost, var_list, gate_gradients=gate_gradients)
        self._beta1, self._beta2 = self.reparameterize(
            self.hyperparams['beta1'], self.hyperparams['beta2'])
        t = tf.cast(global_step, self.dtype) + 1.0
        ratio = tf.realdiv(
            tf.sqrt(1.0 - tf.pow(self._beta2, t)),
            (1.0 - tf.pow(self._beta1, t)))
        self._lr = self.hyperparams['lr'] * ratio
        grads = tf.gradients(cost, var_list, gate_gradients=True)
        self._grads = grads
        return self.apply_gradients(
            list(zip(grads, var_list)), global_step=global_step)
Example #11
 def test_div(self):
     x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape(
         (2, 2))
     x = tf.placeholder(tf.float32, [2, 2], name=_TFINPUT)
     x_ = tf.realdiv(x, x)
     output = tf.identity(x_, name=_TFOUTPUT)
     actual, expected = self._run(output, {x: x_val}, {_INPUT: x_val})
     self.assertAllClose(expected, actual)
Example #12
    def __init__(self):
        self.save_path = 'checkpoints/reward_model_v3'

        self.from_scratch = False
        self.pixel_dim = [64,64,1]
        self.state_size = 64*64 #784
        self.action_size = 5 #one-hot encoder of action choice
        self.n_z = 30
        self.batchsize = 64
        self.r_estimates = 10
        self.epochs = 1
        self.BETA = 1
        self.LAMBDA = 1
        self.learning_rate = .00001

        self.state = tf.placeholder(tf.float32, [None, self.pixel_dim[0], self.pixel_dim[1], self.pixel_dim[2]])
        self.state_prime = tf.placeholder(tf.float32, [None, self.pixel_dim[0], self.pixel_dim[1], self.pixel_dim[2]])
        self.action = tf.placeholder(tf.float32, [None, self.action_size])
        self.reward = tf.placeholder(tf.float32)
        self.sample = tf.placeholder(tf.float32, [None, self.n_z])

        self.samples_1 = tf.random_normal([self.batchsize, self.n_z],0,1,dtype=tf.float32)
        self.samples_2 = tf.random_normal([self.batchsize, self.n_z],0,1,dtype=tf.float32)

        self.mu_s, self.sigma_s = self.stateEncoder(self.state, reuse=None, scope='encoder')
        self.guessed_z = self.mu_s + (self.sigma_s * self.samples_1)
        self.mu_z_prime, self.sigma_z_prime = self.transition(self.guessed_z, self.action, reuse=None)
        self.guessed_z_prime = self.mu_z_prime + (self.sigma_z_prime * self.samples_2)
        self.mu_s_prime, self.sigma_s_prime = self.stateEncoder(self.state_prime, reuse=True, scope='encoder')
        self.mu_r, self.sigma_r= self.rewardGenerator(self.guessed_z_prime, reuse=None)

        self.mu, self.sigma = self.stateEncoder(self.state, reuse=True, scope='encoder') #these are for later use when predicting
        self.mu_t, self.sigma_t = self.transition(self.sample, self.action, reuse = True)
        self.mu_re, self.sigma_re = self.rewardGenerator(self.sample, reuse=True)

        self.transition_loss = 0.5 * tf.reduce_sum(
            tf.log(tf.square(self.sigma_s_prime)+1e-8) - tf.log(tf.square(self.sigma_z_prime)+1e-8) + tf.realdiv(tf.square(self.sigma_z_prime), tf.square(self.sigma_s_prime)+1e-8)
            + tf.realdiv(tf.square(self.mu_z_prime - self.mu_s_prime), tf.square(self.sigma_s_prime)+1e-8) - 1, 1)

        self.latent_loss = 0.5 * tf.reduce_sum(tf.square(self.mu_s) + tf.square(self.sigma_s) - tf.log(1e-8+tf.square(self.sigma_s)) - 1,1)
        #self.reward_loss = self.reward_loss_func(self.samples_2, self.reward, self.mu_z_prime, self.sigma_z_prime)
        self.reward_loss = -0.5*tf.log(self.sigma_r) - tf.square(tf.realdiv(self.reward - self.mu_r, 2*self.sigma_r))
        self.cost = tf.reduce_mean(self.LAMBDA*self.transition_loss + self.BETA*self.latent_loss + self.reward_loss)

        self.optimizer = tf.train.AdagradOptimizer(self.learning_rate).minimize(self.cost)
Example #13
def lr_decay(init_lr, decay, time_const, global_step):
    """Gets a decayed learning rate using inverse decay rule.

    Args:
        init_lr: Initial learning rate at step 0.
        decay: Decay exponent.
        time_const: Time constant of the inverse decay rule.
        global_step: Time step.

    Returns:
        lr: Learning rate at the current time step.
    """
    decay = tf.constant(decay, dtype=tf.float32)
    time_const = tf.constant(time_const, dtype=tf.float32)
    decay_step = tf.cast(global_step, tf.float32)
    lr = tf.realdiv(init_lr,
                    tf.pow(1.0 + tf.realdiv(decay_step, time_const), decay))
    return lr
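
The schedule implemented above is init_lr / (1 + step / time_const) ** decay; a quick check with toy numbers:

import tensorflow as tf

global_step = tf.constant(1000, dtype=tf.int64)
lr = lr_decay(init_lr=0.1, decay=0.5, time_const=1000.0, global_step=global_step)
with tf.Session() as sess:
    print(sess.run(lr))  # 0.1 / (1 + 1000 / 1000) ** 0.5 = 0.1 / sqrt(2) ~= 0.0707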
Example #14
def multi_ch_conv(data, kernel, bidir=True):
    #Verify input tensor ranks
    tf.assert_rank(kernel, 1)
    tf.assert_rank(data, 2)

    num_channels = tf.shape(data, name="NumChan")[0]
    num_samples = tf.shape(data, name="NumSamples")[1]
    kernel_len = tf.shape(kernel, name="KernelLen")[0]
    '''
    NOTE
    The assert_rank calls above are evaluated while the graph is being built.
    The assert_less_equal below is evaluated when the graph is run, so it
    must actually be *run*, which is why we use the control_dependencies
    call below.
    '''

    asserts = [
        tf.assert_less_equal(
            kernel_len,
            num_samples,
            message=
            "JCR: Lenth of kernel must be shorter than the length of the input."
        )
    ]

    with tf.control_dependencies(asserts):

        #Pad the beginning / end of each channel so we can do a single 1D convolve
        with tf.name_scope("PadInputTensor"):
            p = tf.cast(tf.ceil(
                tf.realdiv(tf.cast(kernel_len, tf.float32),
                           tf.constant(2.0, dtype=tf.float32))),
                        dtype=tf.int32)
            data_pad = tf.pad(data, [[0, 0], [p, p]])

        with tf.name_scope("Conv1D"):
            #Reshape to use with conv1d
            #[batch,width,chan]
            data_1d = tf.reshape(data_pad, [1, -1, 1])

            #Reshape kernel to use with conv1d
            #[width,chan_in,chan_out]
            kernel_1d = tf.reshape(kernel, [-1, 1, 1])

            conv_raw = tf.nn.conv1d(data_1d, kernel_1d, 1, 'SAME')

            if bidir:
                conv_raw_rev = tf.nn.conv1d(tf.reverse(conv_raw, [1]),
                                            kernel_1d, 1, 'SAME')
                conv_raw = tf.reverse(conv_raw_rev, [1])

        with tf.name_scope("Reconstruct"):
            conv_raw_rs = tf.reshape(conv_raw, [num_channels, -1])

            conv_raw_sliced = tf.slice(conv_raw_rs, [0, p], [-1, num_samples])

        return conv_raw_sliced
Example #15
 def standard_to_natural(self, standard_params):
     batchsize = standard_params.get_shape().as_list()[0]
     ratio = tf.realdiv(
         standard_params[:, :-1],
         tf.expand_dims(standard_params[:, -1],
                        axis=-1))  # shape: [batch,n_mixtures-1,1]
     return tf.concat(
         [tf.log(ratio), tf.zeros([batchsize, 1, 1])],
         axis=1)  # shape: [batch,n_mixtures,1]
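
A hypothetical check of the reparameterisation above: parameters of shape [batch, n_mixtures, 1] are turned into log-ratios against the last component, which is pinned at zero (model stands in for an instance of the owning class):

import tensorflow as tf

standard = tf.constant([[[0.2], [0.3], [0.5]]])  # shape [1, 3, 1]
natural = model.standard_to_natural(standard)    # shape [1, 3, 1]
with tf.Session() as sess:
    print(sess.run(natural))  # [[[log(0.4)], [log(0.6)], [0.]]] ~= [[[-0.92], [-0.51], [0.]]]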
Example #16
    def __init__(self):
        self.save_path = 'checkpoints/batchnorm_weightshare'

        self.from_scratch = False
        self.pixel_dim = [64,64,1]
        self.state_size = 64*64 #784
        self.action_size = 5 #one-hot encoder of action choice
        self.n_z = 15
        self.batchsize = 64
        self.r_estimates = 50
        self.epochs = 1
        self.BETA = 1
        self.learning_rate = .00001

        self.state = tf.placeholder(tf.float32, [None, self.pixel_dim[0], self.pixel_dim[1], self.pixel_dim[2]])
        self.state_prime = tf.placeholder(tf.float32, [None, self.pixel_dim[0], self.pixel_dim[1], self.pixel_dim[2]])
        self.action = tf.placeholder(tf.float32, [None, self.action_size])
        self.reward = tf.placeholder(tf.float32)
        self.sample = tf.placeholder(tf.float32, [None, self.n_z])

        samples_1 = tf.random_normal([self.batchsize, self.n_z],0,1,dtype=tf.float32)
        samples_2 = tf.random_normal([self.batchsize, self.n_z],0,1)

        mu_z1, sigma_z1 = self.stateEncoder(self.state, reuse=None, name='encoder')
        guessed_z = mu_z1 + (sigma_z1 * samples_1)
        mu_z2, sigma_z2 = self.transition(guessed_z, self.action, reuse=None)
        guessed_z_prime = mu_z2 + (sigma_z2*samples_2)
        mu_s, sigma_s = self.stateEncoder(self.state_prime, reuse=True, name='encoder')

        self.mu, self.sigma = self.stateEncoder(self.state, reuse=True, name='encoder')
        self.mu_t, self.sigma_t = self.transition(self.sample, self.action, reuse = True)

        self.transition_loss = 0.5 * tf.reduce_sum(
            tf.log(tf.square(sigma_s)+1e-8) - tf.log(tf.square(sigma_z2)+1e-8) + tf.realdiv(tf.square(sigma_z2), tf.square(sigma_s)+1e-6)
            + tf.realdiv(tf.square(mu_z2 - mu_s), tf.square(sigma_s)+1e-6) - 1, 1)

        #self.transition_loss = self.transition_loss_func(samples_2, mu_s, mu_z2, sigma_z2)
        self.latent_loss = 0.5 * tf.reduce_sum(tf.square(mu_z1) + tf.square(sigma_z1) - tf.log(1e-8+tf.square(sigma_z1)) - 1,1)
        #self.latent_loss2 = 0.5 * tf.reduce_sum(tf.square(mu_s) + tf.square(sigma_s) - tf.log(1e-8+tf.square(sigma_s)) - 1,1)

        #self.reward_loss = tf.reduce_sum(tf.square(self.reward_pred - self.reward))
        self.cost = tf.reduce_mean(self.transition_loss + self.BETA*self.latent_loss)#  + self.reward_loss) # + self.latent_loss2)

        self.optimizer = tf.train.AdagradOptimizer(self.learning_rate).minimize(self.cost)
Example #17
def main():
    tf.enable_eager_execution()

    labels = [0, 3]
    logit = [[4.5, 4.5, 4.5, 4.5], [4.5, 4.5, 4.5, 4.5]]

    # result = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=labels)
    result = tf.realdiv(logit, 0.5)

    print(result)
Example #18
    def _distance(self, X):
        # [N,F,M]
        raw_diff = tf.subtract(tf.expand_dims(X, axis=2), self.train_features)

        if self.metric == KNN.METRIC.MANHATTAN:
            distance = tf.reduce_sum(tf.abs(raw_diff), axis=1)
        elif self.metric == KNN.METRIC.EUCLID:
            distance = tf.reduce_sum(tf.square(raw_diff), axis=1)
            if self.weighted_by_distance.exact_distance:
                distance = tf.sqrt(distance)
        elif self.metric._value_ < 1000:
            distance = tf.reduce_sum(tf.pow(tf.abs(raw_diff),
                                            self.metric.minkowski_power),
                                     axis=1)
            if self.weighted_by_distance.exact_distance:
                distance = tf.pow(distance, 1 / self.metric.minkowski_power)
        elif self.metric == KNN.METRIC.MAXIMUM:
            distance = tf.reduce_max(tf.abs(raw_diff), axis=1)
        elif self.metric == KNN.METRIC.COSINE:
            size_features = tf.sqrt(
                tf.reduce_sum(tf.square(X), axis=1, keepdims=True))
            normalized_input_features = tf.where(tf.less(size_features,
                                                         1e-7), X,
                                                 tf.realdiv(X, size_features))

            tf.print('This can be optimized')

            size_trained_features = tf.sqrt(
                tf.reduce_sum(tf.square(self.train_features), axis=1))
            normalized_trained_features = tf.where(
                tf.less(size_trained_features, 1e-7), self.train_features,
                tf.realdiv(self.train_features, size_trained_features))

            distance = tf.matmul(
                normalized_input_features,
                tf.squeeze(normalized_trained_features, axis=0))
        else:
            raise ValueError('Unknown option KNN.METRIC')
        return distance
Example #19
 def run(self, _buf):
     with tf.name_scope("RUN_IAFPower"):
         tf.assert_rank(_buf['data'], 2, message="JCR: Input must be rank 2 tensor")
         asserts= [
                 tf.assert_equal(tf.shape(_buf['data'])[0], self.mNCHAN, message="JCR: Input Dim-0 must equal number of channels")
                 ]
         
         with tf.control_dependencies(asserts):
             s_len = tf.shape(_buf['data'])[1]
             
             pphz = tf.realdiv(tf.cast(s_len, tf.float32) , tf.cast(self.mFS, tf.float32))
             
             #This value (in Hz) is used to determine the peak frequency - larger window uses more surrounding values to calculate IAF
             IAF_Peak_Window_Size = tf.realdiv(self.mPEAK_WINDOW, 2.0)
             
             asserts = [
                     tf.assert_greater_equal(IAF_Peak_Window_Size, tf.realdiv(1.0, pphz), message="JCR: Invalid number of Hz/Window")
                     ]
             with tf.control_dependencies(asserts):
                 IAF_Window = tf.ones([tf.cast(tf.multiply(IAF_Peak_Window_Size,pphz),tf.int32)])
         
                 data_fft = tf.fft(tf.cast(_buf['data'],tf.complex64))
                 
                 #Convolve the window over the FFT to strengthen peak
                 data_fft_neighbor_avg = Utils.multi_ch_conv(tf.cast(tf.abs(data_fft),tf.float32),IAF_Window)
                 half_size = tf.cast((tf.shape(data_fft_neighbor_avg)[1] / 2), tf.int32)
                 dout1 = tf.argmax(data_fft_neighbor_avg[:, 0:half_size], axis=1)
                 dout = tf.realdiv( tf.cast(dout1, tf.float32), tf.realdiv(tf.cast(half_size,tf.float32), tf.realdiv(tf.cast(self.mFS,tf.float32), 2.0)))
     
                 asserts = [
                         tf.assert_equal(tf.shape(dout)[0], self.mNCHAN, message="JCR: Input/output shape mismatch",name='FinalCheck')
                         ]
                 with tf.control_dependencies(asserts):
                     return {
                             'data':dout,
                             'summaries':_buf['summaries'],
                             #pass dout shape to updates so that the assert gets evaluated
                             'updates': _buf['updates'] + [tf.shape(dout)[0]]
                             }
Example #20
    def __init__(self, params):
        self.learn_rate = params["learn_rate"]
        self.optimizer = params[
            "optimizer"] if "optimizer" in params else "GradientDescentOptimizer"
        self.embedding_size = params["embedding_size"]
        self.num_nodes = params["num_nodes"]
        self.lbd = params["lambda"]
        self.theta = params["theta"]
        self.clip_min = params["clip_min"]
        self.tol = params["tol"] if "tol" in params else 0.0001

        def clip_by_min(x, m=0.0):
            return tf.clip_by_value(x, m, float('inf'))

        self.tensor_graph = tf.Graph()
        with self.tensor_graph.as_default():
            self.D = tf.placeholder(tf.float32,
                                    shape=[self.num_nodes, self.num_nodes])

            self.Z = tf.Variable(tf.random_uniform(
                [self.num_nodes, self.embedding_size], -1.0, 1.0),
                                 name="Z",
                                 dtype=tf.float32)

            # shape(a) = [n, 1]
            self.a = tf.norm(self.Z, axis=1, keep_dims=True)
            self.dist = 2 - 2 * tf.matmul(
                self.Z, tf.transpose(self.Z)) / clip_by_min(
                    self.a * tf.transpose(self.a), self.clip_min)
            self.D_norm = tf.realdiv(self.D, tf.norm(self.D))
            self.loss = tf.norm(
                clip_by_min(self.D_norm - tf.realdiv(
                    self.dist, clip_by_min(tf.norm(self.dist), self.clip_min)))
            ) + self.lbd * tf.exp(-self.theta * tf.norm(
                tf.realdiv(self.dist,
                           clip_by_min(tf.norm(self.dist), self.clip_min))))

            self.train_step = getattr(tf.train, self.optimizer)(
                self.learn_rate).minimize(self.loss)
Example #21
    def reward_loss_func(self, samples, reward, mu_z_prime, sigma_z_prime):
        loss = 0
        for i in range(self.r_estimates):
            guess = mu_z_prime + (sigma_z_prime*samples[i,:,:])
            if i==0:
                mu, sigma = self.rewardGenerator(guess, reuse=None)
            else:
                mu, sigma = self.rewardGenerator(guess, reuse=True)
            loss += -0.5*tf.log(sigma) - tf.square(tf.realdiv(reward - self.mu, 2*sigma))

        loss /= self.r_estimates

        return loss
Example #22
    def cosine_similarity(self, tensor1, tensor2):
        """计算cosine similarity"""
        # 把张量拉成矢量,这是我自己的应用需求
        tensor1 = tf.reshape(tensor1, shape=(1, -1))
        tensor2 = tf.reshape(tensor2, shape=(1, -1))
        # 求模长
        tensor1_norm = tf.sqrt(tf.reduce_sum(tf.square(tensor1)))
        tensor2_norm = tf.sqrt(tf.reduce_sum(tf.square(tensor2)))
        # 内积
        tensor1_tensor2 = tf.reduce_sum(tf.multiply(tensor1, tensor2))
        # cosin = tensor1_tensor2 / (tensor1_norm * tensor2_norm)
        cosin = tf.realdiv(tensor1_tensor2, tensor1_norm * tensor2_norm)

        return cosin
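
A small hypothetical check of the helper above (model stands in for an instance of the surrounding class; the tensors are flattened internally, so any shapes work):

import tensorflow as tf

t1 = tf.constant([[1.0, 0.0], [0.0, 1.0]])
t2 = tf.constant([[2.0, 0.0], [0.0, 2.0]])
cos = model.cosine_similarity(t1, t2)
with tf.Session() as sess:
    print(sess.run(cos))  # 1.0: the flattened tensors point in the same direction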
Example #23
    def add_metrics_NO(
            self, tf_prediction,
            tf_ground_truth):  # NOTICE ORDER!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        # Works ONLY for 2 classes
        pred_argmax = tf.argmax(tf_prediction, axis=3, name='pred_argmax')

        precision2 = tf.metrics.precision(tf_ground_truth, pred_argmax)
        recall2 = tf.metrics.recall(tf_ground_truth, pred_argmax)

        # When collecting both returned values, tp/tn/fp/fn come out as zero.
        true_pos, tp_update = tf.metrics.true_positives(tf_ground_truth,
                                                        pred_argmax,
                                                        name='true_pos')
        true_neg, tn_update = tf.metrics.true_negatives(tf_ground_truth,
                                                        pred_argmax,
                                                        name='true_neg')
        false_pos, fp_update = tf.metrics.false_positives(tf_ground_truth,
                                                          pred_argmax,
                                                          name='false_pos')
        false_neg, fn_update = tf.metrics.false_negatives(tf_ground_truth,
                                                          pred_argmax,
                                                          name='false_neg')

        precision0 = tf.realdiv(true_pos, tf.add(true_pos, false_pos))
        recall0 = tf.realdiv(true_pos, tf.add(true_pos, false_neg))
        specificity0 = tf.realdiv(false_pos, tf.add(false_pos, true_neg))

        precision = precision0
        recall = recall0
        specificity = specificity0

        correct_prediction = tf.equal(pred_argmax,
                                      tf.cast(tf_ground_truth, tf.int64))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        return precision, recall, specificity, accuracy, true_pos, true_neg, \
               false_pos, false_neg, precision2, recall2, pred_argmax
Example #24
def get_test_pi(a, b):
    with tf.variable_scope('test_pi'):
        max_a = tf.check_numerics(tf.maximum(a, 1e-20), 'a is going NaN')
        div_a = tf.check_numerics(tf.realdiv(1., max_a), 'div_a')
        denom = tf.check_numerics(tf.lgamma(1 + div_a + b + 1e-20),
                                  'Error here 1')
        term_1 = tf.check_numerics(tf.lgamma(1 + div_a + 1e-20),
                                   'Error here 2')
        b_max = tf.check_numerics(tf.maximum(b, 1e-20), 'b is going nan')
        log_b = tf.check_numerics(tf.log(b_max), 'Error here b_log')
        term_2 = tf.check_numerics(tf.add(log_b, tf.lgamma(b_max)),
                                   'Error here 3')
        numerator = tf.check_numerics(tf.add(term_1, term_2), 'Error here 4')
        full_subtract = tf.check_numerics(
            tf.subtract(numerator, denom, name='subtract'), 'Error here 5')

        return tf.exp(full_subtract, name='final_exp')
Example #25
def soft_n_cut_loss2(seg, weight, radius=5, K=2):
    cropped_seg = []
    sum_weight = tf.reduce_sum(weight)
    print("wei", weight)
    print("swei", sum_weight)
    # sum_weight = weight
    padded_seg = tf.pad(
        seg,
        [
            [0, 0],
            [radius - 1, radius - 1],
            [radius - 1, radius - 1],
            [0, 0],
        ],
    )
    for m in tf.range((radius - 1) * 2 + 1, dtype=tf.int32):
        column = []
        for n in tf.range((radius - 1) * 2 + 1, dtype=tf.int32):
            column.append(
                tf.identity(padded_seg[:, m:m + seg.shape[1],
                                       n:n + seg.shape[2], :]))
        cropped_seg.append(column)
    # cropped_seg = tf.stack(cropped_seg, 4)
    cropped_seg = tf.cast(
        cropped_seg, dtype=tf.float64)  # odd shape + small values -> NaN
    # print(cropped_seg)

    seg = tf.cast(seg, dtype=tf.float64)
    t_weight = tf.constant(weight)  # shape is right but all zeros
    multi1 = tf.multiply(cropped_seg, t_weight)  # odd shape and all zeros
    multi2 = tf.multiply(tf.reduce_sum(multi1), seg)
    multi3 = tf.multiply(sum_weight, seg)
    assocA = tf.reduce_sum(
        tf.reshape(multi2, (multi2.shape[1], multi2.shape[2], -1)))
    assocV = tf.reduce_sum(
        tf.reshape(multi3, (multi3.shape[1], multi3.shape[2], -1)))
    assoc = tf.reduce_sum(tf.realdiv(assocA, assocV))
    return tf.add(assoc, K)  # originally -assoc, but the loss went negative, so assoc instead
Example #26
        input_shape = (img_h, img_w,
                       frame_history_len * img_c)  # size_x, size_y

    num_actions = env.action_space.n

    # INPUT DATA: previous action and image
    prev_action = tf.placeholder(tf.float32, [None, num_options + 1],
                                 name="prev_action")

    with tf.variable_scope('input_image'):
        # placeholder for current observation (or state)
        obs_t_ph = tf.placeholder(tf.uint8, [None] + list(input_shape),
                                  name="obs_t_ph")
        # casting to float on GPU ensures lower data transfer times.
        obs_t_float = tf.realdiv(tf.cast(obs_t_ph, tf.float32),
                                 255.0,
                                 name='obs_t_float')

    # CONVOLUTION
    convolution = conv_model(obs_t_float, scope="convolution", reuse=False)

    # MANAGER
    with tf.variable_scope("manager"):
        manager = mlp_model(convolution,
                            num_options + 1,
                            scope="manager",
                            reuse=False)
        manager_pred_ac = tf.argmax(manager, axis=1, name="manager_pred_ac")
        manager_one_hot = tf.one_hot(manager_pred_ac,
                                     depth=num_options + 1,
                                     name="manager_one_hot")
Example #27
def _realdiv_maybe_zero(x, y):
  """Support tf.realdiv(x, y) where y may contain zeros."""
  return tf.where(tf.less(y, _EPSILON), tf.zeros_like(x), tf.realdiv(x, y))
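
The module-level constant _EPSILON is not shown here; assuming a small threshold such as 1e-7, the guard behaves like this:

import tensorflow as tf

_EPSILON = 1e-7  # assumed value of the module-level constant used above

x = tf.constant([1.0, 2.0, 3.0])
y = tf.constant([2.0, 0.0, 4.0])
with tf.Session() as sess:
    print(sess.run(_realdiv_maybe_zero(x, y)))  # [0.5  0.  0.75]: zero denominators give 0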
Example #28
def smape(y_true, y_prediction):
    tot = tf.reduce_sum(y_true)
    map = tf.truediv((tf.subtract(tf.abs(y_prediction), tf.abs(y_true))),
                     (tf.add(tf.abs(y_true), tf.abs(y_prediction))) / 2.0)
    smape = tf.realdiv(100 * map, tot)
    return smape
Example #29
def mape(y_true, y_prediction):
    tot = tf.reduce_sum(y_true)
    wmape = tf.realdiv(
        tf.reduce_sum(tf.abs(tf.subtract(y_true, y_prediction))),
        tot) * 100  # /tot
    return (wmape)
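
A toy check of the weighted MAPE above (the absolute error is normalised by the total of y_true rather than per element):

import tensorflow as tf

y_true = tf.constant([10.0, 20.0, 30.0])
y_pred = tf.constant([12.0, 18.0, 33.0])
with tf.Session() as sess:
    print(sess.run(mape(y_true, y_pred)))  # 100 * (2 + 2 + 3) / 60 ~= 11.67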
Example #30
def main():
    env1 = ArmEnvDQN(episode_max_length=200,
                    size_x=4,
                    size_y=3,
                    cubes_cnt=3,
                    scaling_coeff=3,
                    action_minus_reward=-1,
                    finish_reward=200,
                    tower_target_size=3)

    env2 = ArmEnvDQN_1(episode_max_length=200,
                    size_x=4,
                    size_y=3,
                    cubes_cnt=3,
                    scaling_coeff=3,
                    action_minus_reward=-1,
                    finish_reward=200,
                    tower_target_size=3)

    env3 = ArmEnvDQN_2(episode_max_length=200,
                       size_x=4,
                       size_y=3,
                       cubes_cnt=3,
                       scaling_coeff=3,
                       action_minus_reward=-1,
                       finish_reward=200,
                       tower_target_size=3)
    # print(env.reset())

    # First let's load meta graph and restore weights
    # saver = tf.train.import_meta_graph('option_lift_cube.ckpt.meta')

    #     saver2 = tf.train.import_meta_graph('/tmp/option_lift_cube.ckpt.meta')
    #     saver.restore(session, tf.train.latest_checkpoint('./'))
    frame_history_len = 1
    img_h, img_w, img_c = env1.observation_space.shape
    input_shape = (img_h, img_w, frame_history_len * img_c)  # size_x, size_y,
    num_actions = env1.action_space.n

    #     # placeholder for current observation (or state)
    #     obs_t_ph = tf.placeholder(tf.uint8, [None] + list(input_shape))
    #     # casting to float on GPU ensures lower data transfer times.
    #     obs_t_float = tf.cast(obs_t_ph, tf.float32) / 255.0



    #     pred_q = q_func(obs_t_float, num_actions, scope="q_func", reuse=False)
    #     pred_ac = tf.argmax(pred_q, axis=1)
    # graph = tf.get_default_graph()

    obs_t_ph = tf.placeholder(tf.uint8, [None] + list(input_shape), name="obs_t_ph")
    # casting to float on GPU ensures lower data transfer times.
    obs_t_float = tf.realdiv(tf.cast(obs_t_ph, tf.float32), 255.0, name='obs_t_float')



    conv = conv_model(obs_t_float, scope="convolution", reuse=False)
    pred_q = mlp_model(conv, num_actions, scope="task2", reuse=False)
    pred_ac = tf.argmax(pred_q, axis=1, name="pred_ac")

    #     obs_t_float2 = graph.get_tensor_by_name("obs_t_ph_lift:0")

    ## How to access saved operation
    #     pred_ac2 = graph.get_tensor_by_name("pred_ac_lift:0")

    episode_reward = 0
    episode_length = 0
    last_obs = env3.reset()

    session = tf.Session()

    #     saver2.restore(session, "/tmp/option_lift_cube.ckpt")
    saver1 = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="convolution"))
    saver2 = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="task2"))

    saver1.restore(session, '../experiments/DQN&Options end-to-end/experiment task0/saved_model/conv_graph.ckpt')
    saver2.restore(session, '../experiments/DQN&Options end-to-end/experiment task2/saved_model/graph.ckpt')

    for t in itertools.count():

        env3.render()
        obs = encode_observation(np.array([last_obs]))
        action = session.run(pred_ac, {obs_t_float: [obs]})[0]

        next_obs, reward, done, info = env3.step(action)

        episode_reward += reward
        episode_length += 1

        if done or episode_length == 100:
            env3.render()
            break

        last_obs = next_obs
    print(episode_reward, episode_length)
Example #31
def add_metrics(tf_prediction,
                tf_ground_truth,
                weights=None,
                threshold=0.5,
                network=None):  # NOTICE ORDER!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # 2 class metrics

    if network == 'iunet':  # iunet
        print('IF tf_prediction', tf_prediction.shape)
        tf_prediction0 = tf_prediction[-1, :, :, :, :] < threshold
        tf_prediction1 = tf_prediction[-1, :, :, :, :] > threshold
        tf_prediction_cc = tf.concat([tf_prediction0, tf_prediction1], 3)
        print('tf_prediction_cc', tf_prediction_cc.shape)
        tf_prediction_cc = tf.cast(tf_prediction_cc, dtype=tf.float32)
        pred_argmax = tf.cast(tf.argmax(tf_prediction_cc,
                                        axis=3,
                                        name='pred_argmax'),
                              dtype=tf.float32)
    else:
        pred_argmax = tf.cast(tf.argmax(tf_prediction,
                                        axis=3,
                                        name='pred_argmax'),
                              dtype=tf.float32)

    true_pos = tf.multiply(pred_argmax, tf_ground_truth)
    true_neg = tf.multiply(pred_argmax - 1, tf_ground_truth - 1)
    false_pos = tf.multiply(pred_argmax, tf_ground_truth - 1)
    false_neg = tf.multiply(pred_argmax - 1, tf_ground_truth)

    scope_suffix = ''
    if weights is not None:
        scope_suffix = '_weighted'
        true_pos = tf.multiply(true_pos, weights)
        true_neg = tf.multiply(true_neg, weights)
        false_pos = tf.multiply(false_pos, weights)
        false_neg = tf.multiply(false_neg, weights)

    true_pos = tf.count_nonzero(true_pos, dtype=tf.float32, name='true_pos')
    true_neg = tf.count_nonzero(true_neg, dtype=tf.float32, name='true_neg')
    false_pos = tf.count_nonzero(false_pos, dtype=tf.float32, name='false_pos')
    false_neg = tf.count_nonzero(false_neg, dtype=tf.float32, name='false_neg')

    precision_den = tf.add(true_pos, false_pos, name='precision_den')
    recall_den = tf.add(true_pos, false_neg, name='recall_den')
    specificity_den = tf.add(false_pos, true_neg, name='specificity_den')

    precision = tf.realdiv(true_pos,
                           tf.add(precision_den, 1e-6),
                           name='precision')
    recall = tf.realdiv(true_pos, tf.add(recall_den, 1e-6), name='recall')
    specificity = tf.realdiv(false_pos,
                             tf.add(specificity_den, 1e-6),
                             name='specificity')

    f1_den = tf.add(precision, recall, name='f1_den')
    f1 = tf.realdiv(2 * precision * recall, tf.add(f1_den, 1e-6), name='f1')

    acc_true = tf.add(true_pos, true_neg)
    acc_false = tf.add(false_pos, false_neg)
    accuracy = tf.realdiv(acc_true, tf.add(acc_true, acc_false))

    return true_pos, true_neg, false_pos, false_neg, precision, recall, specificity, f1, accuracy, pred_argmax
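
A toy evaluation sketch of the metric graph above on a random two-class segmentation batch (not part of the original code):

import numpy as np
import tensorflow as tf

batch, h, w = 1, 16, 16
logits = tf.random_normal([batch, h, w, 2])
labels = tf.constant(np.random.randint(0, 2, size=(batch, h, w)), dtype=tf.float32)
metrics = add_metrics(logits, labels)
with tf.Session() as sess:
    tp, tn, fp, fn, precision, recall, specificity, f1, accuracy, _ = sess.run(metrics)
    print(precision, recall, f1, accuracy)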