Example #1
    def predict_llp(self, q_samples, mu, ys=None):
        h_k = tf.nn.softplus(q_samples)

        dist = tfd.Poisson(rate=h_k)
        lpd = tf.reduce_logsumexp(dist.log_prob(mu)) - tf.log(tf.to_double(h_k.get_shape().as_list()[1]))

        return lpd, tf.to_double(0.0)
Example #2
    def _to_idf_over_doc_size(x, reduced_term_freq, corpus_size):
        """Calculates the inverse document frequency of terms in the corpus.

    Args:
      x : a `SparseTensor` of int64 representing string indices in vocab.
      reduced_term_freq: A `Tensor` of shape (vocabSize,) that represents the
          count of the number of documents with each term.
      corpus_size: A scalar count of the number of documents in the corpus

    Returns:
      The tf*idf values
    """
        # Add one to the reduced term frequencies to avoid dividing by zero.
        idf = tf.log(
            tf.to_double(corpus_size) /
            (1.0 + tf.to_double(reduced_term_freq)))

        dense_doc_sizes = tf.to_double(
            tf.sparse_reduce_sum(
                tf.SparseTensor(indices=x.indices,
                                values=tf.ones_like(x.values),
                                dense_shape=x.dense_shape), 1))

        # For every term in x, divide the idf by the doc size.
        # The two gathers both result in shape <sum_doc_sizes>
        idf_over_doc_size = (tf.gather(idf, x.values) /
                             tf.gather(dense_doc_sizes, x.indices[:, 0]))

        return tf.SparseTensor(indices=x.indices,
                               values=tf.to_float(idf_over_doc_size),
                               dense_shape=x.dense_shape)
Example #3
    def random_crop_fn(src):
        width = tf.to_double(tf.shape(src)[1])
        height = tf.to_double(tf.shape(src)[0])
        ratio = width / height

        small = tf.minimum(width, height)
        scale = size * 3 / small
        width = width * scale
        height = height * scale

        width, height = tf.cond(
            width < size,
            lambda: (tf.to_double(size), size / ratio),
            lambda: (width, height)
        )
        width, height = tf.cond(
            height < size,
            lambda: (size * ratio, tf.to_double(size)),
            lambda: (width, height)
        )
        width = tf.to_int32(width)
        height = tf.to_int32(height)

        result = tf.random_crop(tf.image.resize_bilinear([src], [height, width])[0], [size, size, 3])
        if standardize_rgb:
            return tf.image.per_image_standardization(result)
        return result
Example #4
    def transitionModel(self, particle_states, ins):
        """
        Update particle_states using INS displacement information and transition noise.
        """
        distance_para_x = self._parameters.transition_para[
            0] * self._parameters.meter_pixel_para
        distance_para_y = self._parameters.transition_para[
            1] * self._parameters.meter_pixel_para
        with tf.name_scope('transition'):
            loc_x, loc_y = tf.unstack(particle_states, axis=-1, num=2)
            ins_x, ins_y = tf.unstack(ins, axis=-1, num=2)
            ins_x = tf.tile([ins_x], multiples=[self._particle_nums, 1])
            ins_x = tf.transpose(ins_x * distance_para_x)
            ins_y = tf.tile([ins_y], multiples=[self._particle_nums, 1])
            ins_y = tf.transpose(ins_y * distance_para_y)
            ins_x += tf.to_double(
                tf.random_normal(loc_x.get_shape(),
                                 mean=0.0,
                                 stddev=self._parameters.step_stddev))
            ins_y += tf.to_double(
                tf.random_normal(loc_y.get_shape(),
                                 mean=0.0,
                                 stddev=self._parameters.step_stddev))

            new_particle_states = tf.stack([loc_x + ins_x, loc_y + ins_y],
                                           axis=-1)
        return new_particle_states
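The transition above just shifts each particle by the scaled INS displacement and adds Gaussian noise; a minimal NumPy sketch of that idea (all values below are made up for illustration):

import numpy as np

meter_pixel_para = 0.5                               # metres-to-pixels factor (made up)
step_stddev = 2.0                                    # transition-noise std dev (made up)
particles = np.array([[10.0, 20.0], [11.0, 19.0]])   # [num_particles, (x, y)]
ins = np.array([1.0, -0.5])                          # one INS displacement reading (made up)

noise = np.random.normal(0.0, step_stddev, size=particles.shape)
new_particles = particles + ins * meter_pixel_para + noise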
Example #5
    def build_mh_update(self):
        with tf.name_scope("gold_model"):
            self.joint_density_gold = self.joint_density(**self.symbols_gold)

        with tf.name_scope("proposed_model"):
            self.joint_density_proposed = self.joint_density(**self.symbols_proposed)
        with tf.name_scope("mh_updates"):            
            self.mh_ratio = self.joint_density_proposed - self.joint_density_gold
            self.uniform = tf.placeholder(dtype=tf.float32, name="u")
            log_uniform = tf.log(self.uniform)
            self.accepted = log_uniform < self.mh_ratio 
            
            update_ops = []
            for name, latent in self.latents.items():
                next_val = tf.select(self.accepted, latent["proposed"], latent["gold"])
                update_ops.append(latent["gold"].assign(next_val))

            self.step_counter = tf.Variable(0)
            self.accept_counter = tf.Variable(0)
            self.accept_rate = tf.to_double(self.accept_counter) / tf.to_double(self.step_counter)
            update_ops.append(self.step_counter.assign_add(1))
            update_ops.append(self.accept_counter.assign_add(tf.select(self.accepted, 1, 0)))
            
            self.global_update = tf.group(*update_ops)
                
        return self.global_update
  def _map_to_tfidf(x):
    """Calculates the inverse document frequency of terms in the corpus.
    Args:
      x : a SparseTensor of int64 representing string indices in vocab.
    Returns:
      The tf*idf values
    """
    # Add one to the reduced term frequencies to avoid dividing by zero.
    idf = tf.log(tf.to_double(corpus_size) / (
        1.0 + tf.to_double(reduced_term_freq)))

    dense_doc_sizes = tf.to_double(tf.sparse_reduce_sum(tf.SparseTensor(
        indices=x.indices,
        values=tf.ones_like(x.values),
        dense_shape=x.dense_shape), 1))

    # For every term in x, divide the idf by the doc size.
    # The two gathers both result in shape <sum_doc_sizes>
    idf_over_doc_size = (tf.gather(idf, x.values) /
                         tf.gather(dense_doc_sizes, x.indices[:, 0]))

    return tf.SparseTensor(
        indices=x.indices,
        values=idf_over_doc_size,
        dense_shape=x.dense_shape)
Example #7
    def testMatchWithAffineTransform(self):
        direct_bj = tfb.Tanh()
        indirect_bj = tfb.Chain([
            tfb.AffineScalar(shift=tf.to_double(-1.0),
                             scale=tf.to_double(2.0)),
            tfb.Sigmoid(),
            tfb.AffineScalar(scale=tf.to_double(2.0))
        ])

        x = np.linspace(-3.0, 3.0, 100)
        y = np.tanh(x)
        self.assertAllClose(self.evaluate(direct_bj.forward(x)),
                            self.evaluate(indirect_bj.forward(x)))
        self.assertAllClose(self.evaluate(direct_bj.inverse(y)),
                            self.evaluate(indirect_bj.inverse(y)))
        self.assertAllClose(
            self.evaluate(direct_bj.inverse_log_det_jacobian(y,
                                                             event_ndims=0)),
            self.evaluate(
                indirect_bj.inverse_log_det_jacobian(y, event_ndims=0)))
        self.assertAllClose(
            self.evaluate(direct_bj.forward_log_det_jacobian(x,
                                                             event_ndims=0)),
            self.evaluate(
                indirect_bj.forward_log_det_jacobian(x, event_ndims=0)))
Example #8
def _to_tfidf(term_frequency, reduced_term_freq, corpus_size, smooth):
    """Calculates the inverse document frequency of terms in the corpus.

  Args:
    term_frequency: The `SparseTensor` output of _to_term_frequency.
    reduced_term_freq: A `Tensor` of shape (vocabSize,) that represents the
        count of the number of documents with each term.
    corpus_size: A scalar count of the number of documents in the corpus.
    smooth: A bool indicating if the idf value should be smoothed. See
        tfidf_weights documentation for details.

  Returns:
    A `SparseTensor` with indices=<doc_index_in_batch>, <term_index_in_vocab>,
    values=term frequency * inverse document frequency,
    and shape=(batch, vocab_size)
  """
    # The idf tensor has shape (vocab_size,)
    if smooth:
        idf = tf.log((tf.to_double(corpus_size) + 1.0) /
                     (1.0 + tf.to_double(reduced_term_freq))) + 1
    else:
        idf = tf.log(
            tf.to_double(corpus_size) / (tf.to_double(reduced_term_freq))) + 1

    gathered_idfs = tf.gather(tf.squeeze(idf), term_frequency.indices[:, 1])
    tfidf_values = tf.to_float(
        term_frequency.values) * tf.to_float(gathered_idfs)

    return tf.SparseTensor(indices=term_frequency.indices,
                           values=tfidf_values,
                           dense_shape=term_frequency.dense_shape)
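For reference, a small NumPy sketch of the smoothed vs. unsmoothed idf formulas used above (the corpus counts are invented):

import numpy as np

corpus_size = 4.0
reduced_term_freq = np.array([4.0, 2.0, 1.0])   # docs containing each term (invented)

idf_smooth = np.log((corpus_size + 1.0) / (1.0 + reduced_term_freq)) + 1
idf_plain = np.log(corpus_size / reduced_term_freq) + 1
# A term present in every document gets idf 1.0 under both variants; smoothing
# only changes rare terms and avoids dividing by zero for unseen terms.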
Example #9
def gaussian_1d_log_likelihood(z, mu, sigma=0.1):
    dist = tf.distributions.Normal(loc=tf.reshape(z, [-1]),
                                   scale=tf.to_double(sigma))
    log_probs = dist.log_prob(mu)
    llh = tf.reduce_logsumexp(log_probs) - tf.log(
        tf.to_double(z.get_shape().as_list()[1]))
    return llh
Example #10
    def _map_to_tfidf(x):
        """Calculates the inverse document frequency of terms in the corpus.
    Args:
      x : a SparseTensor of int64 representing string indices in vocab.
    Returns:
      The tf*idf values
    """
        # Add one to the reduced term frequencies to avoid dividing by zero.
        idf = tf.log(
            tf.to_double(corpus_size) /
            (1.0 + tf.to_double(reduced_term_freq)))

        dense_doc_sizes = tf.to_double(
            tf.sparse_reduce_sum(
                tf.SparseTensor(indices=x.indices,
                                values=tf.ones_like(x.values),
                                dense_shape=x.dense_shape), 1))

        # For every term in x, divide the idf by the doc size.
        # The two gathers both result in shape <sum_doc_sizes>
        idf_over_doc_size = (tf.gather(idf, x.values) /
                             tf.gather(dense_doc_sizes, x.indices[:, 0]))

        return tf.SparseTensor(indices=x.indices,
                               values=idf_over_doc_size,
                               dense_shape=x.dense_shape)
def logisticRegression_ref(X, Y, theta, alpha, numIterations):
    m,n = np.shape(X) # X: m-by-n matrix
    x_input = tf.placeholder(tf.float32, [None,n])
    y_input = tf.placeholder(tf.float32, [None])

    theta = tf.Variable(tf.zeros([n,1]), tf.float32)
    b = tf.Variable(tf.zeros([1], tf.float32))
    outputs = tf.sigmoid(tf.to_double(tf.reduce_sum(tf.matmul(x_input, theta), [1]) + b))

    init_op = tf.initialize_all_variables()

    loss_tmp = - tf.to_double(y_input) * tf.log(outputs) - (1.0 - tf.to_double(y_input)) * tf.log(1.0 - outputs)
    #loss = tf.reduce_sum(tf.to_float(loss_tmp))
    loss = tf.reduce_mean(tf.to_float(loss_tmp))

    optimizer = tf.train.GradientDescentOptimizer(alpha)
    train = optimizer.minimize(loss)

    with tf.Session() as sess:
        sess.run(init_op)
        #sess.run(y_hat, feed_dict={x_input: x})
        for step in xrange(numIterations):
            result = sess.run([train], feed_dict={x_input:X, y_input:Y})
            if step % 10000 == 0:
                print(step, sess.run(theta))
        theta2 = theta.eval()

    print "Logistic Regression(TensorFlow)", theta2
    return theta2
Example #12
def preprocess_for_eval(image,
                        labels,
                        bboxes,
                        out_shape,
                        resize,
                        scope='ssd_preprocessing_train'):
    """Preprocess an image for evaluation.

    Args:
        image: A `Tensor` representing an image of arbitrary size.
        out_shape: Output shape after pre-processing (if resize != None)
        resize: Resize strategy.

    Returns:
        A preprocessed image.
    """
    with tf.name_scope(scope):
        if image.get_shape().ndims != 3:
            raise ValueError('Input must be of size [height, width, C>0]')

        image = tf.to_float(image)
        image = tf_image_whitened(image, [_R_MEAN, _G_MEAN, _B_MEAN])

        # Add image rectangle to bboxes.
        bbox_img = tf.constant([[0., 0., 1., 1.]])
        if bboxes is None:
            bboxes = bbox_img
        else:
            bboxes = tf.concat(0, [bbox_img, bboxes])

        # Resize strategy...
        if resize == Resize.NONE:
            pass
        elif resize == Resize.CENTRAL_CROP:
            image, bboxes = tf_image.resize_image_bboxes_with_crop_or_pad(
                image, bboxes, out_shape[0], out_shape[1])
        elif resize == Resize.PAD_AND_RESIZE:
            # Resize image first: find the correct factor...
            shape = tf.shape(image)
            factor = tf.minimum(
                tf.to_double(1.0),
                tf.minimum(tf.to_double(out_shape[0] / shape[0]),
                           tf.to_double(out_shape[1] / shape[1])))
            resize_shape = factor * tf.to_double(shape[0:2])
            resize_shape = tf.cast(tf.floor(resize_shape), tf.int32)

            image = tf_image.resize_image(
                image,
                resize_shape,
                method=tf.image.ResizeMethod.BILINEAR,
                align_corners=False)
            # Pad to expected size.
            image, bboxes = tf_image.resize_image_bboxes_with_crop_or_pad(
                image, bboxes, out_shape[0], out_shape[1])

        # Split back bounding boxes.
        bbox_img = bboxes[0]
        bboxes = bboxes[1:]
        return image, labels, bboxes, bbox_img
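The PAD_AND_RESIZE branch above shrinks the image just enough to fit inside out_shape and then pads; a plain-Python restatement of the factor computation (toy sizes, assuming true division):

import math

out_h, out_w = 300, 300    # target shape (toy values)
h, w = 480, 640            # input image shape (toy values)

factor = min(1.0, min(out_h / h, out_w / w))   # never upscale
resize_h = math.floor(factor * h)              # 225
resize_w = math.floor(factor * w)              # 300; padding then fills to 300x300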
def SumSquares(x):
    # orig: sum_i^n i*x_i^2
    x = tf.to_double(x)
    power_2 = tf.to_double(tf.constant(2, shape=tf.shape(x).numpy()))
    powered = tf.pow(x, power_2)
    multi = np.matlib.repmat([(i + 1) for i in range(np.shape(x)[1])],
                             np.shape(x)[0], 1)
    return tf.reduce_sum(powered * multi, 1)
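These benchmark-style functions vectorize a scalar formula over a batch of points; for SumSquares, a quick NumPy check of sum_i i * x_i**2 on a toy batch:

import numpy as np

x = np.array([[1.0, 2.0, 3.0]])           # one point, n = 3
weights = np.arange(1, x.shape[1] + 1)    # i = 1..n
print(np.sum(weights * x ** 2, axis=1))   # 1*1 + 2*4 + 3*9 = [36.]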
Example #14
def lognormal_1d_log_likelihood(z, mu, sigma_square=0.01):
    sigma_square = tf.to_double(sigma_square)
    each_lpd = -0.5 * tf.log(2 * np.pi * sigma_square) - tf.log(mu) - tf.log(mu) ** 2 / (2 * sigma_square) + \
               tf.log(mu) * z / sigma_square - tf.square(z) / (2 * sigma_square)
    llh = tf.reduce_logsumexp(each_lpd) - tf.log(
        tf.to_double(z.get_shape().as_list()[1]))

    return llh
 def cal_training_loss(self, y_out, y_label):
     y_out = tf.to_double(y_out)
     y_label = tf.to_double(y_label)
     y_out1 = tf.div(1 - self.softsign(tf.matmul(self.top_config.D, y_out)),
                     2)
     training_loss = tf.reduce_mean(
         tf.square(y_out1 - tf.transpose(y_label)))
     return training_loss
Example #16
def preprocess_for_eval(image, labels, bboxes,
                        out_shape=EVAL_SIZE, data_format='NHWC',
                        difficults=None, resize=Resize.WARP_RESIZE,
                        scope='common_preprocessing_eval'):
    with tf.name_scope(scope):
        if image.get_shape().ndims != 3:
            raise ValueError('Input must be of size [height, width, C>0]')

        image = tf.to_float(image)
        image = tf_image_whitened(image, [_R_MEAN, _G_MEAN, _B_MEAN])

        # Add image rectangle to bboxes.
        bbox_img = tf.constant([[0., 0., 1., 1.]])
        if bboxes is None:
            bboxes = bbox_img
        else:
            bboxes = tf.concat([bbox_img, bboxes], axis=0)

        if resize == Resize.NONE:
            # No resizing...
            pass
        elif resize == Resize.CENTRAL_CROP:
            # Central cropping of the image.
            image, bboxes = tf_image.resize_image_bboxes_with_crop_or_pad(
                image, bboxes, out_shape[0], out_shape[1])
        elif resize == Resize.PAD_AND_RESIZE:
            # Resize image first: find the correct factor...
            shape = tf.shape(image)
            factor = tf.minimum(tf.to_double(1.0),
                                tf.minimum(tf.to_double(out_shape[0] / shape[0]),
                                           tf.to_double(out_shape[1] / shape[1])))
            resize_shape = factor * tf.to_double(shape[0:2])
            resize_shape = tf.cast(tf.floor(resize_shape), tf.int32)

            image = tf_image.resize_image(image, resize_shape,
                                          method=tf.image.ResizeMethod.BILINEAR,
                                          align_corners=False)
            # Pad to expected size.
            image, bboxes = tf_image.resize_image_bboxes_with_crop_or_pad(
                image, bboxes, out_shape[0], out_shape[1])
        elif resize == Resize.WARP_RESIZE:
            # Warp resize of the image.
            image = tf_image.resize_image(image, out_shape,
                                          method=tf.image.ResizeMethod.BILINEAR,
                                          align_corners=False)

        # Split back bounding boxes.
        bbox_img = bboxes[0]
        bboxes = bboxes[1:]
        # Remove difficult boxes.
        if difficults is not None:
            mask = tf.logical_not(tf.cast(difficults, tf.bool))
            labels = tf.boolean_mask(labels, mask)
            bboxes = tf.boolean_mask(bboxes, mask)
        # Image data format.
        if data_format == 'NCHW':
            image = tf.transpose(image, perm=(2, 0, 1))
        return image, labels, bboxes, bbox_img
def Matyas(x):
    # orig: 0.26(x_1^2+x_2^2)-0.48*x_1*x_2
    x = tf.to_double(x)
    power = tf.to_double(tf.constant(2, shape=[
        np.array(tf.shape(x)[0]),
    ]))
    add1 = 0.26 * tf.add(tf.pow(x[:, 0], power), tf.pow(x[:, 1], power))
    add2 = 0.48 * x[:, 0] * x[:, 1]
    return add1 - add2
def calc_func_graph(log_pitches, vectors, c=0.1, bounds=None):
    """
    log_pitches: tf.Variable that is the input coordinates
    vectors: arlike (not tf)
    c: coefficient to control the width of the bell curve
    """
    n_pitches = log_pitches.shape[1]
    pitch_distances = vector_pitch_distances(vectors)
    pitch_distances = tf.expand_dims(pitch_distances, -1)
    bases = get_bases(log_pitches.shape[-1] + 1)
    combinatorial_log_pitches = tf.abs(tf.tensordot(log_pitches, bases, 1))
    tiled_ones = tf.ones_like(pitch_distances) * -1.0
    combos = tf.tensordot(tiled_ones, combinatorial_log_pitches[None, :, :], 1)
    diffs = tf.abs(tf.add(tf.expand_dims(pitch_distances, 1), combos))
    scales = tf.exp(-1.0 * (diffs**2 / (2.0 * c**2)))

    prime_slice = PRIME_TRANSFORM[:vectors.shape[-1]]
    indigestibilities = tf.tensordot(tf.abs(vectors), prime_slice, 1)
    indigestibilities = tf.where(tf.not_equal(
        indigestibilities, tf.zeros_like(indigestibilities)),
                                 x=indigestibilities,
                                 y=tf.ones_like(indigestibilities))
    indigestibilities = indigestibilities[:, None,
                                          None] * tf.reciprocal(scales)

    if bounds is not None:
        if bounds.shape[0] != n_pitches:
            raise tf.errors.InvalidArgumentError(
                None, None,
                "bounds outer shape must == the number of log_pitches")
        pitch_diffs = tf.abs(pitch_distances)
        is_out_of_bounds = tf.logical_or(pitch_diffs < bounds[:, 0],
                                         pitch_diffs > bounds[:, 1])
        is_out_of_bounds = tf.tile(is_out_of_bounds[:, :, None],
                                   [1, 1, bases.shape[-1]])
        bases_mask = tf.equal(tf.abs(bases), 1.0)
        mask_me_again = tf.logical_not(
            tf.equal(tf.reduce_sum(bases, axis=0), 0.0))
        bases_mask = tf.logical_and(bases_mask, mask_me_again)
        is_relevant = tf.tile(bases_mask[None, :, :],
                              [tf.shape(is_out_of_bounds)[0], 1, 1])
        is_both = tf.logical_and(is_out_of_bounds, is_relevant)
        is_both = tf.reduce_any(is_both, axis=1)
        is_both = tf.tile(is_both[:, None, :],
                          [1, tf.shape(log_pitches)[0], 1])
        indigestibilities = tf.where(is_both,
                                     x=tf.fill(tf.shape(indigestibilities),
                                               tf.to_double(1.0e8)),
                                     y=indigestibilities)

    indigestibilities = tf.reduce_min(indigestibilities, axis=0)
    indigestibilities = tf.reduce_sum(indigestibilities, axis=-1)
    harmonicities = tf.to_double(
        bases.shape[-1]) * tf.reciprocal(indigestibilities)

    return harmonicities
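The scales term above is the bell curve the docstring mentions: each pitch difference d is weighted by exp(-d**2 / (2 * c**2)), so a smaller c gives a narrower curve. A small NumPy illustration with made-up distances:

import numpy as np

c = 0.1
diffs = np.array([0.0, 0.05, 0.1, 0.2])      # made-up pitch differences
print(np.exp(-diffs ** 2 / (2.0 * c ** 2)))  # ~[1.0, 0.88, 0.61, 0.14]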
Example #19
 def build_kl(self):
     jitter = gfs.settings.jitter * tf.eye(tf.shape(self.x_rand)[0],
                                           dtype=tf.float64)
     q = tfp.distributions.MultivariateNormalFullCovariance(
         tf.to_double(self.func_x_rand_mean),
         tf.to_double(self.func_x_rand_cov))
     p = tfp.distributions.MultivariateNormalFullCovariance(
         tf.zeros_like(self.func_x_rand_mean, dtype=tf.float64),
         self.prior_kernel.K(tf.to_double(self.x_rand)) + jitter)
     self.kl = tf.to_float(tfp.distributions.kl_divergence(q, p))
def StyblinskiTang(x):
    # orig: 0.5*sum(pow(x_i,4)-16*square(x_i) + 5*x_i)
    x = tf.to_double(x)
    power1 = tf.to_double(tf.constant(4, shape=(tf.shape(x)).numpy()))
    mod1 = tf.pow(x, power1)
    power2 = tf.to_double(tf.constant(2, shape=(tf.shape(x)).numpy()))
    mod2 = tf.pow(x, power2)
    raw = mod1 - 16.0 * mod2 + 5.0 * x
    summed = tf.reduce_sum(raw, 1)
    return 0.5 * summed
def Rosenbrock(x):
    # orig: 100*math.pow(x[1]-math.pow(x[0],2),2) + math.pow(x[0]-1,2)
    x = tf.to_double(x)
    power = tf.to_double(tf.constant(2, shape=[
        np.array(tf.shape(x)[0]),
    ]))
    pow1 = tf.pow(x[:, 0], power)
    add1 = 100.0 * tf.pow(x[:, 1] - pow1, power)
    pow2 = tf.pow(tf.subtract(x[:, 0], 1.2), power)
    return add1 - pow2
def McCormick(x):
    # orig: math.sin(x[0]+x[1]) + math.pow(x[0]-x[1],2) - 1.5*x[0] + 2.5*x[1] + 1
    x = tf.to_double(x)
    power = tf.to_double(tf.constant(2, shape=[
        np.array(tf.shape(x)[0]),
    ]))
    add1 = tf.sin(x[:, 0] + x[:, 1])
    add2 = tf.pow(x[:, 0] + x[:, 1], power)
    add3 = tf.add(-1.5 * x[:, 0] + 2.5 * x[:, 1], 1.0)
    return add1 + add2 + add3
def Michalewicz(x):
    # orig: - sum_i^n (sin(x_i)sin^20(i*x_i^2/pi))
    x = tf.to_double(x)
    out = tf.to_double(tf.constant(0, shape=[
        np.array(tf.shape(x)[0]),
    ]))
    n = int(tf.shape(x)[1])  # Python int so the loop below works in eager mode
    for i in range(n):
        out += tf.sin(x[:, i]) * tf.pow(
            tf.sin((i + 1) / np.pi * tf.pow(x[:, i], 2)), 20)
    return -1 * out
def Booth(x):
    # orig: math.pow(x[0]+2*x[1]-7,2)+math.pow(2*x[0]+x[1]-5,2)
    x = tf.to_double(x)
    power = tf.to_double(tf.constant(2, shape=[
        np.array(tf.shape(x)[0]),
    ]))
    add1 = tf.subtract(x[:, 0] + 2 * x[:, 1], 7.2)
    add1 = tf.pow(add1, power)
    add2 = tf.subtract(2 * x[:, 0] + x[:, 1], 5.2)
    add2 = tf.pow(add2, power)
    return add1 + add2
def Trid(x):
    # orig: sum_i^n [(x_i-1)^2] - sum_i^n [x_i*x_(i-1)]
    x = tf.to_double(x)
    sub = tf.to_double(tf.constant(1, shape=tf.shape(x).numpy()))
    power_2 = tf.to_double(tf.constant(2, shape=tf.shape(x).numpy()))
    add1 = tf.pow(x - sub, power_2)
    add2 = tf.to_double(tf.constant(0, shape=[
        np.array(tf.shape(x)[0]),
    ]))
    for i in range(1, tf.shape(x).numpy()[1]):
        add2 = tf.add(add2, x[:, i] * x[:, i - 1])
    return tf.reduce_sum(add1, 1) - add2
def compute_coarse_matrix(model, n, A_stencil, A_matrices, grid_size, bb=True):
    if bb == True:
        P_stencil = model(A_stencil, True)
    else:
        P_stencil = model(A_stencil, phase="Test")
    P_matrix = tf.to_double(compute_p2_sparse(P_stencil, P_stencil.shape.as_list()[0], grid_size))
    P_matrix_t = tf.sparse_transpose(P_matrix, [0, 2, 1])
    A_matrices = tf.squeeze(A_matrices)
    temp = tf.sparse_tensor_to_dense(P_matrix_t)
    q = tf.matmul(temp, tf.to_double(A_matrices))
    A_c = tf.transpose(tf.matmul(temp, tf.transpose(q, [0, 2, 1])), [0, 2, 1])
    return A_c, compute_stencil(tf.squeeze(A_c), (grid_size // 2)), P_matrix, P_matrix_t
Example #27
 def cal_training_loss(self, y_out, y_label):
     y_out = tf.to_double(y_out)
     y_label = tf.to_double(y_label)
     o_out = tf.div(
         1 - self.softsign(
             tf.slice(y_out, [0, 0], [
                 self.train_config.label_length,
                 self.train_config.training_minibatch_size
             ])), 2)
     training_loss = tf.reduce_mean(tf.square(o_out -
                                              tf.transpose(y_label)))
     return training_loss
def preprocess_for_eval(image, labels, bboxes, out_shape, resize,
                        scope='ssd_preprocessing_train'):
    """Preprocess an image for evaluation.

    Args:
        image: A `Tensor` representing an image of arbitrary size.
        out_shape: Output shape after pre-processing (if resize != None)
        resize: Resize strategy.

    Returns:
        A preprocessed image.
    """
    with tf.name_scope(scope):
        if image.get_shape().ndims != 3:
            raise ValueError('Input must be of size [height, width, C>0]')

        image = tf.to_float(image)
        image = tf_image_whitened(image, [_R_MEAN, _G_MEAN, _B_MEAN])

        # Add image rectangle to bboxes.
        bbox_img = tf.constant([[0., 0., 1., 1.]])
        if bboxes is None:
            bboxes = bbox_img
        else:
            bboxes = tf.concat(0, [bbox_img, bboxes])

        # Resize strategy...
        if resize == Resize.NONE:
            pass
        elif resize == Resize.CENTRAL_CROP:
            image, bboxes = tf_image.resize_image_bboxes_with_crop_or_pad(
                image, bboxes, out_shape[0], out_shape[1])
        elif resize == Resize.PAD_AND_RESIZE:
            # Resize image first: find the correct factor...
            shape = tf.shape(image)
            factor = tf.minimum(tf.to_double(1.0),
                                tf.minimum(tf.to_double(out_shape[0] / shape[0]),
                                           tf.to_double(out_shape[1] / shape[1])))
            resize_shape = factor * tf.to_double(shape[0:2])
            resize_shape = tf.cast(tf.floor(resize_shape), tf.int32)

            image = tf_image.resize_image(image, resize_shape,
                                          method=tf.image.ResizeMethod.BILINEAR,
                                          align_corners=False)
            # Pad to expected size.
            image, bboxes = tf_image.resize_image_bboxes_with_crop_or_pad(
                image, bboxes, out_shape[0], out_shape[1])

        # Split back bounding boxes.
        bbox_img = bboxes[0]
        bboxes = bboxes[1:]
        return image, labels, bboxes, bbox_img
def GoldsteinPrice(x):
    #orig: [1+(x_1+x_2+1)^2*(19-14x_1+3x_1^2-14x_2+6x_1x_2+3x_2^2)]*[30+(2x_1-3x_2)^2*(18-32x_1+12x_1^2+48x_2-36x_1x_2+27x_2^2)]
    x = tf.to_double(x)
    ones = tf.to_double(tf.constant(1, shape=[
        np.array(tf.shape(x)[0]),
    ]))
    mul1 = ones + tf.pow(x[:, 0] + x[:, 1] + ones, 2) * (
        19 * ones - 14 * x[:, 0] + 3 * tf.pow(x[:, 0], 2) - 14 * x[:, 1] +
        6 * x[:, 0] * x[:, 1] + 3 * tf.pow(x[:, 1], 2))
    mul2 = 30 * ones + tf.pow(2 * x[:, 0] - 3 * x[:, 1], 2) * (
        18 * ones - 32 * x[:, 0] + 12 * tf.pow(x[:, 0], 2) + 48 * x[:, 1] -
        36 * x[:, 0] * x[:, 1] + 27 * tf.pow(x[:, 1], 2))
    return mul1 * mul2
Example #30
def median(x):
    x = tf.reshape(x, [-1])
    med = tf.floordiv(tf.shape(x)[0], 2)
    check_parity = tf.equal(tf.to_double(med),
                            tf.divide(tf.to_double(tf.shape(x)[0]), 2.))

    def is_true():
        return 0.5 * tf.reduce_sum(tf.nn.top_k(x, med + 1).values[-2:])

    def is_false():
        return tf.nn.top_k(x, med + 1).values[-1]

    return tf.cond(check_parity, is_true, is_false)
Example #31
def _to_term_frequency(x, vocab_size):
  """Creates a SparseTensor of term frequency for every doc/term pair.

  Args:
    x : a SparseTensor of int64 representing string indices in vocab.
    vocab_size: A scalar int64 Tensor - the count of vocab used to turn the
        string into int64s including any OOV buckets.

  Returns:
    a SparseTensor with the count of times a term appears in a document at
        indices <doc_index_in_batch>, <term_index_in_vocab>,
        with size (num_docs_in_batch, vocab_size).
  """
  # Construct intermediary sparse tensor with indices
  # [<doc>, <term_index_in_doc>, <vocab_id>] and tf.ones values.
  vocab_size = tf.convert_to_tensor(vocab_size, dtype=tf.int64)
  split_indices = tf.to_int64(
      tf.split(x.indices, axis=1, num_or_size_splits=2))
  expanded_values = tf.to_int64(tf.expand_dims(x.values, 1))
  next_index = tf.concat(
      [split_indices[0], split_indices[1], expanded_values], axis=1)

  next_values = tf.ones_like(x.values)
  expanded_vocab_size = tf.expand_dims(vocab_size, 0)
  next_shape = tf.concat(
      [x.dense_shape, expanded_vocab_size], 0)

  next_tensor = tf.SparseTensor(
      indices=tf.to_int64(next_index),
      values=next_values,
      dense_shape=next_shape)

  # Take the intermediary tensor and reduce over the term_index_in_doc
  # dimension. This produces a tensor with indices [<doc_id>, <term_id>]
  # and values [count_of_term_in_doc] and shape batch x vocab_size
  term_count_per_doc = tf.sparse_reduce_sum_sparse(next_tensor, 1)

  dense_doc_sizes = tf.to_double(tf.sparse_reduce_sum(tf.SparseTensor(
      indices=x.indices,
      values=tf.ones_like(x.values),
      dense_shape=x.dense_shape), 1))

  gather_indices = term_count_per_doc.indices[:, 0]
  gathered_doc_sizes = tf.gather(dense_doc_sizes, gather_indices)

  term_frequency = (tf.to_double(term_count_per_doc.values) /
                    tf.to_double(gathered_doc_sizes))
  return tf.SparseTensor(
      indices=term_count_per_doc.indices,
      values=term_frequency,
      dense_shape=term_count_per_doc.dense_shape)
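As a plain-Python restatement of what _to_term_frequency produces, per (doc, term) counts divided by the document length (the documents are invented):

from collections import Counter

docs = [[0, 2, 2], [1]]    # two documents as lists of vocab ids (invented)
for doc_index, doc in enumerate(docs):
    for term, count in Counter(doc).items():
        # index (doc_index, term), value count / doc size
        print((doc_index, term), count / len(doc))
# (0, 0) 0.333..., (0, 2) 0.666..., (1, 1) 1.0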
Example #32
def train_once(loss=1.6):
    
    min_loss = loss
    os.system("rm -rf ./log/*")
      
    X = tf.placeholder(tf.float64, [None, 39])
    Y = tf.placeholder(tf.float64, [None, 1]) 
      
    b0 = tf.Variable(tf.to_double(tf.random_normal([39])), dtype=tf.float64)    #39
    w1 = tf.Variable(tf.to_double(tf.random_normal([39, 1])), dtype=tf.float64) #39
    b1 = tf.Variable(tf.to_double(tf.random_normal([1])), dtype=tf.float64)     #1
    
    input_layer = tf.add(X, b0)
    hidden_layer_one = tf.matmul(input_layer, w1) + b1
    pred = tf.nn.relu(hidden_layer_one)  


    cost = tf.reduce_mean(tf.pow(tf.subtract(Y, pred), 2))  
    #loss = tf.reduce_mean(tf.exp(tf.abs(tf.subtract(Y, pred))))
    
    optimizer=tf.train.AdamOptimizer(learning_rate).minimize(cost)
      
    init = tf.global_variables_initializer()    
    saver= tf.train.Saver()
    sess = tf.Session()  
    sess.run(init)  
    
    for iteration in range(1, iterations):  
          
        batch_X,batch_Y=get_batch(batch_size)
    
        test_cost = sess.run(cost,feed_dict={X:train_x[train_num:], Y:train_y[train_num:].reshape(val_num, 1)})
              
       
        if test_cost < 10.0:
            if min_loss > test_cost:
                min_loss = test_cost
                os.system("rm result.csv")
                os.system("rm ./model/*")
                print("test cost: ", test_cost, "store into model")
                saver.save(sess, "./model/model.ckpt")

                result = sess.run(pred,feed_dict={X:test_x})
                cf = open('result.csv', 'a')
                wcf = csv.writer(cf)
                for i in result:
                    wcf.writerow([("%.6f" % i[0]), ])

        _,batch_cost=sess.run([optimizer,cost],feed_dict={X:batch_X,Y:batch_Y})  
        print("Iteration :","%04d"%(iteration+1),"Train_cost :","{:.9f}".format(batch_cost),"Test_cost :","{:.9f}".format(test_cost))  
    return min_loss
Example #33
 def body(j, loss):
     idx = tf.expand_dims(targets[j, :tar_len[j]], 1)
     ac = tf.transpose(tf.gather_nd(tf.transpose(action_probs[j]), idx))
     st = tf.transpose(term_probs[j], (1, 0, 2))
     st = tf.transpose(tf.gather_nd(st, idx), (1, 0, 2))
     length = seq_len[j]
     if safe:
         loss += -forward_tac_log(ac, st, length) / tf.to_double(
             bs)  # negative log likelihood
     else:
         loss += -tf.reduce_sum(
             tf.log(forward_tac_tf(ac, st, length)) /
             tf.to_double(bs))  # negative log likelihood for whole batch
     return tf.add(j, 1), loss  # average loss over batches
Example #34
def preprocess_for_eval_multi(image,
                              labels,
                              bboxes,
                              out_shape,
                              resize,
                              scope='ssd_preprocessing_train'):
    with tf.name_scope(scope):

        image = tf.to_float(image)
        #image = tf_image_whitened(image, [_R_MEAN, _G_MEAN, _B_MEAN])
        image = image - np.array([123.6800, 116.7790, 103.9390]).reshape(
            (1, 1, 1, 3))

        # Add image rectangle to bboxes.
        bbox_img = tf.constant([[0., 0., 1., 1.]])
        if bboxes is None:
            bboxes = bbox_img
        else:
            bboxes = tf.concat(0, [bbox_img, bboxes])

        # Resize strategy...
        if resize == Resize.NONE:
            pass
        elif resize == Resize.CENTRAL_CROP:
            image, bboxes = tf_image.resize_image_bboxes_with_crop_or_pad(
                image, bboxes, out_shape[0], out_shape[1])
        elif resize == Resize.PAD_AND_RESIZE:
            # Resize image first: find the correct factor...
            shape = tf.shape(image)
            factor = tf.minimum(
                tf.to_double(1.0),
                tf.minimum(tf.to_double(out_shape[0] / shape[0]),
                           tf.to_double(out_shape[1] / shape[1])))
            resize_shape = factor * tf.to_double(shape[0:2])
            resize_shape = tf.cast(tf.floor(resize_shape), tf.int32)

            image = tf_image.resize_image(
                image,
                resize_shape,
                method=tf.image.ResizeMethod.BILINEAR,
                align_corners=False)
            # Pad to expected size.
            image, bboxes = tf_image.resize_image_bboxes_with_crop_or_pad(
                image, bboxes, out_shape[0], out_shape[1])

        # Split back bounding boxes.
        bbox_img = bboxes[0]
        bboxes = bboxes[1:]
        return image, labels, bboxes, bbox_img
Example #35
 def staircase_quantizer(self, message, beta):
     sigma_square = tf.to_double(5 *
                                 (10**(tf.log(2 * beta) / tf.log(3.0) - 5)))
     nambda = beta / (beta - (beta / (2**2 - 1)))
     label = beta * np.array([1 / 3, 1])
     label0 = tf.to_double(label)
     label1 = tf.to_double(label * nambda)
     message = tf.to_double(
         tf.clip_by_value(message, -beta - 0.1, beta + 0.1))
     a = label0[0]*tf.exp(-tf.square(message-label1[0])/2/sigma_square) + label0[1]*tf.exp(-tf.square(message-label1[1])/2/sigma_square)-\
         label0[0]*tf.exp(-tf.square(message+label1[0])/2/sigma_square) - label0[1]*tf.exp(-tf.square(message+label1[1])/2/sigma_square)
     b = tf.exp(-tf.square(message-label1[0])/2/sigma_square) + tf.exp(-tf.square(message-label1[1])/2/sigma_square)+\
         tf.exp(-tf.square(message+label1[0])/2/sigma_square) + tf.exp(-tf.square(message+label1[1])/2/sigma_square)
     quantized_message = tf.to_float(a / b)
     return quantized_message
    def __init__(self, nn_settings, mnistHandler):
        self.variables  = {}
        self.operations = {}
        self.vis        = {}
        self.mnistHandler   = mnistHandler
        self.nn_settings    = nn_settings
        self.conv_specs     = nn_settings['conv_specs']
        self.conv_layers    = nn_settings['conv_layers']
        self.hidden_layer   = nn_settings['hidden_layer']
        self.do_batch_norm  = nn_settings['do_batch_norm']
        self.layers     = []
        self.train_phase    =   tf.placeholder(tf.bool, name='train_phase')

        self.y = self.define_graph()
        # cost function
        cross_entropy = -tf.reduce_sum(self.y_*
                         tf.log(tf.clip_by_value(self.y,1e-10,1.0)))
        
        # optimisation function
        self.train_step = tf.train.AdamOptimizer(mnistHandler.settings['LEARNING_RATE']).minimize(cross_entropy)
        
        # evaluation
        correct_prediction = tf.equal(tf.argmax(self.y,1),
                                      tf.argmax(self.y_,1))
        
        self.accuracy = tf.reduce_mean(tf.to_double(correct_prediction))
        
        # prediction function
        # return the index with the highest probability
        self.predict = tf.argmax(self.y,1)
Example #37
  def testMatchWithAffineTransform(self):
    direct_bj = tfb.Tanh()
    indirect_bj = tfb.Chain([
        tfb.AffineScalar(shift=tf.to_double(-1.0), scale=tf.to_double(2.0)),
        tfb.Sigmoid(),
        tfb.AffineScalar(scale=tf.to_double(2.0))])

    x = np.linspace(-3.0, 3.0, 100)
    y = np.tanh(x)
    self.assertAllClose(self.evaluate(direct_bj.forward(x)),
                        self.evaluate(indirect_bj.forward(x)))
    self.assertAllClose(self.evaluate(direct_bj.inverse(y)),
                        self.evaluate(indirect_bj.inverse(y)))
    self.assertAllClose(
        self.evaluate(direct_bj.inverse_log_det_jacobian(y, event_ndims=0)),
        self.evaluate(indirect_bj.inverse_log_det_jacobian(y, event_ndims=0)))
    self.assertAllClose(
        self.evaluate(direct_bj.forward_log_det_jacobian(x, event_ndims=0)),
        self.evaluate(indirect_bj.forward_log_det_jacobian(x, event_ndims=0)))
 def one_bp_iteration(self, xe_v2c_pre_iter, H_sumC_to_V, H_sumV_to_C, xe_0):
     xe_tanh = tf.tanh(tf.to_double(tf.truediv(xe_v2c_pre_iter, [2.0])))
     xe_tanh = tf.to_float(xe_tanh)
     xe_tanh_temp = tf.sign(xe_tanh)
     xe_sum_log_img = tf.matmul(H_sumC_to_V, tf.multiply(tf.truediv((1 - xe_tanh_temp), [2.0]), [3.1415926]))
     xe_sum_log_real = tf.matmul(H_sumC_to_V, tf.log(1e-8 + tf.abs(xe_tanh)))
     xe_sum_log_complex = tf.complex(xe_sum_log_real, xe_sum_log_img)
     xe_product = tf.real(tf.exp(xe_sum_log_complex))
     xe_product_temp = tf.multiply(tf.sign(xe_product), -2e-7)
     xe_pd_modified = tf.add(xe_product, xe_product_temp)
     xe_v_sumc = tf.multiply(self.atanh(xe_pd_modified), [2.0])
     xe_c_sumv = tf.add(xe_0, tf.matmul(H_sumV_to_C, xe_v_sumc))
     return xe_v_sumc, xe_c_sumv
Example #39
  def __init__(self):
    # debugging structure
    self.variables      = {}
    self.operations     = {}
    self.vis            = {}

    self.layers         = []

    self.train_phase    = tf.placeholder(tf.bool, name='train_phase')
    self.learning_rate  = tf.placeholder(tf.float32, name='learning_rate')
    self.keep_prob      = tf.placeholder('float')

    # TODO maybe make test and train different graphs
   # with tf.variable_scope('main') as scope:
   #     self.test_y   = self.define_graph()
   #     scope.reuse_variables()
   #     self.train_y  = self.define_graph()
    self.y = self.define_graph()

    # 50x1 => 50
    self.y_ = tf.reshape(self.y_, [-1])
    self.y_ = tf.one_hot(self.y_, 10, on_value=1, off_value=0, axis=-1)
    y_float = tf.cast(self.y_, tf.float32)

    # cost function
    cross_entropy = -tf.reduce_sum(y_float*
                     tf.log(tf.clip_by_value(self.y,1e-10,1.0)))

    tf.scalar_summary('cross_entropy', cross_entropy)
    
    # evaluation
    correct_prediction = tf.equal(tf.argmax(self.y,1),
                                  tf.argmax(self.y_,1))
    
    self.accuracy = tf.reduce_mean(tf.to_double(correct_prediction))
    tf.scalar_summary('accuracy', self.accuracy)
    
    # optimisation function
    # for adam betas are rejection factors
    self.train_step = tf.train.AdamOptimizer(
        learning_rate=self.learning_rate,
        beta1=BETA1,
        beta2=BETA2,
        epsilon=1e-08
    ).minimize(cross_entropy)
    
    # return the index with the highest probability
    self.predict = tf.argmax(self.y,1)

    self.summaries = tf.merge_all_summaries()
    def __init__(self, nn_settings, mnistHandler):
        # debugging structure
        self.variables      = {}
        self.operations     = {}
        self.vis            = {}

        self.mnistHandler   = mnistHandler
        self.nn_settings    = nn_settings
        self.conv_specs     = nn_settings['conv_specs']
        self.conv_layers    = nn_settings['conv_layers']
        self.hidden_layer   = nn_settings['hidden_layer']
        self.do_batch_norm  = nn_settings['do_batch_norm']
        self.activation_fn  = nn_settings['activation_fn']
        self.layers         = []
        LEARNING_RATE       = mnistHandler.settings['LEARNING_RATE']
        BATCH_SIZE          = mnistHandler.settings['BATCH_SIZE']
        self.train_phase    = tf.placeholder(tf.bool, name='train_phase')
        self.learning_rate  = tf.placeholder(tf.float32, name='learning_rate')
        self.keep_prob = tf.placeholder('float')

        self.y = self.define_graph()
        # cost function
        cross_entropy = -tf.reduce_sum(self.y_*
                         tf.log(tf.clip_by_value(self.y,1e-10,1.0)))
        
        # optimisation function
        # for adam betas are rejection factors
        self.train_step = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate,
            beta1=mnistHandler.settings['BETA1'],
            beta2=mnistHandler.settings['BETA2'],
            epsilon=1e-08
        ).minimize(cross_entropy)
        
        # evaluation
        correct_prediction = tf.equal(tf.argmax(self.y,1),
                                      tf.argmax(self.y_,1))
        
        self.accuracy = tf.reduce_mean(tf.to_double(correct_prediction))
        
        # return the index with the highest probability
        self.predict = tf.argmax(self.y,1)
 def testRepr(self):
   k32_batch_unk = TestKernel(tf.placeholder(tf.float32))
   k32_batch2 = TestKernel(tf.to_float([123., 456.]))
   k64_batch2x1 = TestKernel(tf.to_double([[123.], [456.]]))
   k_fdim3 = TestKernel(tf.to_float(123.), feature_ndims=3)
   self.assertEqual(
       '<tfp.positive_semidefinite_kernels.TestKernel '
       '\'TestKernel\' batch_shape=<unknown> feature_ndims=1 dtype=float32>',
       repr(k32_batch_unk))
   self.assertEqual(
       '<tfp.positive_semidefinite_kernels.TestKernel '
       '\'TestKernel\' batch_shape=(2,) feature_ndims=1 dtype=float32>',
       repr(k32_batch2))
   self.assertEqual(
       '<tfp.positive_semidefinite_kernels.TestKernel '
       '\'TestKernel\' batch_shape=(2, 1) feature_ndims=1 dtype=float64>',
       repr(k64_batch2x1))
   self.assertEqual(
       '<tfp.positive_semidefinite_kernels.TestKernel '
       '\'TestKernel\' batch_shape=() feature_ndims=3 dtype=float32>',
       repr(k_fdim3))
 def testStr(self):
   k32_batch_unk = TestKernel(tf.placeholder(tf.float32))
   k32_batch2 = TestKernel(tf.to_float([123., 456.]))
   k64_batch2x1 = TestKernel(tf.to_double([[123.], [456.]]))
   k_fdim3 = TestKernel(tf.to_float(123.), feature_ndims=3)
   self.assertEqual(
       'tfp.positive_semidefinite_kernels.TestKernel('
       '"TestKernel", feature_ndims=1, dtype=float32)',
       str(k32_batch_unk))
   self.assertEqual(
       'tfp.positive_semidefinite_kernels.TestKernel('
       '"TestKernel", batch_shape=(2,), feature_ndims=1, dtype=float32)',
       str(k32_batch2))
   self.assertEqual(
       'tfp.positive_semidefinite_kernels.TestKernel('
       '"TestKernel", batch_shape=(2, 1), feature_ndims=1, dtype=float64)',
       str(k64_batch2x1))
   self.assertEqual(
       'tfp.positive_semidefinite_kernels.TestKernel('
       '"TestKernel", batch_shape=(), feature_ndims=3, dtype=float32)',
       str(k_fdim3))
def gen_model(name, license, model, model_file, version=VERSION, featurize=True):
    g = tf.Graph()
    with tf.Session(graph=g) as session:
        K.set_learning_phase(0)
        inTensor = tf.placeholder(dtype=tf.string, shape=[], name="%s_input" % name)
        decoded = tf.decode_raw(inTensor, tf.uint8)
        imageTensor = tf.to_float(
            tf.reshape(
                decoded,
                shape=[
                    1,
                    model.inputShape()[0],
                    model.inputShape()[1],
                    3]))
        m = model.model(preprocessed=model.preprocess(imageTensor), featurize=featurize)
        outTensor = tf.to_double(tf.reshape(m.output, [-1]), name="%s_sparkdl_output__" % name)
        gdef = tfx.strip_and_freeze_until([outTensor], session.graph, session, False)
    g2 = tf.Graph()
    with tf.Session(graph=g2) as session:
        tf.import_graph_def(gdef, name='')
        filename = "sparkdl-%s_%s.pb" % (name, version)
        print('writing out ', filename)
        tf.train.write_graph(g2.as_graph_def(), logdir="./", name=filename, as_text=False)
        with open("./" + filename, "r") as f:
            h = sha256(f.read()).digest()
            base64_hash = b64encode(h)
            print('h', base64_hash)
    model_file.write(indent(
        scala_template % {
            "license": license,
            "name": name,
            "height": model.inputShape()[0],
            "width": model.inputShape()[1],
            "filename": filename,
            "base64": base64_hash},2))
    return g2
def preprocess_for_eval(image, labels, bboxes,
                        out_shape=EVAL_SIZE, data_format='NHWC',
                        difficults=None, resize=Resize.WARP_RESIZE,
                        scope='ssd_preprocessing_train'):
    """Preprocess an image for evaluation.

    Args:
        image: A `Tensor` representing an image of arbitrary size.
        out_shape: Output shape after pre-processing (if resize != None)
        resize: Resize strategy.

    Returns:
        A preprocessed image.
    """
    with tf.name_scope(scope):
        if image.get_shape().ndims != 3:
            raise ValueError('Input must be of size [height, width, C>0]')

        image = tf.to_float(image)
        image = tf_image_whitened(image, [_R_MEAN, _G_MEAN, _B_MEAN])

        # Add image rectangle to bboxes.
        bbox_img = tf.constant([[0., 0., 1., 1.]])
        if bboxes is None:
            bboxes = bbox_img
        else:
            bboxes = tf.concat([bbox_img, bboxes], axis=0)

        if resize == Resize.NONE:
            # No resizing...
            pass
        elif resize == Resize.CENTRAL_CROP:
            # Central cropping of the image.
            image, bboxes = tf_image.resize_image_bboxes_with_crop_or_pad(
                image, bboxes, out_shape[0], out_shape[1])
        elif resize == Resize.PAD_AND_RESIZE:
            # Resize image first: find the correct factor...
            shape = tf.shape(image)
            factor = tf.minimum(tf.to_double(1.0),
                                tf.minimum(tf.to_double(out_shape[0] / shape[0]),
                                           tf.to_double(out_shape[1] / shape[1])))
            resize_shape = factor * tf.to_double(shape[0:2])
            resize_shape = tf.cast(tf.floor(resize_shape), tf.int32)

            image = tf_image.resize_image(image, resize_shape,
                                          method=tf.image.ResizeMethod.BILINEAR,
                                          align_corners=False)
            # Pad to expected size.
            image, bboxes = tf_image.resize_image_bboxes_with_crop_or_pad(
                image, bboxes, out_shape[0], out_shape[1])
        elif resize == Resize.WARP_RESIZE:
            # Warp resize of the image.
            image = tf_image.resize_image(image, out_shape,
                                          method=tf.image.ResizeMethod.BILINEAR,
                                          align_corners=False)

        # Split back bounding boxes.
        bbox_img = bboxes[0]
        bboxes = bboxes[1:]
        # Remove difficult boxes.
        if difficults is not None:
            mask = tf.logical_not(tf.cast(difficults, tf.bool))
            labels = tf.boolean_mask(labels, mask)
            bboxes = tf.boolean_mask(bboxes, mask)
        # Image data format.
        if data_format == 'NCHW':
            image = tf.transpose(image, perm=(2, 0, 1))
        return image, labels, bboxes, bbox_img
Example #45
def DefineLoss(x, y, g_list, g_list_omega, params, trainable_var):
	# Minimize the mean squared errors.
	# subtraction and squaring element-wise, then average over both dimensions
	# n columns
	# average of each row (across columns), then average the rows
	den_nonzero = 10**(-5)


	# autoencoder loss
	if params['relative_loss']:
		loss1den = tf.reduce_mean(tf.reduce_mean(tf.square(tf.squeeze(x[0,:,:])),1))+den_nonzero
	else:
		loss1den = tf.to_double(1.0)
	loss1 = params['recon_lam']*tf.truediv(tf.reduce_mean(tf.reduce_mean(tf.square(y[0] - tf.squeeze(x[0,:,:])),1)), loss1den)

	# gets dynamics
	loss2 = tf.zeros([1,], dtype=tf.float64)
	if params['num_shifts'] > 0:
		for j in np.arange(params['num_shifts']):
			# xk+1, xk+2, xk+3
			shift = params['shifts'][j]
			if params['relative_loss']:
				loss2den = tf.reduce_mean(tf.reduce_mean(tf.square(tf.squeeze(x[shift,:,:])),1))+den_nonzero
			else:
				loss2den = tf.to_double(1.0)
			loss2 = loss2 + params['recon_lam']*tf.truediv(tf.reduce_mean(tf.reduce_mean(tf.square(y[j+1] - tf.squeeze(x[shift,:,:])),1)), loss2den)
		loss2 = loss2/params['num_shifts']

	# K linear
	loss3 = tf.zeros([1,], dtype=tf.float64)
	countSM = 0
	if params['num_shifts_middle'] > 0:
		next_step = net.varying_multiply(g_list[0], g_list_omega[0], params['deltat'])
		for j in np.arange(max(params['shifts_middle'])):
	                if ((j+1) in params['shifts_middle']):
				# muliply g_list[0] by L (j+1) times
				# next_step = tf.matmul(g_list[0], L_pow)
				if params['relative_loss']:
					loss3den = tf.reduce_mean(tf.reduce_mean(tf.square(tf.squeeze(g_list[countSM+1])),1))+den_nonzero
				else:
					loss3den = tf.to_double(1.0)
				loss3 = loss3 + params['mid_shift_lam']*tf.truediv(tf.reduce_mean(tf.reduce_mean(tf.square(next_step - g_list[countSM+1]),1)),loss3den)
				countSM +=1
			# hopefully still on correct traj, so same omegas as before
			next_step = net.varying_multiply(next_step, g_list_omega[j+1], params['deltat'])
		loss3 = loss3/params['num_shifts_middle']


	if params['reg_lam']:
		l1_regularizer = tf.contrib.layers.l1_regularizer(scale=params['reg_lam'],scope=None)
		# TODO: don't include biases? use weights dict instead?
		loss_L1 = tf.contrib.layers.apply_regularization(l1_regularizer, weights_list=trainable_var)
	else:
		loss_L1 = tf.zeros([1,], dtype=tf.float64)

	# tf.nn.l2_loss returns number
	l2_regularizer = tf.add_n([tf.nn.l2_loss(v) for v in trainable_var if 'b' not in v.name])
	loss_L2 = params['l2_lam'] * l2_regularizer

	# inf norm on autoencoder error
	linf1_den = tf.norm(tf.norm(tf.squeeze(x[0,:,:]),axis=1,ord=np.inf),ord=np.inf)+den_nonzero
	linf2_den = tf.norm(tf.norm(tf.squeeze(x[1,:,:]),axis=1,ord=np.inf),ord=np.inf)+den_nonzero
	regularization_penalty_Linf_1 = tf.truediv(tf.norm(tf.norm(y[0] - tf.squeeze(x[0,:,:]),axis=1,ord=np.inf),ord=np.inf), linf1_den)
	regularization_penalty_Linf_2 = tf.truediv(tf.norm(tf.norm(y[1] - tf.squeeze(x[1,:,:]),axis=1,ord=np.inf),ord=np.inf), linf2_den)
	loss_Linf = params['reg_inf_lam'] * (regularization_penalty_Linf_1 + regularization_penalty_Linf_2)

	loss = loss1 + loss2 + loss3 + loss_Linf
	regularized_loss = loss + loss_L1 + loss_L2

	return loss1, loss2, loss3, loss_Linf, loss_L1, loss_L2, loss, regularized_loss
tf.square(a)      # element-wise square
tf.round(d)       # round to the nearest integer
tf.sqrt(a)        # square root
tf.pow(a, b)      # a raised to the power b
tf.exp(a)         # e raised to the power a
tf.log(a)         # natural logarithm of a (base e)
tf.maximum(a, b)  # element-wise maximum
tf.minimum(a, b)  # element-wise minimum
tf.cos(a)         # cosine


# Dtype conversion
e = tf.constant("1.25")
tf.string_to_number(e)   # convert a string to a number
tf.to_double(a)
tf.cast(a, tf.int32)     # cast to a given dtype; to an int type it truncates, e.g. 1.8 -> 1, 2.2 -> 2

# Shape operations
tf.shape()        # returns the shape of a tensor
tf.size()         # returns the number of elements
tf.rank()         # returns the rank of a tensor
tf.reshape()      # changes the shape of a tensor
tf.expand_dims()  # inserts a dimension of size 1 into a tensor

# Slicing and joining
tf.slice()    # slicing
tf.split()    # splits a tensor along a given dimension
tf.concat()   # concatenates tensors along a given dimension
tf.pack()     # packs a list of rank-R tensors into one rank-(R+1) tensor
tf.reverse()  # reverses a tensor along a given dimension
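One note on the conversion ops above: in the TF 1.x API used throughout these examples, tf.to_double(a) is simply a cast to float64; it was removed from the main namespace in TF 2.x in favour of tf.cast (tf.compat.v1.to_double remains):

import tensorflow as tf   # TF 1.x assumed, as in the snippets above

a = tf.constant([1, 2])
x64 = tf.to_double(a)            # float64 tensor (TF 1.x shorthand)
y64 = tf.cast(a, tf.float64)     # equivalent, and the TF 2.x spelling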
Example #47
#!/usr/bin/env python
# -*- coding: utf-8 -*-
########################################################################
#
# Copyright (c) 2016 Baidu.com, Inc. All Rights Reserved
#
########################################################################

"""
File: d_tensor.py
Author: hanjiatong([email protected])
Date: 2016/08/21 16:38:51
"""
import tensorflow as tf

a = tf.constant([1, 2])
print a
print tf.to_double(a)

b = tf.constant("b")
Example #48
# The input data
data = [Row(x=[float(x), float(2 * x)], key=str(x % 2)) for x in range(1, 6)]
df = sqlContext.createDataFrame(data)
df = tfs.analyze(sqlContext.createDataFrame(data))

# The geometric mean:
# TODO(tjh) make a test out of this, it found some bugs
# - non numeric columns (string)
# - unused columns
# - output that has a child
col_name = "x"
col_key = "key"
with tf.Graph().as_default() as g:
    x = tfs.block(df, col_name)
    invs = tf.inv(tf.to_double(x), name="invs")
    df2 = tfs.map_blocks([invs, tf.ones_like(invs, name="count")], df)


# The geometric mean
gb = df2.select(col_key, "invs", "count").groupBy("key")
with tf.Graph().as_default() as g:
    x_input = tfs.block(df2, "invs", tf_name="invs_input")
    count_input = tfs.block(df2, "invs", tf_name="count_input")
    x = tf.reduce_sum(x_input, [0], name='invs')
    count = tf.reduce_sum(count_input, [0], name='count')
    df3 = tfs.aggregate([x, count], gb)

with tf.Graph().as_default() as g:
    invs = tfs.block(df2, "invs")
    count = tfs.block(df2, "count")
Example #49
def log_partition_function(num_nodes,
                           scores,
                           forest=False,
                           max_dynamic_range=None):
  r"""Returns the log of the sum-of-product of spanning trees or forests.

  Computing the sum-of-product in the log domain reduces the chance of overflow
  or underflow, and ML techniques (e.g., CRF loss functions) typically require
  the log partition function anyways.  For similar reasons, the scores input is
  assumed to be specified in the log domain.

  The partition function is calculated via application of the Matrix-Tree
  theorem; see the following for details:
    https://en.wikipedia.org/wiki/Kirchhoff%27s_theorem
    http://www.aclweb.org/anthology/D/D07/D07-1015.pdf

  Computing the gradient of the log partition function requires inverting the
  Laplacian matrix.  Numerical issues may occur if the Laplacian is singular or
  nearly-so.  (Intuitively, the Laplacian will be close to singular when the
  input scores strongly favor invalid structures such as cycles).  In the EMNLP
  paper, we alleviated the numerical issues by clipping the difference between
  the minimum and maximum score for each node to 20 (in the log domain).  The
  |max_dynamic_range| argument can be used for this purpose.

  TODO(googleuser): Try improving the condition number of the Laplacian matrix
  directly, instead of using the indirect approach above.  For example, one
  could add c*I to the Laplacian (i.e., Tikhonov regularization).

  Args:
    num_nodes: [B] vector of graph sizes per batch item.
    scores: [B,M,M] tensor of padded batched arc and root scores, in the format
      used by the maximum_spanning_tree() op.  Padding values must be finite.
    forest: If true, sum over spanning forests instead of trees.
    max_dynamic_range: If specified, incoming scores for each node are clipped
      to at most this far from the maximum such score (in the log domain).

  Returns:
    [B] vector Z of log partition function values, where
      Z[b] = log(
          \sum_{tree spanning batch item b}
              score(root_of(tree)) \prod_{arc in tree} score(arc))
  """
  orig_dtype = scores.dtype.base_dtype
  scores_bxmxm = tf.to_double(scores)  # use doubles to reduce under/overflow
  shape_bxmxm = tf.shape(scores_bxmxm)
  batch_size = shape_bxmxm[0]
  max_nodes = shape_bxmxm[1]
  total_nodes = batch_size * max_nodes

  # To eliminate overflow, we locally normalize the scores.  Specifically, for
  # each node we divide its incoming arc scores and root selection score by the
  # maximum such score.  Since each node in a tree must select exactly one of
  # these scores (i.e., it is either a root or has exactly one incoming arc),
  # the local normalization factors are identical for all trees and can thus be
  # factored out of the sum over trees.
  #
  # More concretely, we find the maximum per node, divide all scores for that
  # node by the maximum, and then find the partition function of the normalized
  # scores.  Then we recover the un-normalized partition function by multiplying
  # the per-node maxima back in.  This final step is performed in the log domain
  # to avoid overflow.
  #
  # Note that underflow is still possible, but unlikely as long as the scores
  # are close to feasible (i.e., there is not too much mass on non-trees).  The
  # |max_dynamic_range| argument can be used to mitigate this.
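  #
  # As a small worked example: if a node's incoming log-scores are [10, 12, 9],
  # subtracting the per-node maximum of 12 gives [-2, 0, -3].  Each tree uses
  # exactly one of these scores, so summing the per-node maxima for the batch
  # item and adding that total back at the end (|log_normalization_factor_b|
  # below) recovers the un-normalized log partition function.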

  # Finding the maximum incoming score is difficult, because the batch padding
  # may contain arbitrary values.  We restrict the maximization to valid arcs
  # using tf.unsorted_segment_max() with a specially-constructed set of IDs.
  _, valid_tokens_bxm = digraph_ops.ValidArcAndTokenMasks(
      num_nodes, max_nodes, dtype=tf.int32)

  # Create a tensor of "target IDs".  In each row of each sub-matrix, the
  # positions of valid source tokens are filled with the 1-origin index of that
  # row in the entire batch, and zero elsewhere.  For example, given a batch
  # with num_nodes=[2, 3] we might have
  #   [[[1, 1, 0],
  #     [2, 2, 0],
  #     [3, 3, 0]],
  #    [[4, 4, 4],
  #     [5, 5, 5],
  #     [6, 6, 6]]]
  #
  # TODO(googleuser): The dynamic masking is pretty awkward.  Find an op that does
  # this (I looked, but maybe not hard enough), or write a custom op for this.
  valid_tokens_bx1xm = tf.expand_dims(valid_tokens_bxm, 1)
  valid_sources_bxmxm = tf.tile(valid_tokens_bx1xm, [1, max_nodes, 1])
  sequence_bm = 1 + tf.range(total_nodes, dtype=tf.int32)
  sequence_bxmx1 = tf.reshape(sequence_bm, [batch_size, max_nodes, 1])
  target_ids_bxmxm = valid_sources_bxmxm * sequence_bxmx1

  max_scores_bm1 = tf.unsorted_segment_max(scores_bxmxm, target_ids_bxmxm,
                                           total_nodes + 1)
  max_scores_bm = max_scores_bm1[1:]  # ID 0 corresponds to padding

  # Similar to above, we need to sum over the valid tokens.  We analogously use
  # tf.unsorted_segment_sum() with a specially-constructed set of "batch IDs".
  sequence_b = 1 + tf.range(batch_size, dtype=tf.int32)
  sequence_bx1 = tf.expand_dims(sequence_b, 1)
  batch_ids_bxm = valid_tokens_bxm * sequence_bx1
  batch_ids_bm = tf.reshape(batch_ids_bxm, [-1])

  log_normalization_factor_b1 = tf.unsorted_segment_sum(
      max_scores_bm, batch_ids_bm, batch_size + 1)
  log_normalization_factor_b = log_normalization_factor_b1[1:]

  # Locally-normalize and optionally clip the scores.
  max_scores_bxmx1 = tf.reshape(max_scores_bm, [batch_size, max_nodes, 1])
  scores_bxmxm -= max_scores_bxmx1
  if max_dynamic_range is not None:
    # After normalization, the scores are non-positive with max=0, so the
    # |max_dynamic_range| can be applied directly.
    #
    # PyLint thinks "-max_dynamic_range" is invalid because it defaults to None.
    # pylint: disable=invalid-unary-operand-type
    scores_bxmxm = tf.maximum(scores_bxmxm, -max_dynamic_range)
  scores_bxmxm = tf.exp(scores_bxmxm)

  # Apply the Matrix-Tree theorem.
  exp_normalized_laplacian_bxmxm = digraph_ops.LaplacianMatrix(
      num_nodes, scores_bxmxm, forest=forest)
  log_normalized_partition_function_b = tf.log(
      tf.matrix_determinant(exp_normalized_laplacian_bxmxm))

  # Reapply the normalization factor that was divided out.
  log_partition_function_b = (
      log_normalized_partition_function_b + log_normalization_factor_b)
  return tf.cast(log_partition_function_b, orig_dtype)
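
Since the function above leans on the Matrix-Tree theorem, here is a small standalone NumPy sketch (an illustration only, not DRAGNN code; `theta`, `roots`, and `tree_weight` are made-up names, and the scores are kept in the ordinary rather than the log domain) that checks the determinant identity from Koo et al. (2007) against brute-force enumeration of directed spanning trees:

import itertools
import numpy as np

n = 3
rng = np.random.RandomState(0)
theta = rng.rand(n, n)   # theta[h, m]: weight of the arc h -> m
roots = rng.rand(n)      # roots[m]: weight of selecting m as the root

def tree_weight(heads):
    # heads[m] == -1 means m is the root; otherwise heads[m] is m's head.
    if sum(h == -1 for h in heads) != 1:
        return 0.0
    weight = 1.0
    for m, h in enumerate(heads):
        weight *= roots[m] if h == -1 else theta[h, m]
        seen, cur = set(), m          # reject cycles by walking up toward the root
        while heads[cur] != -1:
            if cur in seen:
                return 0.0
            seen.add(cur)
            cur = heads[cur]
    return weight

# Brute force: sum of products of weights over all directed spanning trees.
choices = [[-1] + [h for h in range(n) if h != m] for m in range(n)]
z_brute = sum(tree_weight(heads) for heads in itertools.product(*choices))

# Matrix-Tree theorem: off-diagonal entries are negated arc weights, diagonal
# entries are the column sums of incoming weights, and the first row is
# replaced by the root weights; the determinant is the partition function.
laplacian = -theta.copy()
np.fill_diagonal(laplacian, 0.0)
np.fill_diagonal(laplacian, -laplacian.sum(axis=0))
laplacian[0, :] = roots
z_mtt = np.linalg.det(laplacian)

print(z_brute, z_mtt)  # the two values agree up to floating-point error

The batched TensorFlow code performs the same computation per batch item, but in the log domain and with the local normalization described above; its Laplacian construction lives in digraph_ops.LaplacianMatrix and may differ in detail from this sketch.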
Example #50
0
def main(_):
  assert FLAGS.output_dir, '--output_dir has to be provided'
  if not tf.gfile.Exists(FLAGS.output_dir):
    tf.gfile.MakeDirs(FLAGS.output_dir)
  params = model_lib.default_hparams()
  params.parse(FLAGS.hparams)
  tf.logging.info('User provided hparams: %s', FLAGS.hparams)
  tf.logging.info('All hyper parameters: %s', params)
  batch_size = params.batch_size
  graph = tf.Graph()
  with graph.as_default():
    with tf.device(tf.train.replica_device_setter(ps_tasks=FLAGS.ps_tasks)):
      # dataset
      dataset, examples_per_epoch, num_classes, bounds = (
          dataset_factory.get_dataset(
              FLAGS.dataset,
              'train',
              batch_size,
              FLAGS.dataset_image_size,
              is_training=True))
      dataset_iterator = dataset.make_one_shot_iterator()
      images, labels = dataset_iterator.get_next()
      one_hot_labels = tf.one_hot(labels, num_classes)

      # set up model
      global_step = tf.train.get_or_create_global_step()
      model_fn = model_lib.get_model(FLAGS.model_name, num_classes)
      if params.train_adv_method == 'clean':
        logits = model_fn(images, is_training=True)
        adv_examples = None
      else:
        model_fn_eval_mode = lambda x: model_fn(x, is_training=False)
        adv_examples = adversarial_attack.generate_adversarial_examples(
            images, bounds, model_fn_eval_mode, params.train_adv_method)
        all_examples = tf.concat([images, adv_examples], axis=0)
        logits = model_fn(all_examples, is_training=True)
        one_hot_labels = tf.concat([one_hot_labels, one_hot_labels], axis=0)

      # update trainable variables if fine tuning is used
      model_lib.filter_trainable_variables(
          FLAGS.finetune_trainable_scopes)

      # set up losses
      total_loss = tf.losses.softmax_cross_entropy(
          onehot_labels=one_hot_labels,
          logits=logits,
          label_smoothing=params.label_smoothing)
      tf.summary.scalar('loss_xent', total_loss)

      if params.train_lp_weight > 0:
        # Logit pairing: match the clean and adversarial halves of the logits.
        logits1, logits2 = tf.split(logits, 2)
        loss_lp = tf.losses.mean_squared_error(
            logits1, logits2, weights=params.train_lp_weight)
        tf.summary.scalar('loss_lp', loss_lp)
        total_loss += loss_lp

      if params.weight_decay > 0:
        loss_wd = (
            params.weight_decay
            * tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()])
        )
        tf.summary.scalar('loss_wd', loss_wd)
        total_loss += loss_wd

      # Setup the moving averages:
      if FLAGS.moving_average_decay and (FLAGS.moving_average_decay > 0):
        with tf.name_scope('moving_average'):
          moving_average_variables = tf.contrib.framework.get_model_variables()
          variable_averages = tf.train.ExponentialMovingAverage(
              FLAGS.moving_average_decay, global_step)
      else:
        moving_average_variables = None
        variable_averages = None

      # set up optimizer and training op
      learning_rate, steps_per_epoch = model_lib.get_lr_schedule(
          params, examples_per_epoch, FLAGS.replicas_to_aggregate)

      optimizer = model_lib.get_optimizer(params, learning_rate)

      optimizer = tf.train.SyncReplicasOptimizer(
          opt=optimizer,
          replicas_to_aggregate=FLAGS.replicas_to_aggregate,
          total_num_replicas=FLAGS.worker_replicas,
          variable_averages=variable_averages,
          variables_to_average=moving_average_variables)

      train_op = tf.contrib.training.create_train_op(
          total_loss, optimizer,
          update_ops=tf.get_collection(tf.GraphKeys.UPDATE_OPS))

      tf.summary.image('images', images[0:FLAGS.num_summary_images])
      if adv_examples is not None:
        tf.summary.image('adv_images', adv_examples[0:FLAGS.num_summary_images])
      tf.summary.scalar('total_loss', total_loss)
      tf.summary.scalar('learning_rate', learning_rate)
      tf.summary.scalar('current_epoch',
                        tf.to_double(global_step) / steps_per_epoch)

      # Training
      is_chief = FLAGS.task == 0

      scaffold = tf.train.Scaffold(
          init_fn=_get_finetuning_init_fn(variable_averages))
      hooks = [
          tf.train.LoggingTensorHook({'total_loss': total_loss,
                                      'global_step': global_step},
                                     every_n_iter=1),
          tf.train.NanTensorHook(total_loss),
      ]
      chief_only_hooks = [
          tf.train.SummarySaverHook(save_steps=FLAGS.save_summaries_steps,
                                    save_secs=FLAGS.save_summaries_secs,
                                    output_dir=FLAGS.output_dir,
                                    scaffold=scaffold),
          tf.train.CheckpointSaverHook(FLAGS.output_dir,
                                       save_steps=FLAGS.save_model_steps,
                                       scaffold=scaffold),
      ]

      if FLAGS.max_steps > 0:
        hooks.append(
            tf.train.StopAtStepHook(last_step=FLAGS.max_steps))

      # hook for sync replica training
      hooks.append(optimizer.make_session_run_hook(is_chief))

      with tf.train.MonitoredTrainingSession(
          master=FLAGS.master,
          is_chief=is_chief,
          checkpoint_dir=FLAGS.output_dir,
          scaffold=scaffold,
          hooks=hooks,
          chief_only_hooks=chief_only_hooks,
          save_checkpoint_secs=None,
          save_summaries_steps=None,
          save_summaries_secs=None) as session:
        while not session.should_stop():
          session.run([train_op])
Example #51
0
    def build_hmc_update(self, L=0, eps=1e-2):
        with tf.name_scope("gold_model"):
            self.joint_density_gold = self.joint_density(**self.symbols_gold)

            for latent in self.latents.values():
                grad = tf.gradients(self.joint_density_gold, latent["gold"])[0]
                momentum = latent["init_p"] + eps * grad / 2.0
                latent["grad"].append(grad)
                latent["momentum"].append(momentum)

        lps = []
        for i in range(L+1):
            with tf.name_scope("leapfrog_%d" % i):
                new_symbols = {}
                for name, latent in self.latents.items():                
                    old_x = latent["path"][i]
                    new_x = old_x + eps * latent["momentum"][i]
                    latent["path"].append(new_x)
                    new_symbols[name] = new_x
                    
                lp = self.joint_density(**new_symbols)
                lps.append(lp)
                for latent in self.latents.values():
                    grad = tf.gradients(lp, latent["path"][i+1])[0]
                    if i != L:
                        momentum = latent["momentum"][i] + eps * grad
                    else:
                        momentum = latent["momentum"][i] + eps * grad / 2.0
                    latent["grad"].append(grad)
                    latent["momentum"].append(momentum)

        for latent in self.latents.values():
            init_p = latent["init_p"]
            final_p = latent["momentum"][-1]                            
            latent["init_K"] = tf.reduce_sum(init_p*init_p)/2.0
            latent["final_K"] = tf.reduce_sum(final_p*final_p)/2.0

        with tf.name_scope("mh_updates"):
            init_Ks = tf.pack([latent["init_K"] for latent in self.latents.values()])
            self.init_K = tf.reduce_sum(init_Ks)

            final_Ks = tf.pack([latent["final_K"] for latent in self.latents.values()])
            self.final_K = tf.reduce_sum(final_Ks)

            self.final_density = lps[-1]
            self.mh_ratio = (self.final_density - self.final_K)  - (self.joint_density_gold - self.init_K)
            self.uniform = tf.placeholder(dtype=tf.float32, name="u")
            log_uniform = tf.log(self.uniform)
            self.accepted = log_uniform < self.mh_ratio 
            
            update_ops = []
            for name, latent in self.latents.items():

                if len(latent["shape"]) > 0:
                    accepted_vec = tf.tile(tf.reshape(self.accepted, (1,)), latent["shape"]) # HACK
                else:
                    accepted_vec = self.accepted
                gold_x = latent["gold"]
                proposed_x = latent["path"][-1]
                next_x = tf.select(accepted_vec, proposed_x, gold_x)
                update_ops.append(gold_x.assign(next_x))

            self.step_counter = tf.Variable(0)
            self.accept_counter = tf.Variable(0)
            self.accept_rate = tf.to_double(self.accept_counter) / tf.to_double(self.step_counter)
            update_ops.append(self.step_counter.assign_add(1))
            update_ops.append(self.accept_counter.assign_add(tf.select(self.accepted, 1, 0)))
            
            self.global_update = tf.group(*update_ops)
                
        return self.global_update
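
For reference, a compact NumPy sketch of the same bookkeeping (an illustration only, assuming a one-dimensional standard-normal target; none of these names come from the class above): leapfrog integration, the Metropolis-Hastings ratio in the log domain, and a running acceptance rate analogous to accept_counter / step_counter.

import numpy as np

def log_density(x):
    return -0.5 * x ** 2          # unnormalized log-density of N(0, 1)

def grad_log_density(x):
    return -x

def hmc_step(x, L=10, eps=0.1, rng=np.random):
    p0 = rng.normal()                                  # resample the momentum
    p = p0 + 0.5 * eps * grad_log_density(x)           # initial half-step
    x_new = x
    for i in range(L):
        x_new = x_new + eps * p                        # full position step
        g = grad_log_density(x_new)
        p = p + (eps * g if i < L - 1 else 0.5 * eps * g)  # half-step at the end
    init_K, final_K = 0.5 * p0 ** 2, 0.5 * p ** 2
    mh_ratio = (log_density(x_new) - final_K) - (log_density(x) - init_K)
    accepted = np.log(rng.uniform()) < mh_ratio
    return (x_new if accepted else x), accepted

x, accepts, steps = 0.0, 0, 1000
for _ in range(steps):
    x, ok = hmc_step(x)
    accepts += int(ok)
print("accept rate:", accepts / float(steps))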
Example #52
0
os.system("rm result.csv")
os.system("rm -rf ./log/*")
load_test_data()
load_train_data()


l, _ = train_x.shape
batch_times = int(l / batch_size)  # number of batches per epoch

  
X = tf.placeholder(tf.float64, [None, 39])  # 39 input features per example
Y = tf.placeholder(tf.float64, [None, 1])   # single float target
  

  
weights = {"W_layer_one":tf.Variable(tf.to_double(tf.random_normal([39, hidden_one])), dtype=tf.float64),  
         "W_layer_two":tf.Variable(tf.to_double(tf.random_normal([hidden_one, hidden_two])), dtype=tf.float64),  
         "W_layer_thr":tf.Variable(tf.to_double(tf.random_normal([hidden_two, hidden_thr])), dtype=tf.float64),  
         "W_out_layer":tf.Variable(tf.to_double(tf.random_normal([hidden_thr, 1])), dtype=tf.float64)}  
biases = {"b_layer_one":tf.Variable(tf.to_double(tf.random_normal([hidden_one])), dtype=tf.float64),  
        "b_layer_two":tf.Variable(tf.to_double(tf.random_normal([hidden_two])), dtype=tf.float64),  
        "b_layer_thr":tf.Variable(tf.to_double(tf.random_normal([hidden_thr])), dtype=tf.float64),  
        "b_out_layer":tf.Variable(tf.to_double(tf.random_normal([1])),dtype=tf.float64)}   

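# The helper multilayer_perceptron_with_relu is not shown in this snippet; a
# plausible sketch, consistent with the float64 weight and bias dictionaries
# above (an assumption, not necessarily the original definition), would be:
def multilayer_perceptron_with_relu(x, weights, biases):
    # Three ReLU hidden layers followed by a linear output layer.
    layer_one = tf.nn.relu(tf.add(tf.matmul(x, weights["W_layer_one"]), biases["b_layer_one"]))
    layer_two = tf.nn.relu(tf.add(tf.matmul(layer_one, weights["W_layer_two"]), biases["b_layer_two"]))
    layer_thr = tf.nn.relu(tf.add(tf.matmul(layer_two, weights["W_layer_thr"]), biases["b_layer_thr"]))
    return tf.add(tf.matmul(layer_thr, weights["W_out_layer"]), biases["b_out_layer"])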
pred = multilayer_perceptron_with_relu(X, weights, biases)  
  
cost = tf.reduce_mean(tf.pow(tf.subtract(Y, pred), 2))  
loss = tf.reduce_mean(tf.exp(tf.abs(tf.subtract(Y, pred))))
Example #53
0
def percentile(x,
               q,
               axis=None,
               interpolation=None,
               keep_dims=False,
               validate_args=False,
               name=None):
  """Compute the `q`-th percentile of `x`.

  Given a vector `x`, the `q`-th percentile of `x` is the value `q / 100` of the
  way from the minimum to the maximum in a sorted copy of `x`.

  The values and distances of the two nearest neighbors as well as the
  `interpolation` parameter will determine the percentile if the normalized
  ranking does not match the location of `q` exactly.

  This function is the same as the median if `q = 50`, the same as the minimum
  if `q = 0` and the same as the maximum if `q = 100`.


  ```python
  # Get 30th percentile with default ('nearest') interpolation.
  x = [1., 2., 3., 4.]
  percentile(x, q=30.)
  ==> 2.0

  # Get 30th percentile with 'lower' interpolation
  x = [1., 2., 3., 4.]
  percentile(x, q=30., interpolation='lower')
  ==> 1.0

  # Get 100th percentile (maximum).  By default, this is computed over every dim
  x = [[1., 2.],
       [3., 4.]]
  percentile(x, q=100.)
  ==> 4.0

  # Treat the leading dim as indexing samples, and find the 100th quantile (max)
  # over all such samples.
  x = [[1., 2.],
       [3., 4.]]
  percentile(x, q=100., axis=[0])
  ==> [3., 4.]
  ```

  Compare to `numpy.percentile`.

  Args:
    x:  Floating point `N-D` `Tensor` with `N > 0`.  If `axis` is not `None`,
      `x` must have statically known number of dimensions.
    q:  Scalar `Tensor` in `[0, 100]`. The percentile.
    axis:  Optional `0-D` or `1-D` integer `Tensor` with constant values.
      The axis that holds independent samples over which to return the desired
      percentile.  If `None` (the default), treat every dimension as a sample
      dimension, returning a scalar.
    interpolation : {"lower", "higher", "nearest"}.  Default: "nearest"
      This optional parameter specifies the interpolation method to
      use when the desired quantile lies between two data points `i < j`:
        * lower: `i`.
        * higher: `j`.
        * nearest: `i` or `j`, whichever is nearest.
    keep_dims:  Python `bool`. If `True`, the last dimension is kept with size 1.
      If `False`, the last dimension is removed from the output shape.
    validate_args:  Whether to add runtime checks of argument validity.
      If False, and arguments are incorrect, correct behavior is not guaranteed.
    name:  A Python string name to give this `Op`.  Default is "percentile".

  Returns:
    A `(N - len(axis))` dimensional `Tensor` of same dtype as `x`, or, if
      `axis` is `None`, a scalar.

  Raises:
    ValueError:  If argument 'interpolation' is not an allowed type.
  """
  name = name or "percentile"
  allowed_interpolations = {"lower", "higher", "nearest"}

  if interpolation is None:
    interpolation = "nearest"
  else:
    if interpolation not in allowed_interpolations:
      raise ValueError("Argument 'interpolation' must be in %s.  Found %s" %
                       (allowed_interpolations, interpolation))

  with tf.name_scope(name, [x, q]):
    x = tf.convert_to_tensor(x, name="x")
    # Double is needed here and below, else we get the wrong index if the array
    # is huge along axis.
    q = tf.to_double(q, name="q")
    _get_static_ndims(q, expect_ndims=0)

    if validate_args:
      q = control_flow_ops.with_dependencies([
          tf.assert_rank(q, 0),
          tf.assert_greater_equal(q, tf.to_double(0.)),
          tf.assert_less_equal(q, tf.to_double(100.))
      ], q)

    if axis is None:
      y = tf.reshape(x, [-1])
    else:
      axis = tf.convert_to_tensor(axis, name="axis")
      tf.assert_integer(axis)
      axis_ndims = _get_static_ndims(
          axis, expect_static=True, expect_ndims_no_more_than=1)
      axis_const = tensor_util.constant_value(axis)
      if axis_const is None:
        raise ValueError(
            "Expected argument 'axis' to be statically available.  Found: %s" %
            axis)
      axis = axis_const
      if axis_ndims == 0:
        axis = [axis]
      axis = [int(a) for a in axis]
      x_ndims = _get_static_ndims(
          x, expect_static=True, expect_ndims_at_least=1)
      axis = _make_static_axis_non_negative(axis, x_ndims)
      y = _move_dims_to_flat_end(x, axis, x_ndims)

    frac_at_q_or_above = 1. - q / 100.
    d = tf.to_double(tf.shape(y)[-1])

    if interpolation == "lower":
      index = tf.ceil((d - 1) * frac_at_q_or_above)
    elif interpolation == "higher":
      index = tf.floor((d - 1) * frac_at_q_or_above)
    elif interpolation == "nearest":
      index = tf.round((d - 1) * frac_at_q_or_above)

    # If d is gigantic, then we would have d == d - 1, even in double... So
    # let's use max/min to avoid out of bounds errors.
    d = tf.shape(y)[-1]
    # d - 1 will be distinct from d in int32.
    index = tf.clip_by_value(tf.to_int32(index), 0, d - 1)

    # Sort everything, not just the top 'k' entries, which allows multiple calls
    # to sort only once (under the hood) and use CSE.
    sorted_y = _sort_tensor(y)

    # result.shape = B
    result = sorted_y[..., index]
    result.set_shape(y.get_shape()[:-1])

    if keep_dims:
      if axis is None:
        # ones_vec = [1, 1,..., 1], total length = len(S) + len(B).
        ones_vec = tf.ones(shape=[_get_best_effort_ndims(x)], dtype=tf.int32)
        result *= tf.ones(ones_vec, dtype=x.dtype)
      else:
        result = _insert_back_keep_dims(result, axis)

    return result
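
As a sanity check on the 'nearest' index arithmetic above, the following standalone NumPy sketch (an illustration only, not the library implementation; percentile_nearest is a made-up name) reproduces the docstring examples by sorting in ascending order and indexing from the top:

import numpy as np

def percentile_nearest(values, q):
    y = np.sort(np.asarray(values, dtype=np.float64).ravel())
    d = y.shape[0]
    frac_at_q_or_above = 1.0 - q / 100.0
    index = int(np.clip(np.round((d - 1) * frac_at_q_or_above), 0, d - 1))
    return y[(d - 1) - index]

print(percentile_nearest([1., 2., 3., 4.], 30.))       # 2.0, as in the docstring
print(percentile_nearest([[1., 2.], [3., 4.]], 100.))  # 4.0 (the maximum)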