Code example #1
File: test_util.py Project: asudomoeva/probability
  def histogram(self, x, value_range=None, nbins=None, name=None):
    """Return histogram of values.

    Given the tensor `x`, this operation returns a rank 1 histogram
    counting the number of entries in `x` that fell into every bin. The
    bins are equal width and determined by the arguments `value_range` and
    `nbins`.

    Args:
      x: 1D numeric `Tensor` of items to count.
      value_range:  Shape [2] `Tensor`. Values with `x <= value_range[0]` will
        be mapped to `hist[0]`, values with `x >= value_range[1]` will be
        mapped to `hist[-1]`. Must be the same dtype as `x`.
      nbins:  Scalar `int32 Tensor`.  Number of histogram bins.
      name: Python `str` name prefixed to Ops created by this class.

    Returns:
      counts: 1D `Tensor` of counts, i.e.,
        `counts[i] = sum{ edges[i-1] <= values[j] < edges[i] : j }`.
      edges: 1D `Tensor` characterizing intervals used for counting.
    """
    with tf.name_scope(name, "histogram", [x]):
      x = tf.convert_to_tensor(x, name="x")
      if value_range is None:
        value_range = [tf.reduce_min(x), 1 + tf.reduce_max(x)]
      value_range = tf.convert_to_tensor(value_range, name="value_range")
      lo = value_range[0]
      hi = value_range[1]
      if nbins is None:
        nbins = tf.to_int32(hi - lo)
      delta = (hi - lo) / tf.cast(nbins, dtype=value_range.dtype.base_dtype)
      edges = tf.range(
          start=lo, limit=hi, delta=delta, dtype=x.dtype.base_dtype)
      counts = tf.histogram_fixed_width(x, value_range=value_range, nbins=nbins)
      return counts, edges
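A minimal usage sketch for the helper above (assumptions: a TF1-style session, illustrative input values, and that the method is called on its owning instance):

  x = [0.0, 0.5, 1.2, 2.7, 3.9]
  counts, edges = self.histogram(x, value_range=[0.0, 4.0], nbins=4)
  with tf.Session() as sess:
    c, e = sess.run([counts, edges])
  # c -> [2, 1, 1, 1]; e -> [0.0, 1.0, 2.0, 3.0]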
Code example #2
  def test_multiple_random_accumulating_updates_results_in_right_dist(self):
    # Accumulate the updates in a new variable.  Resultant
    # histogram should be uniform.  Use only 3 bins because with many bins it
    # would be unlikely that all would be close to 1/n.  If someone ever wants
    # to test that, it would be better to check that the cdf was linear.
    value_range = [1.0, 4.14159]
    with self.test_session() as sess:
      values = tf.placeholder(tf.float32, shape=[4, 4, 4])
      hist = tf.histogram_fixed_width(values,
                                      value_range,
                                      nbins=3,
                                      dtype=tf.int64)

      hist_accum = tf.Variable(tf.zeros_initializer([3], dtype=tf.int64))
      hist_accum = hist_accum.assign_add(hist)

      tf.initialize_all_variables().run()

      for _ in range(100):
        # Map the rv: U[0, 1] --> U[value_range[0], value_range[1]].
        values_arr = (
            value_range[0] +
            (value_range[1] - value_range[0]) * self.rng.rand(4, 4, 4))

        hist_accum_arr = sess.run(hist_accum, feed_dict={values: values_arr})

    pmf = hist_accum_arr / float(hist_accum_arr.sum())
    np.testing.assert_allclose(1 / 3, pmf, atol=0.02)
Code example #3
  def test_one_update_on_constant_2d_input(self):
    # Bins will be:
    #   (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
    value_range = [0.0, 5.0]
    values = [[-1.0, 0.0, 1.5], [2.0, 5.0, 15]]
    expected_bin_counts = [2, 1, 1, 0, 2]
    with self.test_session():
      hist = tf.histogram_fixed_width(values, value_range, nbins=5)

      # Hist should start "fresh" with every eval.
      self.assertAllClose(expected_bin_counts, hist.eval())
      self.assertAllClose(expected_bin_counts, hist.eval())
Code example #4
  def test_empty_input_gives_all_zero_counts(self):
    # Bins will be:
    #   (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
    value_range = [0.0, 5.0]
    values = []
    expected_bin_counts = [0, 0, 0, 0, 0]
    with self.test_session():
      hist = tf.histogram_fixed_width(values, value_range, nbins=5)

      # Hist should start "fresh" with every eval.
      self.assertAllClose(expected_bin_counts, hist.eval())
      self.assertAllClose(expected_bin_counts, hist.eval())
Code example #5
    def test_two_updates_on_scalar_input(self):
        # Bins will be:
        #   (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
        value_range = [0.0, 5.0]
        values_1 = 1.5
        values_2 = 2.5
        expected_bin_counts_1 = [0, 1, 0, 0, 0]
        expected_bin_counts_2 = [0, 0, 1, 0, 0]
        with self.test_session():
            values = tf.placeholder(tf.float32, shape=[])
            hist = tf.histogram_fixed_width(values, value_range, nbins=5)

            # The values in hist should depend on the current feed and nothing else.
            self.assertAllClose(expected_bin_counts_2, hist.eval(feed_dict={values: values_2}))
            self.assertAllClose(expected_bin_counts_1, hist.eval(feed_dict={values: values_1}))
            self.assertAllClose(expected_bin_counts_1, hist.eval(feed_dict={values: values_1}))
            self.assertAllClose(expected_bin_counts_2, hist.eval(feed_dict={values: values_2}))
Code example #6
  def test_two_updates_on_constant_input(self):
    # Bins will be:
    #   (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
    value_range = [0.0, 5.0]
    values_1 = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
    values_2 = [1.5, 4.5, 4.5, 4.5, 0.0, 0.0]
    expected_bin_counts_1 = [2, 1, 1, 0, 2]
    expected_bin_counts_2 = [2, 1, 0, 0, 3]
    with self.test_session():
      values = tf.placeholder(tf.float32, shape=[6])
      hist = tf.histogram_fixed_width(values, value_range, nbins=5)
      tf.initialize_all_variables().run()

      # The values in hist should depend on the current feed and nothing else.
      self.assertAllClose(expected_bin_counts_1,
                          hist.eval(feed_dict={values: values_1}))
      self.assertAllClose(expected_bin_counts_2,
                          hist.eval(feed_dict={values: values_2}))
      self.assertAllClose(expected_bin_counts_1,
                          hist.eval(feed_dict={values: values_1}))
      self.assertAllClose(expected_bin_counts_1,
                          hist.eval(feed_dict={values: values_1}))
Code example #7
File: network-mnist.py Project: IsaacYangSLA/DIGITS
    def build_model(self):
        """Create the main ops"""

        if not self.is_inference:
            # create both the generator and the discriminator
            # self.x is a batch of images - shape: [N, H, W, C]
            # self.y is a vector of labels - shape: [N]

            # sample z from a normal distribution
            self.z = tf.random_normal(shape=[self.batch_size, self.z_dim], dtype=tf.float32, seed=None, name='z')

            # rescale x to [0, 1]
            x_reshaped = tf.reshape(self.x, shape=[self.batch_size, self.image_size, self.image_size, self.c_dim],
                                    name='x_reshaped')
            self.images = x_reshaped / 255.

            # one hot encode the label - shape: [N] -> [N, self.y_dim]
            self.y = tf.one_hot(self.y, self.y_dim, name='y_onehot')

            # create the generator
            self.G = self.generator(self.z, self.y)

            # create one instance of the discriminator for real images (the input is
            # images from the dataset)
            self.D, self.D_logits = self.discriminator(self.images, self.y, reuse=False)

            # create another instance of the discriminator for fake images (the input is
            # the generator's output). Note how we are reusing variables to share weights
            # between both instances of the discriminator
            self.D_, self.D_logits_ = self.discriminator(self.G, self.y, reuse=True)

            # aggregate losses across batch

            # we are using the cross entropy loss for all these losses
            d_real = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits,
                                                             labels=tf.ones_like(self.D),
                                                             name="loss_D_real")
            self.d_loss_real = tf.reduce_mean(d_real)
            d_fake = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,
                                                             labels=tf.zeros_like(self.D_),
                                                             name="loss_D_fake")
            self.d_loss_fake = tf.reduce_mean(d_fake)
            self.d_loss = (self.d_loss_real + self.d_loss_fake) / 2.
            # The typical GAN set-up is that of a minimax game where D is trying to minimize
            # its own error and G is trying to maximize D's error. However, note how we are
            # flipping G's labels here: instead of maximizing D's error, we are minimizing
            # D's error on the 'wrong' label. This trick helps produce a stronger gradient.
            g_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,
                                                             labels=tf.ones_like(self.D_),
                                                             name="loss_G")
            self.g_loss = tf.reduce_mean(g_loss)

            # create some summaries for debug and monitoring
            self.summaries.append(histogram_summary("z", self.z))
            self.summaries.append(histogram_summary("d", self.D))
            self.summaries.append(histogram_summary("d_", self.D_))
            self.summaries.append(image_summary("G", self.G, max_outputs=5))
            self.summaries.append(image_summary("X", self.images, max_outputs=5))
            self.summaries.append(histogram_summary("G_hist", self.G))
            self.summaries.append(histogram_summary("X_hist", self.images))
            self.summaries.append(scalar_summary("d_loss_real", self.d_loss_real))
            self.summaries.append(scalar_summary("d_loss_fake", self.d_loss_fake))
            self.summaries.append(scalar_summary("g_loss", self.g_loss))
            self.summaries.append(scalar_summary("d_loss", self.d_loss))

            # all trainable variables
            t_vars = tf.trainable_variables()
            # G's variables
            self.g_vars = [var for var in t_vars if 'g_' in var.name]
            # D's variables
            self.d_vars = [var for var in t_vars if 'd_' in var.name]

            # Extra hook for debug: log chi-square distance between G's output histogram and the dataset's histogram
            value_range = [0.0, 1.0]
            nbins = 100
            hist_g = tf.to_float(tf.histogram_fixed_width(self.G, value_range, nbins=nbins)) / nbins
            hist_images = tf.to_float(tf.histogram_fixed_width(self.images, value_range, nbins=nbins)) / nbins
            chi_square = tf.reduce_mean(tf.div(tf.square(hist_g - hist_images), hist_g + hist_images + 1e-5))
            self.summaries.append(scalar_summary("chi_square", chi_square))
        else:
            # Create only the generator

            # self.x is the conditioned latent representation - shape: [self.batch_size, 1, self.z_dim + self.y_dim]
            self.x = tf.reshape(self.x, shape=[self.batch_size, self.z_dim + self.y_dim])
            # extract z and y
            self.y = self.x[:, self.z_dim:self.z_dim + self.y_dim]
            self.z = self.x[:, :self.z_dim]
            # create an instance of the generator
            self.G = self.generator(self.z, self.y)
Code example #8
def build_train(predictions,
                end_points,
                y,
                embedding_sizes,
                shrinkage=0.06,
                lambda_div=0.0,
                C=25,
                alpha=2.0,
                beta=0.5,
                initial_acts=0.5,
                eta_style=False,
                dtype=tf.float32,
                regularization=None):
    """
    Builds the boosting based training.

    Args:
        predictions: tensor of the embedding predictions
        end_points: dictionary of endpoints of the embedding tower
        y: tensor of class labels
        embedding_sizes: list, which indicates the size of the sub-embedding
                         (e.g. [96, 160, 256])
        shrinkage: if you use eta_style = True, set to 1.0, otherwise keep it
                   small (e.g. 0.06).
        lambda_div: regularization parameter.
        C: parameter for binomial deviance.
        alpha: parameter for binomial deviance.
        dtype: data type for computations, typically tf.float32
        initial_acts: 0.5 if eta_style is false, 0.0 if eta_style is true
        regularization: regularization method (either activation or
                        adversarial)
    Returns:
        The training loss.
    """
    shape = predictions.get_shape().as_list()
    num_learners = len(embedding_sizes)
    # Pairwise labels.
    pairs = tf.reshape(
        tf.cast(tf.equal(y[:, tf.newaxis], y[tf.newaxis, :]), dtype), [-1])

    m = 1.0 * pairs + (-C * (1.0 - pairs))
    W = tf.reshape((1.0 - tf.eye(shape[0], dtype=dtype)), [-1])
    W = W * pairs / tf.reduce_sum(pairs) + W * \
        (1.0 - pairs) / tf.reduce_sum(1.0 - pairs)

    # * boosting_weights_init
    boosting_weights = tf.ones(shape=(shape[0] * shape[0], ), dtype=dtype)

    normed_fvecs = []
    regular_fvecs = []

    # L2 normalize fvecs
    for i in xrange(len(embedding_sizes)):
        start = int(sum(embedding_sizes[:i]))
        stop = int(start + embedding_sizes[i])

        fvec = tf.cast(predictions[:, start:stop], dtype)
        regular_fvecs.append(fvec)
        fvec = do_print(fvec, [tf.norm(fvec, axis=1)],
                        'fvecs_{}_norms'.format(i))
        tf.summary.histogram('fvecs_{}'.format(i), fvec)
        tf.summary.histogram('fvecs_{}_norm'.format(i), tf.norm(fvec, axis=1))
        normed_fvecs.append(fvec /
                            tf.maximum(tf.constant(1e-5, dtype=dtype),
                                       tf.norm(fvec, axis=1, keep_dims=True)))

    alpha = tf.constant(alpha, dtype=dtype)
    beta = tf.constant(beta, dtype=dtype)
    C = tf.constant(C, dtype=dtype)
    shrinkage = tf.constant(shrinkage, dtype=dtype)

    loss = tf.constant(0.0, dtype=dtype)
    acts = tf.constant(initial_acts, dtype=dtype)
    tf.summary.histogram('boosting_weights_0', boosting_weights)
    tf.summary.histogram(
        'boosting_weights_0_pos',
        tf.boolean_mask(boosting_weights, tf.equal(pairs, 1.0)))
    tf.summary.histogram(
        'boosting_weights_0_neg',
        tf.boolean_mask(boosting_weights, tf.equal(pairs, 0.0)))
    Ds = []
    for i in xrange(len(embedding_sizes)):
        fvec = normed_fvecs[i]
        Ds.append(tf.matmul(fvec, tf.transpose(fvec)))

        D = tf.reshape(Ds[-1], [-1])
        my_act = alpha * (D - beta) * m
        my_loss = tf.log(tf.exp(-my_act) + tf.constant(1.0, dtype=dtype))
        tmp = (tf.reduce_sum(my_loss * boosting_weights * W) /
               tf.constant(num_learners, dtype=dtype))
        loss += tmp

        tf.summary.scalar('learner_loss_{}'.format(i), tmp)

        if eta_style:
            nu = 2.0 / (1.0 + 1.0 + i)
            if shrinkage != 1.0:
                acts = (1.0 - nu) * acts + nu * shrinkage * D
                inputs = alpha * (acts - beta) * m
                booster_loss = tf.log(tf.exp(-(inputs)) + 1.0)
                boosting_weights = tf.stop_gradient(
                    -tf.gradients(tf.reduce_sum(booster_loss), inputs)[0])
            else:
                acts = (1.0 - nu) * acts + nu * shrinkage * my_act
                booster_loss = tf.log(tf.exp(-acts) + 1.0)
                boosting_weights = tf.stop_gradient(
                    -tf.gradients(tf.reduce_sum(booster_loss), acts)[0])
        else:
            # simpler variant of the boosting algorithm.
            acts += shrinkage * (D - beta) * alpha * m
            booster_loss = tf.log(tf.exp(-acts) + 1.0)
            cls_weight = tf.cast(1.0 * pairs + (1.0 - pairs) * 2.0,
                                 dtype=dtype)
            boosting_weights = tf.stop_gradient(
                -tf.gradients(tf.reduce_sum(booster_loss), acts)[0] *
                cls_weight)

            tf.summary.histogram('boosting_weights_{}'.format(i + 1),
                                 boosting_weights)
            pos_weights = tf.boolean_mask(boosting_weights,
                                          tf.equal(pairs, 1.0))
            neg_weights = tf.boolean_mask(boosting_weights,
                                          tf.equal(pairs, 0.0))
            pos_bins = tf.histogram_fixed_width(
                pos_weights,
                (tf.constant(0.0, dtype=dtype), tf.constant(1.0, dtype=dtype)),
                nbins=10)
            neg_bins = tf.histogram_fixed_width(
                neg_weights,
                (tf.constant(0.0, dtype=dtype), tf.constant(1.0, dtype=dtype)),
                nbins=10)
            loss = do_print(loss, [tf.reduce_mean(booster_loss)],
                            'Booster loss {}'.format(i + 1))
            loss = do_print(
                loss, [pos_bins, neg_bins],
                'Positive and negative boosting weights {}'.format(i + 1),
                summarize=100)

            tf.summary.histogram('boosting_weights_{}_pos'.format(i + 1),
                                 pos_weights)
            tf.summary.histogram('boosting_weights_{}_neg'.format(i + 1),
                                 neg_weights)
            tf.summary.scalar('booster_loss_{}'.format(i + 1),
                              tf.reduce_mean(booster_loss))

    # add the independence loss
    tf.summary.scalar('discriminative_loss', loss)

    embedding_weights = [
        v for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        if 'embedding' in v.name and 'weight' in v.name
    ]
    if lambda_div > 0.0:
        loss += REGULARIZATION_FUNCTIONS[regularization](
            fvecs=normed_fvecs,
            end_points=end_points,
            embedding_weights=embedding_weights,
            embedding_sizes=embedding_sizes,
            lambda_weight=LAMBDA_WEIGHT) * lambda_div
    tf.summary.scalar('loss', loss)
    return loss
Code example #9
    def build_model(self):

        if not self.is_inference:
            # create both the generator and the discriminator
            # self.x is a batch of images - shape: [N, H, W, C]
            # self.y is a vector of labels - shape: [N]

            # sample z from a normal distribution
            self.z = tf.random_normal(shape=[self.batch_size, self.z_dim],
                                      dtype=tf.float32,
                                      seed=None,
                                      name='z')

            # scale input to [-1, +1] range
            self.images = (tf.reshape(self.x,
                                      shape=[
                                          self.batch_size, self.image_size,
                                          self.image_size, self.c_dim
                                      ],
                                      name='x_reshaped') - 128) / 127.

            # create generator
            self.G = self.generator(self.z)
            # create an instance of the discriminator (real samples)
            self.D, self.D_logits = self.discriminator(self.images,
                                                       reuse=False)
            # create another identical instance of the discriminator (fake samples)
            # NOTE: we are re-using variables here to share weights between the two
            # instances of the discriminator
            self.D_, self.D_logits_ = self.discriminator(self.G, reuse=True)

            # we are using the cross entropy loss for all these losses
            # note the use of the soft label smoothing here to prevent D from getting overly confident
            # on real samples
            d_real = tf.nn.sigmoid_cross_entropy_with_logits(
                logits=self.D_logits,
                labels=(tf.ones_like(self.D) - self.soft_label_margin),
                name="loss_D_real")
            self.d_loss_real = tf.reduce_mean(d_real)
            d_fake = tf.nn.sigmoid_cross_entropy_with_logits(
                logits=self.D_logits_,
                labels=(tf.zeros_like(self.D_)),
                name="loss_D_fake")
            self.d_loss_fake = tf.reduce_mean(d_fake)
            self.d_loss = (self.d_loss_real + self.d_loss_fake) / 2.
            # The typical GAN set-up is that of a minimax game where D is trying to minimize
            # its own error and G is trying to maximize D's error. However, note how we are
            # flipping G's labels here: instead of maximizing D's error, we are minimizing
            # D's error on the 'wrong' label. This trick helps produce a stronger gradient.
            g_loss = tf.nn.sigmoid_cross_entropy_with_logits(
                logits=self.D_logits_,
                labels=(tf.ones_like(self.D_) + self.soft_label_margin),
                name="loss_G")
            self.g_loss = tf.reduce_mean(g_loss)

            # debug
            self.summaries.append(image_summary("G", self.G, max_outputs=3))
            self.summaries.append(
                image_summary("X", self.images, max_outputs=3))
            self.summaries.append(histogram_summary("G_hist", self.G))
            self.summaries.append(histogram_summary("X_hist", self.images))
            self.summaries.append(
                scalar_summary("d_loss_real", self.d_loss_real))
            self.summaries.append(
                scalar_summary("d_loss_fake", self.d_loss_fake))
            self.summaries.append(scalar_summary("g_loss", self.g_loss))
            self.summaries.append(scalar_summary("d_loss", self.d_loss))

            # all trainable variables
            t_vars = tf.trainable_variables()
            # G variables
            self.g_vars = [var for var in t_vars if 'g_' in var.name]
            # D variables
            self.d_vars = [var for var in t_vars if 'd_' in var.name]

            # Extra hook for debug: log chi-square distance between G's output histogram and the dataset's histogram
            value_range = [0.0, 1.0]
            nbins = 100
            hist_g = tf.histogram_fixed_width(
                self.G, value_range, nbins=nbins, dtype=tf.float32) / nbins
            hist_images = tf.histogram_fixed_width(
                self.images, value_range, nbins=nbins,
                dtype=tf.float32) / nbins
            chi_square = tf.reduce_mean(
                tf.div(tf.square(hist_g - hist_images),
                       hist_g + hist_images + 1e-5))
            self.summaries.append(scalar_summary("chi_square", chi_square))
        else:
            # Create only the generator
            self.x = tf.reshape(self.x, shape=[self.batch_size, self.z_dim])
            self.z = self.x[:, :self.z_dim]
            self.G = self.generator(self.z)
Code example #10
File: t8.py Project: andreum/user-movie-embedding
        train_prediction = tf_user_bias + tf_movie_bias
    error = tf.subtract(train_prediction, tf_train_labels)
    sse = tf.reduce_sum(tf.square(error))
    if (NUM_FEATURES > 0):
        regularization = tf.reduce_sum(
            tf.square(tf_user_embeddings)) / NUM_FEATURES + tf.reduce_sum(
                tf.abs(tf_movie_embeddings)) / NUM_FEATURES
    else:
        regularization = tf.reduce_sum(
            tf.square(tf_movie_bias)) + tf.reduce_sum(tf.square(tf_user_bias))
# There's no need to regularize the biases
# + tf.reduce_sum(tf.square(tf_movie_bias))*batch_size/NUM_MOVIES + tf.reduce_sum(tf.square(tf_user_bias)) * batch_size / NUM_USERS
    loss = sse + alpha * regularization
    mse = sse / batch_size
    optimizer = tf.train.GradientDescentOptimizer(tf_lr).minimize(loss)
    histogram = tf.histogram_fixed_width(error, [-4.5, 4.5], nbins=10)

with tf.Session(graph=graph) as session:
    tf.global_variables_initializer().run()
    print("Initialized")
    uemb, memb = session.run([user_embeddings, movie_embeddings])
    print("user embeddings: {}\n", uemb)
    print("movie embeddings: {}\n", memb)
    acccount = acctot = 0.0
    old_loss = 1e20
    lr = base_lbda
    for step in range(num_steps):
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        batch_data = train_data[offset:(offset + batch_size), :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        feed_dict = {
Code example #11
def histogram(image, nbins=256, source_range='image', normalize=False):
    """Return histogram of image.
    This function returns the centers of bins and does not rebin integer
    arrays. For integer arrays, each integer value has its own bin, which
    improves speed and intensity resolution (via the self-built
    _bincount_histogram function).
    The histogram is computed on the flattened image: for color images, the
    function should be used separately on each channel to obtain a histogram
    for each color channel.
    Parameters
    ----------
    image : array
        Input image.
    nbins : int, optional
        Number of bins used to calculate histogram. This value is ignored for
        integer arrays.
    source_range : string, optional
        'image' (default) determines the range from the input image.
        'dtype' determines the range from the expected range of the images
        of that data type.
    normalize : bool, optional
        If True, normalize the histogram by the sum of its values.
    Returns
    -------
    hist : array
        The values of the histogram.
    hist_centers : array
        The values at the center of the bins.
    """

    # The input here is a NumPy array (it is flattened with .flatten() below),
    # so use its static shape; tf.shape would return a symbolic tensor that
    # cannot drive a Python `if`.
    sh = image.shape

    if len(sh) == 3 and sh[-1] < 4:
        warnings.warn("This might be a color image. The histogram will be "
                      "computed on the flattened image. You can instead "
                      "apply this function to each color channel.")

    image = image.flatten()  # flatten the image into a 1-dimensional vector

    # For integer types, histogramming with bincount is more efficient.
    if image.dtype == 'int':
        hist, bin_centers = _bincount_histogram(image, source_range)

    else:

        if source_range == 'image':
            hist_range = [0.0, 256.0]  # range covering 8-bit image values
        elif source_range == 'dtype':
            hist_range = dtype_limits(
                image, clip_negative=False)  # range from the dtype limits
        else:
            raise ValueError('Wrong value for the `source_range` argument')

        hist_centers = [i for i in range(int(hist_range[1]))]

        tensor = tf.convert_to_tensor(image, dtype=tf.float32)
        hist = tf.histogram_fixed_width(tensor, hist_range, nbins)

    if normalize:
        hist = hist / tf.reduce_sum(hist)

    return hist, hist_centers
Code example #12
# da_1 = tf.argmax(img_GT)
# da_1 = tf.argmax(da_1)
# sp_1 = img_GT.get_shape()
sp_1 = tf.shape(img_GT)
da_1_img = tf.reduce_max(img_GT)
# xiao_1 = tf.reduce_min(img_GT)

sp_2 = tf.shape(img_SM)
# da_2 = tf.reduce_max(img_SM)
# xiao_2 = tf.reduce_min(img_SM)

nbins = 100  #256
# VALUE_RANGE = [0, 255]
VALUE_RANGE = [0.0, 255.0]

hist_GT = tf.histogram_fixed_width(img_GT, VALUE_RANGE, nbins)

hist_SM = tf.histogram_fixed_width(img_SM, VALUE_RANGE, nbins)

#####################################
# Differentiable Histogram Counting Method
hist_GT_Dev = np.zeros([1, nbins])

delta = 255 / nbins

BIN_Table = np.arange(0, 100, 1)
BIN_Table = BIN_Table.astype(np.float64)
BIN_Table = BIN_Table * delta
S_total_pixels = 240 * 320  # total number of pixels (rows * columns of the input image)

# BIN_Table_2 = np.zeros([1, nbins])
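The snippet above stops before the counting step itself. A common way to complete a differentiable histogram along these lines is to replace hard binning with a soft kernel centred on each entry of BIN_Table; the sketch below is one such completion under that assumption (the triangular kernel and the soft_histogram name are illustrative, not taken from the original file):

  bin_centers = tf.constant(BIN_Table)  # float64, shape [nbins]

  def soft_histogram(img):
      # Distance of every pixel to every bin center: [npixels, nbins].
      d = tf.abs(tf.reshape(tf.cast(img, tf.float64), [-1, 1]) - bin_centers)
      # Triangular kernel: each pixel contributes linearly to its two nearest
      # bins, which keeps the counts differentiable w.r.t. the image.
      w = tf.nn.relu(1.0 - d / delta)
      return tf.reduce_sum(w, axis=0) / S_total_pixels  # normalized, [nbins]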
Code example #13
def histogram(input_, value_min, value_max, nbins=None, name="histogram"):
    return tf.histogram_fixed_width(input_, [value_min, value_max], nbins, name=name)
Code example #14
 def hist(a, b):
     return tf.histogram_fixed_width(tf.nn.sigmoid(
         a @ tf.transpose(b)), [0.0, 1.0],
                                     nbins=n_bins)
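A sketch of how this helper might be invoked (assumptions: a and b are 2-D float tensors with a shared inner dimension, and n_bins is defined in the enclosing scope; the shapes below are illustrative):

  n_bins = 10
  a = tf.random_normal([32, 64])
  b = tf.random_normal([32, 64])
  h = hist(a, b)  # int32 counts of the 32*32 pairwise similarities over 10 bins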
Code example #15
import tensorflow as tf
import numpy as np
import cv2

x = tf.placeholder(tf.float32,[None,None,None,3],name='x')
lowcut = tf.Variable(tf.constant(0.005),name='lowcut')
highcut = tf.Variable(tf.constant(0.001),name='highcut')

RedHist = tf.histogram_fixed_width(x[:,:,:,0],[0.0,255.0],nbins=255)
GreenHist = tf.histogram_fixed_width(x[:,:,:,1],[0.0,255.0],nbins=255)
BlueHist = tf.histogram_fixed_width(x[:,:,:,2],[0.0,255.0],nbins=255)

PixelAmount = tf.cast(tf.reduce_min([tf.reduce_sum(RedHist),tf.reduce_sum(GreenHist),
                            tf.reduce_sum(BlueHist)]),tf.float32)

CumRed = tf.cast(tf.cumsum(RedHist,axis=0),tf.float32)
CumGreen = tf.cast(tf.cumsum(GreenHist,axis=0),tf.float32)
CumBlue = tf.cast(tf.cumsum(BlueHist,axis=0),tf.float32)

# Find the first bin where the cumulative count passes the cut fraction:
# (x + |x|) is positive only where x > 0, so after casting to bool,
# tf.where(...)[0][0] picks out the first such bin index.
minR = tf.where(tf.cast(tf.add(tf.subtract(CumRed,tf.multiply(PixelAmount,lowcut)),
         tf.abs(tf.subtract(CumRed,tf.multiply(PixelAmount,lowcut)))),tf.bool))[0][0]
minG = tf.where(tf.cast(tf.add(tf.subtract(CumGreen,tf.multiply(PixelAmount,lowcut)),
         tf.abs(tf.subtract(CumGreen,tf.multiply(PixelAmount,lowcut)))),tf.bool))[0][0]
minB = tf.where(tf.cast(tf.add(tf.subtract(CumBlue,tf.multiply(PixelAmount,lowcut)),
         tf.abs(tf.subtract(CumBlue,tf.multiply(PixelAmount,lowcut)))),tf.bool))[0][0]
maxR = tf.where(tf.cast(tf.add(tf.subtract(CumRed,tf.multiply(PixelAmount,tf.subtract(1.0,highcut))),
         tf.abs(tf.subtract(CumRed,tf.multiply(PixelAmount,tf.subtract(1.0,highcut))))),tf.bool))[0][0]
maxG = tf.where(tf.cast(tf.add(tf.subtract(CumGreen,tf.multiply(PixelAmount,tf.subtract(1.0,highcut))),
         tf.abs(tf.subtract(CumGreen,tf.multiply(PixelAmount,tf.subtract(1.0,highcut))))),tf.bool))[0][0]
maxB = tf.where(tf.cast(tf.add(tf.subtract(CumBlue,tf.multiply(PixelAmount,tf.subtract(1.0,highcut))),
         tf.abs(tf.subtract(CumBlue,tf.multiply(PixelAmount,tf.subtract(1.0,highcut))))),tf.bool))[0][0]
Code example #16
def threshold_otsu(image):
    """Return threshold value based on Otsu's method. Adapted to tf from sklearn
    Parameters
    ----------
    image : (N, M) ndarray
        Grayscale input image.
    nbins : int, optional
        Number of bins used to calculate histogram. This value is ignored for
        integer arrays.
    Returns
    -------
    threshold : float
        Upper threshold value. All pixels with an intensity higher than
        this value are assumed to be foreground.
    Raises
    ------
    ValueError
         If ``image`` only contains a single grayscale value.
    References
    ----------
    .. [1] Wikipedia, https://en.wikipedia.org/wiki/Otsu's_Method
    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()
    >>> thresh = threshold_otsu(image)
    >>> binary = image <= thresh
    Notes
    -----
    The input image must be grayscale.
    """
    if len(image.shape) > 2 and image.shape[-1] in (3, 4):
        msg = ("threshold_otsu is expected to work correctly only for "
               "grayscale images; image shape {0} looks like an RGB image")
        warn(msg.format(image.shape))

    # Check if the image is multi-colored or not
    tf.debugging.assert_none_equal(
        tf.math.reduce_min(image),
        tf.math.reduce_max(image),
        summarize=1,
        message="expects more than one image value",
    )

    hist = tf.histogram_fixed_width(image, tf.constant([0, 255]), 256)
    hist = tf.cast(hist, tf.float32)
    bin_centers = tf.range(0.5, 256, dtype=tf.float32)

    # class probabilities for all possible thresholds
    weight1 = tf.cumsum(hist)
    weight2 = tf.cumsum(hist, reverse=True)
    # class means for all possible thresholds
    mean = tf.math.multiply(hist, bin_centers)
    mean1 = tf.math.divide(tf.cumsum(mean), weight1)
    # mean2 = (tf.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]
    mean2 = tf.math.divide(tf.cumsum(mean, reverse=True), weight2)

    # Clip ends to align class 1 and class 2 variables:
    # The last value of ``weight1``/``mean1`` should pair with zero values in
    # ``weight2``/``mean2``, which do not exist.
    tmp1 = tf.math.multiply(weight1[:-1], weight2[1:])
    tmp2 = (mean1[:-1] - mean2[1:])**2
    variance12 = tf.math.multiply(tmp1, tmp2)

    idx = tf.math.argmax(variance12)
    threshold = bin_centers[:-1][idx]
    return threshold
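A minimal usage sketch (assumptions: TF1-style graph execution, numpy imported as np, and a random grayscale image as a stand-in for real data):

  image = tf.constant(np.random.randint(0, 256, size=(64, 64)), dtype=tf.int32)
  thresh = threshold_otsu(image)
  # Cast before comparing: the image is int32 while the threshold is float32.
  binary = tf.cast(image, tf.float32) <= thresh
  with tf.Session() as sess:
      print(sess.run(thresh))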
Code example #17
File: network-celebA.py Project: Dasona/DIGITS
    def build_model(self):

        if not self.is_inference:
            # create both the generator and the discriminator
            # self.x is a batch of images - shape: [N, H, W, C]
            # self.y is a vector of labels - shape: [N]

            # sample z from a normal distribution
            self.z = tf.random_normal(shape=[self.batch_size, self.z_dim], dtype=tf.float32, seed=None, name='z')

            # scale input to [-1, +1] range
            self.images = (tf.reshape(self.x,
                                      shape=[self.batch_size,
                                             self.image_size,
                                             self.image_size,
                                             self.c_dim],
                                      name='x_reshaped') - 128) / 127.

            # create generator
            self.G = self.generator(self.z)
            # create an instance of the discriminator (real samples)
            self.D, self.D_logits = self.discriminator(self.images, reuse=False)
            # create another identical instance of the discriminator (fake samples)
            # NOTE: we are re-using variables here to share weights between the two
            # instances of the discriminator
            self.D_, self.D_logits_ = self.discriminator(self.G, reuse=True)

            # we are using the cross entropy loss for all these losses
            # note the use of the soft label smoothing here to prevent D from getting overly confident
            # on real samples
            d_real = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits,
                                                             labels=(tf.ones_like(self.D) - self.soft_label_margin),
                                                             name="loss_D_real")
            self.d_loss_real = tf.reduce_mean(d_real)
            d_fake = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,
                                                             labels=(tf.zeros_like(self.D_)),
                                                             name="loss_D_fake")
            self.d_loss_fake = tf.reduce_mean(d_fake)
            self.d_loss = (self.d_loss_real + self.d_loss_fake) / 2.
            # The typical GAN set-up is that of a minimax game where D is trying to minimize
            # its own error and G is trying to maximize D's error. However, note how we are
            # flipping G's labels here: instead of maximizing D's error, we are minimizing
            # D's error on the 'wrong' label. This trick helps produce a stronger gradient.
            g_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,
                                                             labels=(tf.ones_like(self.D_) + self.soft_label_margin),
                                                             name="loss_G")
            self.g_loss = tf.reduce_mean(g_loss)

            # debug
            self.summaries.append(image_summary("G", self.G, max_outputs=3))
            self.summaries.append(image_summary("X", self.images, max_outputs=3))
            self.summaries.append(histogram_summary("G_hist", self.G))
            self.summaries.append(histogram_summary("X_hist", self.images))
            self.summaries.append(scalar_summary("d_loss_real", self.d_loss_real))
            self.summaries.append(scalar_summary("d_loss_fake", self.d_loss_fake))
            self.summaries.append(scalar_summary("g_loss", self.g_loss))
            self.summaries.append(scalar_summary("d_loss", self.d_loss))

            # all trainable variables
            t_vars = tf.trainable_variables()
            # G variables
            self.g_vars = [var for var in t_vars if 'g_' in var.name]
            # D variables
            self.d_vars = [var for var in t_vars if 'd_' in var.name]

            # Extra hook for debug: log chi-square distance between G's output histogram and the dataset's histogram
            value_range = [0.0, 1.0]
            nbins = 100
            hist_g = tf.histogram_fixed_width(self.G, value_range, nbins=nbins, dtype=tf.float32) / nbins
            hist_images = tf.histogram_fixed_width(self.images, value_range, nbins=nbins, dtype=tf.float32) / nbins
            chi_square = tf.reduce_mean(tf.div(tf.square(hist_g - hist_images), hist_g + hist_images + 1e-5))
            self.summaries.append(scalar_summary("chi_square", chi_square))
        else:
            # Create only the generator
            self.x = tf.reshape(self.x, shape=[self.batch_size, self.z_dim])
            self.z = self.x[:, :self.z_dim]
            self.G = self.generator(self.z)
Code example #18
def calc_histogram(image):
    values_range = tf.constant([0, 255], dtype=tf.float32)
    histogram = tf.histogram_fixed_width(tf.to_float(image), values_range, 256)
    return histogram
Code example #19
def histogram(tensor, value_range=[0.0, 1.0], nbins=100):
    """Return histogram of tensor"""
    h, w, c = tensor.shape
    hist = tf.histogram_fixed_width(tensor, value_range, nbins=nbins)
    hist = tf.divide(hist, h * w * c)
    return hist
Code example #20
 def loop_body(i, hist):
     h = tf.histogram_fixed_width(t[i, :], [0.0, 10.0], nbins=shape_s)
     return i + 1, tf.concat([hist, tf.expand_dims(h, 0)], axis=0)
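A sketch of driving this loop body with tf.while_loop (assumptions: t is a [rows, cols] float tensor and shape_s is the bin count; the shape invariant lets the stacked histogram grow by one row per iteration):

  i0 = tf.constant(0)
  hist0 = tf.zeros([0, shape_s], dtype=tf.int32)  # empty seed
  _, per_row_hist = tf.while_loop(
      lambda i, hist: i < tf.shape(t)[0],
      loop_body,
      [i0, hist0],
      shape_invariants=[i0.get_shape(), tf.TensorShape([None, shape_s])])
  # per_row_hist: [rows, shape_s], one fixed-width histogram per row of t.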
Code example #21
File: models.py Project: shoeffner/ann3depth
 def color_histogram(self, superpixel):
     # Pack the three color channels into a single scalar per pixel
     # (channel0*2**24 + channel1*2**16 + channel2*2**8, assuming channel
     # values in [0, 1]), so distinct colors land in distinct bins.
     values = tf.reduce_sum(superpixel * (16777216., 65536., 256.), axis=-1)
     histogram = tf.histogram_fixed_width(values, (0, 16777216.),
                                          256, tf.float32)
     return histogram
Code example #22
File: network-mnist.py Project: 4QuantOSS/OpenDIGITS
    def build_model(self):
        """Create the main ops"""

        if not self.is_inference:
            # create both the generator and the discriminator
            # self.x is a batch of images - shape: [N, H, W, C]
            # self.y is a vector of labels - shape: [N]

            # sample z from a normal distribution
            self.z = tf.random_normal(shape=[self.batch_size, self.z_dim],
                                      dtype=tf.float32,
                                      seed=None,
                                      name='z')

            # rescale x to [0, 1]
            x_reshaped = tf.reshape(self.x,
                                    shape=[
                                        self.batch_size, self.image_size,
                                        self.image_size, self.c_dim
                                    ],
                                    name='x_reshaped')
            self.images = x_reshaped / 255.

            # one hot encode the label - shape: [N] -> [N, self.y_dim]
            self.y = tf.one_hot(self.y, self.y_dim, name='y_onehot')

            # create the generator
            self.G = self.generator(self.z, self.y)

            # create one instance of the discriminator for real images (the input is
            # images from the dataset)
            self.D, self.D_logits = self.discriminator(self.images,
                                                       self.y,
                                                       reuse=False)

            # create another instance of the discriminator for fake images (the input is
            # the generator's output). Note how we are reusing variables to share weights
            # between both instances of the discriminator
            self.D_, self.D_logits_ = self.discriminator(self.G,
                                                         self.y,
                                                         reuse=True)

            # aggregate losses across batch

            # we are using the cross entropy loss for all these losses
            d_real = tf.nn.sigmoid_cross_entropy_with_logits(
                logits=self.D_logits,
                labels=tf.ones_like(self.D),
                name="loss_D_real")
            self.d_loss_real = tf.reduce_mean(d_real)
            d_fake = tf.nn.sigmoid_cross_entropy_with_logits(
                logits=self.D_logits_,
                labels=tf.zeros_like(self.D_),
                name="loss_D_fake")
            self.d_loss_fake = tf.reduce_mean(d_fake)
            self.d_loss = (self.d_loss_real + self.d_loss_fake) / 2.
            # The typical GAN set-up is that of a minimax game where D is trying to minimize
            # its own error and G is trying to maximize D's error. However, note how we are
            # flipping G's labels here: instead of maximizing D's error, we are minimizing
            # D's error on the 'wrong' label. This trick helps produce a stronger gradient.
            g_loss = tf.nn.sigmoid_cross_entropy_with_logits(
                logits=self.D_logits_,
                labels=tf.ones_like(self.D_),
                name="loss_G")
            self.g_loss = tf.reduce_mean(g_loss)

            # create some summaries for debug and monitoring
            self.summaries.append(histogram_summary("z", self.z))
            self.summaries.append(histogram_summary("d", self.D))
            self.summaries.append(histogram_summary("d_", self.D_))
            self.summaries.append(image_summary("G", self.G, max_outputs=5))
            self.summaries.append(
                image_summary("X", self.images, max_outputs=5))
            self.summaries.append(histogram_summary("G_hist", self.G))
            self.summaries.append(histogram_summary("X_hist", self.images))
            self.summaries.append(
                scalar_summary("d_loss_real", self.d_loss_real))
            self.summaries.append(
                scalar_summary("d_loss_fake", self.d_loss_fake))
            self.summaries.append(scalar_summary("g_loss", self.g_loss))
            self.summaries.append(scalar_summary("d_loss", self.d_loss))

            # all trainable variables
            t_vars = tf.trainable_variables()
            # G's variables
            self.g_vars = [var for var in t_vars if 'g_' in var.name]
            # D's variables
            self.d_vars = [var for var in t_vars if 'd_' in var.name]

            # Extra hook for debug: log chi-square distance between G's output histogram and the dataset's histogram
            value_range = [0.0, 1.0]
            nbins = 100
            hist_g = tf.histogram_fixed_width(
                self.G, value_range, nbins=nbins, dtype=tf.float32) / nbins
            hist_images = tf.histogram_fixed_width(
                self.images, value_range, nbins=nbins,
                dtype=tf.float32) / nbins
            chi_square = tf.reduce_mean(
                tf.div(tf.square(hist_g - hist_images),
                       hist_g + hist_images + 1e-5))
            self.summaries.append(scalar_summary("chi_square", chi_square))
        else:
            # Create only the generator

            # self.x is the conditioned latent representation - shape: [self.batch_size, 1, self.z_dim + self.y_dim]
            self.x = tf.reshape(
                self.x, shape=[self.batch_size, self.z_dim + self.y_dim])
            # extract z and y
            self.y = self.x[:, self.z_dim:self.z_dim + self.y_dim]
            self.z = self.x[:, :self.z_dim]
            # create an instance of the generator
            self.G = self.generator(self.z, self.y)
Code example #23
 def one_histogram(accumulator, element):
     hist = tf.histogram_fixed_width(data, element, num_bins)
     return tf.add(accumulator, hist)
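A sketch of driving this accumulator with tf.foldl (assumptions: data is a tensor fixed in the enclosing scope, each element is a [min, max] value range, and num_bins is a Python int; the values below are illustrative):

  data = tf.constant([0.5, 1.5, 2.5, 3.5])
  num_bins = 4
  value_ranges = tf.constant([[0.0, 4.0], [0.0, 2.0]])
  total = tf.foldl(one_histogram,
                   value_ranges,
                   initializer=tf.zeros([num_bins], dtype=tf.int32))
  # total: element-wise sum of the histograms of data under each range.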