Example #1
    def fc_layers(self):
        # fc1
        with tf.name_scope('fc1') as scope:
            shape = int(np.prod(self.pool5.get_shape()[1:]))
            fc1w = tf.Variable(tf.truncated_normal([shape, 4096],
                                                         dtype=tf.float32,
                                                         stddev=1e-1), name='weights')
            fc1b = tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32),
                                 trainable=True, name='biases')
            pool5_flat = tf.reshape(self.pool5, [-1, shape])
            fc1l = tf.nn.bias_add(tf.matmul(pool5_flat, fc1w), fc1b)
            self.fc1 = tf.nn.relu(fc1l)
            self.parameters += [fc1w, fc1b]

        # fc2
        with tf.name_scope('fc2') as scope:
            fc2w = tf.Variable(tf.truncated_normal([4096, 4096],
                                                         dtype=tf.float32,
                                                         stddev=1e-1), name='weights')
            fc2b = tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32),
                                 trainable=True, name='biases')
            fc2l = tf.nn.bias_add(tf.matmul(self.fc1, fc2w), fc2b)
            self.fc2 = tf.nn.relu(fc2l)
            self.parameters += [fc2w, fc2b]

        # fc3
        with tf.name_scope('fc3') as scope:
            fc3w = tf.Variable(tf.truncated_normal([4096, 1000],
                                                         dtype=tf.float32,
                                                         stddev=1e-1), name='weights')
            fc3b = tf.Variable(tf.constant(1.0, shape=[1000], dtype=tf.float32),
                                 trainable=True, name='biases')
            self.fc3l = tf.nn.bias_add(tf.matmul(self.fc2, fc3w), fc3b)
            self.parameters += [fc3w, fc3b]
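
A minimal, self-contained sketch of the same fully connected pattern (not from the original class; assumes TF 1.x, with toy sizes standing in for the VGG-style 7x7x512 pool5 and 4096-unit layers above):

import numpy as np
import tensorflow as tf

pool5 = tf.placeholder(tf.float32, [None, 4, 4, 32])   # toy stand-in for self.pool5
shape = int(np.prod(pool5.get_shape()[1:]))            # 4 * 4 * 32 = 512
fc1w = tf.Variable(tf.truncated_normal([shape, 128], stddev=1e-1), name='weights')
fc1b = tf.Variable(tf.constant(1.0, shape=[128]), name='biases')
fc1 = tf.nn.relu(tf.nn.bias_add(tf.matmul(tf.reshape(pool5, [-1, shape]), fc1w), fc1b))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(fc1, {pool5: np.zeros((2, 4, 4, 32), np.float32)})
    print(out.shape)  # (2, 128)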
Example #2
def batch_norm(x, n_out, phase_train, scope='bn', affine=True):
  """
  Batch normalization on convolutional maps.
  Args:
    x: Tensor, 4D BHWD input maps
    n_out: integer, depth of input maps
    phase_train: boolean tf.Variable, true indicates training phase
    scope: string, variable scope
    affine: whether to affine-transform outputs
  Return:
    normed: batch-normalized maps
  """
  with tf.variable_scope(scope):
    beta = tf.Variable(tf.constant(0.0, shape=[n_out]),
      name='beta', trainable=True)
    gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),
      name='gamma', trainable=affine)
    tf.add_to_collection('biases', beta)
    tf.add_to_collection('weights', gamma)

    batch_mean, batch_var = tf.nn.moments(x, [0,1,2], name='moments')
    ema = tf.train.ExponentialMovingAverage(decay=0.99)

    def mean_var_with_update():
      ema_apply_op = ema.apply([batch_mean, batch_var])
      with tf.control_dependencies([ema_apply_op]):
        return tf.identity(batch_mean), tf.identity(batch_var)
    mean, var = control_flow_ops.cond(phase_train,
      mean_var_with_update,
      lambda: (ema.average(batch_mean), ema.average(batch_var)))

    normed = tf.nn.batch_norm_with_global_normalization(x, mean, var, 
      beta, gamma, 1e-3, affine)
  return normed
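
A hedged usage sketch for batch_norm above (assumes TF 1.x and that control_flow_ops was imported via `from tensorflow.python.ops import control_flow_ops`; tf.cond behaves the same here):

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 8, 8, 16])            # BHWD conv maps
phase_train = tf.placeholder(tf.bool, name='phase_train')   # True during training
normed = batch_norm(x, 16, phase_train)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.random.randn(4, 8, 8, 16).astype(np.float32)
    # training pass: uses batch statistics and updates the moving averages
    sess.run(normed, {x: batch, phase_train: True})
    # inference pass: uses the accumulated moving averages instead
    sess.run(normed, {x: batch, phase_train: False})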
Example #3
  def testMultipleMutableHashTables(self):
    with self.test_session() as sess:
      default_val = -1
      keys = tf.constant(["brain", "salad", "surgery"])
      values = tf.constant([0, 1, 2], tf.int64)

      table1 = tf.contrib.lookup.MutableHashTable(tf.string,
                                                  tf.int64,
                                                  default_val)
      table2 = tf.contrib.lookup.MutableHashTable(tf.string,
                                                  tf.int64,
                                                  default_val)
      table3 = tf.contrib.lookup.MutableHashTable(tf.string,
                                                  tf.int64,
                                                  default_val)
      table1.insert(keys, values).run()
      table2.insert(keys, values).run()
      table3.insert(keys, values).run()

      self.assertAllEqual(3, table1.size().eval())
      self.assertAllEqual(3, table2.size().eval())
      self.assertAllEqual(3, table3.size().eval())

      input_string = tf.constant(["brain", "salad", "tank"])
      output1 = table1.lookup(input_string)
      output2 = table2.lookup(input_string)
      output3 = table3.lookup(input_string)

      out1, out2, out3 = sess.run([output1, output2, output3])
      self.assertAllEqual([0, 1, -1], out1)
      self.assertAllEqual([0, 1, -1], out2)
      self.assertAllEqual([0, 1, -1], out3)
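
The same contrib lookup table used outside the test harness, as a stand-alone sketch (assumes TF 1.x with tf.contrib available):

import tensorflow as tf

table = tf.contrib.lookup.MutableHashTable(tf.string, tf.int64, default_value=-1)
insert_op = table.insert(tf.constant(["brain", "salad"]),
                         tf.constant([0, 1], tf.int64))
lookup_op = table.lookup(tf.constant(["salad", "tank"]))

with tf.Session() as sess:
    sess.run(insert_op)
    print(sess.run(lookup_op))  # [ 1 -1]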
Example #4
 def testHigherRank(self):
   np.random.seed(1)
   # We check that scalar and empty shapes work as well
   for shape in (7, 0), (4, 3, 2):
     for indices_shape in (), (0,), (3, 0), (3, 5):
       params = np.random.randn(*shape)
       indices = np.random.randint(shape[0], size=indices_shape)
       with self.test_session(use_gpu=self.use_gpu):
         tf_params = tf.constant(params)
         tf_indices = tf.constant(indices)
         gather = tf.gather(tf_params, tf_indices)
         self.assertAllEqual(params[indices], gather.eval())
         self.assertEqual(indices.shape + params.shape[1:], gather.get_shape())
         # Test gradients
         gather_grad = np.random.randn(*gather.get_shape().as_list())
         params_grad, indices_grad = tf.gradients(
             gather, [tf_params, tf_indices], gather_grad)
         self.assertEqual(indices_grad, None)
         self.assertEqual(type(params_grad), tf.IndexedSlices)
         params_grad = tf.convert_to_tensor(params_grad)
         correct_params_grad = np.zeros(shape)
         for i, g in zip(indices.flat,
                         gather_grad.reshape((indices.size,) + shape[1:])):
           correct_params_grad[i] += g
         self.assertAllClose(correct_params_grad, params_grad.eval())
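
The shape rule this test verifies, shown in isolation (assumes TF 1.x): tf.gather's output shape is indices.shape + params.shape[1:].

import numpy as np
import tensorflow as tf

params = tf.constant(np.arange(12.0).reshape(4, 3))
indices = tf.constant([[0, 2], [3, 3]])
gathered = tf.gather(params, indices)
print(gathered.get_shape())  # (2, 2, 3) == indices.shape + params.shape[1:]

with tf.Session() as sess:
    print(sess.run(gathered)[0, 1])  # row 2 of params: [6. 7. 8.]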
Example #5
  def testSignatureMismatch(self):
    with self.test_session():
      default_val = -1
      keys = tf.constant(["brain", "salad", "surgery"])
      values = tf.constant([0, 1, 2], tf.int64)
      table = tf.contrib.lookup.MutableHashTable(tf.string,
                                                 tf.int64,
                                                 default_val)

      # insert with keys of the wrong type
      with self.assertRaises(TypeError):
        table.insert(tf.constant([4, 5, 6]), values).run()

      # insert with values of the wrong type
      with self.assertRaises(TypeError):
        table.insert(keys, tf.constant(["a", "b", "c"])).run()

      self.assertAllEqual(0, table.size().eval())

      table.insert(keys, values).run()
      self.assertAllEqual(3, table.size().eval())

      # lookup with keys of the wrong type
      input_string = tf.constant([1, 2, 3], tf.int64)
      with self.assertRaises(TypeError):
        table.lookup(input_string).eval()

      # default value of the wrong type
      with self.assertRaises(TypeError):
        tf.contrib.lookup.MutableHashTable(tf.string, tf.int64, "UNK")
Example #6
  def testMutableHashTableOfTensors(self):
    with self.test_session():
      default_val = tf.constant([-1, -1], tf.int64)
      keys = tf.constant(["brain", "salad", "surgery"])
      values = tf.constant([[0, 1], [2, 3], [4, 5]], tf.int64)
      table = tf.contrib.lookup.MutableHashTable(tf.string, tf.int64,
                                                 default_val)
      self.assertAllEqual(0, table.size().eval())

      table.insert(keys, values).run()
      self.assertAllEqual(3, table.size().eval())

      input_string = tf.constant(["brain", "salad", "tank"])
      output = table.lookup(input_string)
      self.assertAllEqual([3, 2], output.get_shape())

      result = output.eval()
      self.assertAllEqual([[0, 1], [2, 3], [-1, -1]], result)

      exported_keys, exported_values = table.export()
      self.assertAllEqual([None], exported_keys.get_shape().as_list())
      self.assertAllEqual([None, 2], exported_values.get_shape().as_list())
      # exported data is in the order of the internal map, i.e. undefined
      sorted_keys = np.sort(exported_keys.eval())
      sorted_values = np.sort(exported_values.eval())
      self.assertAllEqual([b"brain", b"salad", b"surgery"], sorted_keys)
      self.assertAllEqual([[4, 5], [2, 3], [0, 1]], sorted_values)
Example #7
  def testMutableHashTableExportInsert(self):
    with self.test_session():
      default_val = tf.constant([-1, -1], tf.int64)
      keys = tf.constant(["brain", "salad", "surgery"])
      values = tf.constant([[0, 1], [2, 3], [4, 5]], tf.int64)
      table1 = tf.contrib.lookup.MutableHashTable(tf.string, tf.int64,
                                                  default_val)
      self.assertAllEqual(0, table1.size().eval())
      table1.insert(keys, values).run()
      self.assertAllEqual(3, table1.size().eval())

      input_string = tf.constant(["brain", "salad", "tank"])
      expected_output = [[0, 1], [2, 3], [-1, -1]]
      output1 = table1.lookup(input_string)
      self.assertAllEqual(expected_output, output1.eval())

      exported_keys, exported_values = table1.export()
      self.assertAllEqual(3, exported_keys.eval().size)
      self.assertAllEqual(6, exported_values.eval().size)

      # Populate a second table from the exported data
      table2 = tf.contrib.lookup.MutableHashTable(tf.string, tf.int64,
                                                  default_val)
      self.assertAllEqual(0, table2.size().eval())
      table2.insert(exported_keys, exported_values).run()
      self.assertAllEqual(3, table2.size().eval())

      # Verify lookup result is still the same
      output2 = table2.lookup(input_string)
      self.assertAllEqual(expected_output, output2.eval())
Example #8
 def input_fn():
   return {
       'age': tf.constant([1]),
       'language': tf.SparseTensor(values=['english'],
                                   indices=[[0, 0]],
                                   shape=[1, 1])
   }, tf.constant([[1]])
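
A hedged sketch of what this input_fn yields when evaluated directly (assumes an early TF 1.x release; later releases spell the SparseTensor keyword `dense_shape` rather than `shape`):

import tensorflow as tf

features, labels = input_fn()
with tf.Session() as sess:
    age, language, label = sess.run(
        [features['age'], features['language'], labels])
    print(age)              # [1]
    print(language.values)  # [b'english'] (a SparseTensorValue)
    print(label)            # [[1]]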
Example #9
def resize_images(X, height_factor, width_factor, dim_ordering):
    '''Resizes the images contained in a 4D tensor of shape
    - [batch, channels, height, width] (for 'th' dim_ordering)
    - [batch, height, width, channels] (for 'tf' dim_ordering)
    by a factor of (height_factor, width_factor). Both factors should be
    positive integers.
    '''
    if dim_ordering == 'th':
        original_shape = int_shape(X)
        new_shape = tf.shape(X)[2:]
        new_shape *= tf.constant(np.array([height_factor, width_factor]).astype('int32'))
        X = permute_dimensions(X, [0, 2, 3, 1])
        X = tf.image.resize_nearest_neighbor(X, new_shape)
        X = permute_dimensions(X, [0, 3, 1, 2])
        X.set_shape((None, None, original_shape[2] * height_factor, original_shape[3] * width_factor))
        return X
    elif dim_ordering == 'tf':
        original_shape = int_shape(X)
        new_shape = tf.shape(X)[1:3]
        new_shape *= tf.constant(np.array([height_factor, width_factor]).astype('int32'))
        X = tf.image.resize_nearest_neighbor(X, new_shape)
        X.set_shape((None, original_shape[1] * height_factor, original_shape[2] * width_factor, None))
        return X
    else:
        raise Exception('Invalid dim_ordering: ' + dim_ordering)
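
A hedged usage sketch (assumes TF 1.x and that the Keras-backend helpers int_shape and permute_dimensions used above are in scope):

import numpy as np
import tensorflow as tf

X = tf.placeholder(tf.float32, [None, 32, 32, 3])
X_up = resize_images(X, 2, 2, dim_ordering='tf')   # static shape becomes (?, 64, 64, 3)

with tf.Session() as sess:
    out = sess.run(X_up, {X: np.zeros((1, 32, 32, 3), np.float32)})
    print(out.shape)  # (1, 64, 64, 3)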
Example #10
File: util.py Project: kuprel/skin
def loc_net_fc(images, batch_size):

    images -= 128
    images /= 128.

    images = tf.image.resize_images(images, 150, 150)
    images_flat = tf.reshape(images, [batch_size, -1])
    hidden_size = 100

    with tf.name_scope('fc1') as scope:
        weights = tf.Variable(tf.truncated_normal([150**2*3, hidden_size],
            dtype=tf.float32, stddev=1e-3), name='weights')
        biases = tf.Variable(tf.constant(0.0, shape=[hidden_size],
            dtype=tf.float32), name='biases')
        hidden = tf.add(tf.matmul(images_flat, weights), biases, name=scope)
        hidden = tf.nn.relu(hidden)

    with tf.name_scope('fc2') as scope:
        weights = tf.Variable(tf.truncated_normal([hidden_size, 3],
            dtype=tf.float32, stddev=1e-3), name='weights')
        biases = tf.Variable(tf.constant(0.0, shape=[3], dtype=tf.float32),
            name='biases')
        theta = tf.add(tf.matmul(hidden, weights), biases, name=scope)
        theta = tf.nn.tanh(theta)

    return theta
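
A hedged usage sketch (assumes an early TF release matching the two-argument tf.image.resize_images(images, new_height, new_width) call above; later 1.x releases take a single size argument):

import numpy as np
import tensorflow as tf

batch_size = 4
images = tf.placeholder(tf.float32, [batch_size, 224, 224, 3])
theta = loc_net_fc(images, batch_size)   # (4, 3) localization parameters in (-1, 1)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    feed = np.zeros((batch_size, 224, 224, 3), np.float32)
    print(sess.run(theta, {images: feed}).shape)  # (4, 3)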
Example #11
  def testTensorArrayGradientWritePackConcatAndRead(self):
    with self.test_session(use_gpu=self._use_gpu) as sess:
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo", size=2,
          clear_after_read=False)

      value_0 = tf.constant([-1.0, 1.0])
      value_1 = tf.constant([-10.0, 10.0])

      w0 = ta.write(0, value_0)
      w1 = w0.write(1, value_1)
      p0 = w1.pack()
      r0 = w1.read(0)
      s0 = w1.concat()

      # Test gradient accumulation between read(0), pack(), and concat()
      with tf.control_dependencies([p0, r0, s0]):
        grad_r = tf.gradients(
            ys=[p0, r0, s0], xs=[value_0, value_1],
            grad_ys=[
                [[2.0, 3.0], [4.0, 5.0]],  # pack gradient
                [-0.5, 1.5],  # read(0) gradient
                [20.0, 30.0, 40.0, 50.0]])  # concat gradient
      grad_vals = sess.run(grad_r)  # 2 + 2 entries

      self.assertAllClose([2.0 - 0.5 + 20.0, 3.0 + 1.5 + 30.0], grad_vals[0])
      self.assertAllEqual([4.0 + 40.0, 5.0 + 50.0], grad_vals[1])
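
A minimal TensorArray round trip without the gradient machinery (assumes TF 1.x, where the class is also exposed as tf.TensorArray and the pack()/concat() calls above correspond to stack()/concat() in later releases):

import tensorflow as tf

ta = tf.TensorArray(dtype=tf.float32, size=2)
ta = ta.write(0, tf.constant([-1.0, 1.0]))
ta = ta.write(1, tf.constant([-10.0, 10.0]))

with tf.Session() as sess:
    print(sess.run(ta.stack()))   # [[ -1.   1.] [-10.  10.]]
    print(sess.run(ta.concat()))  # [ -1.   1. -10.  10.]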
Example #12
  def _test_normal_normal(self, default, dtype):
    with self.test_session() as sess:
      x_data = np.array([0.0] * 50, dtype=np.float32)

      mu = Normal(loc=tf.constant(0.0, dtype=dtype),
                  scale=tf.constant(1.0, dtype=dtype))
      x = Normal(loc=mu, scale=tf.constant(1.0, dtype=dtype),
                 sample_shape=50)

      n_samples = 2000
      # analytic solution: N(loc=0.0, scale=\sqrt{1/51}=0.140)
      if not default:
        qmu = Empirical(params=tf.Variable(tf.ones(n_samples, dtype=dtype)))
        inference = ed.HMC({mu: qmu}, data={x: x_data})
      else:
        inference = ed.HMC([mu], data={x: x_data})
        qmu = inference.latent_vars[mu]
      inference.run()

      self.assertAllClose(qmu.mean().eval(), 0, rtol=1e-1, atol=1e-1)
      self.assertAllClose(qmu.stddev().eval(), np.sqrt(1 / 51),
                          rtol=1e-1, atol=1e-1)

      old_t, old_n_accept = sess.run([inference.t, inference.n_accept])
      if not default:
        self.assertEqual(old_t, n_samples)
      else:
        self.assertEqual(old_t, 1e4)
      self.assertGreater(old_n_accept, 0.1)
      sess.run(inference.reset)
      new_t, new_n_accept = sess.run([inference.t, inference.n_accept])
      self.assertEqual(new_t, 0)
      self.assertEqual(new_n_accept, 0)
Example #13
  def build_greedy_training(self, state, network_states):
    """Extracts features and advances a batch using the oracle path.

    Args:
      state: MasterState from the 'AdvanceMaster' op that advances the
          underlying master to this component.
      network_states: dictionary of component NetworkState objects

    Returns:
      state handle: final state after advancing
      cost: regularization cost, possibly associated with embedding matrices
      correct: since no gold path is available, 0.
      total: since no gold path is available, 0.
    """
    logging.info('Building component: %s', self.spec.name)
    stride = state.current_batch_size * self.training_beam_size
    with tf.variable_scope(self.name, reuse=True):
      state.handle, fixed_embeddings = fetch_differentiable_fixed_embeddings(
          self, state, stride)

    linked_embeddings = [
        fetch_linked_embedding(self, network_states, spec)
        for spec in self.spec.linked_feature
    ]

    with tf.variable_scope(self.name, reuse=True):
      tensors = self.network.create(
          fixed_embeddings, linked_embeddings, None, None, True, stride=stride)
    update_network_states(self, tensors, network_states, stride)
    cost = self.add_regularizer(tf.constant(0.))

    correct, total = tf.constant(0), tf.constant(0)
    return state.handle, cost, correct, total
Example #14
  def test_quadratic(self):
    fdf = lambda x: ((x-1.3)**2, 2*(x-1.3))

    # Case 1: The starting value is close to 0 and doesn't bracket the min.
    close_start, far_start = tf.constant(0.1), tf.constant(7.0)
    results_close = self.evaluate(tfp.optimizer.linesearch.hager_zhang(
        fdf, initial_step_size=close_start))
    self.assertTrue(results_close.converged)
    self.assertAlmostEqual(results_close.left_pt, results_close.right_pt)
    f0, df0 = fdf(0.0)
    self.assertTrue(_is_exact_wolfe(results_close.left_pt,
                                    results_close.objective_at_left_pt,
                                    results_close.grad_objective_at_left_pt,
                                    f0,
                                    df0,
                                    0.1,
                                    0.9))

    results_far = self.evaluate(tfp.optimizer.linesearch.hager_zhang(
        fdf, initial_step_size=far_start))
    self.assertTrue(results_far.converged)
    self.assertAlmostEqual(results_far.left_pt, results_far.right_pt)
    self.assertTrue(_is_exact_wolfe(results_far.left_pt,
                                    results_far.objective_at_left_pt,
                                    results_far.grad_objective_at_left_pt,
                                    f0,
                                    df0,
                                    0.1,
                                    0.9))
Example #15
    def prepareGraph(self):
        logging.debug("prepareGraph")
#         image_size = self.image_size
#         num_labels = self.num_labels
        
        graph = tf.Graph()
        self.graph = graph
        with graph.as_default():
            # Input data.
            # Load the training, validation and test data into constants that are
            # attached to the graph.
            self.getInputData()
#             tf_train_dataset, tf_train_labels = self.getInputData()
            tf_valid_dataset = tf.constant(self.valid_dataset)
            tf_test_dataset = tf.constant(self.test_dataset)
            
            self.setupVariables()
            
            self.setupLossFunction()
            # Optimizer.
            # We are going to find the minimum of this loss using gradient descent.
            self.setupOptimizer()
            
            # Predictions for the training, validation, and test data.
            # These are not part of training, but merely here so that we can report
            # accuracy figures as we train.
            train_prediction = tf.nn.softmax(self.getTempModleOutput_forTest(self.tf_train_dataset))
            valid_prediction = tf.nn.softmax(self.getTempModleOutput_forTest(tf_valid_dataset))
            test_prediction = tf.nn.softmax(self.getTempModleOutput_forTest(tf_test_dataset))
            
            self.train_prediction = train_prediction
            self.valid_prediction= valid_prediction
            self.test_prediction = test_prediction
               
        return
Example #16
  def _ComputeSampledLogitsTF(self, weights, biases, hidden_acts, labels,
                              num_sampled, num_classes, num_true, sampled_vals,
                              subtract_log_q, remove_accidental_hits,
                              name="sampled_loss_TF"):
    # Should be called from within a `with test_session():` block
    if isinstance(weights, list):
      weights_tf = [tf.constant(shard) for shard in weights]
    else:
      weights_tf = tf.constant(weights)
    biases_tf = tf.constant(biases)
    hidden_acts_tf = tf.constant(hidden_acts,
                                 shape=(self._batch_size, self._dim))
    labels_tf = tf.constant(labels,
                            dtype=tf.int64,
                            shape=(self._batch_size, num_true))

    pred_logits_tf, pred_labels_tf = tf.nn._compute_sampled_logits(
        weights_tf,
        biases_tf,
        hidden_acts_tf,
        labels_tf,
        num_sampled,
        num_classes,
        num_true,
        sampled_vals,
        subtract_log_q=subtract_log_q,
        remove_accidental_hits=remove_accidental_hits,
        name=name)
    return pred_logits_tf, pred_labels_tf
Example #17
def custom_layer(input_matrix):
    input_matrix_sqeezed = tf.squeeze(input_matrix)
    A = tf.constant([[1., 2.], [-1., 3.]])
    b = tf.constant(1., shape=[2, 2])
    temp1 = tf.matmul(A, input_matrix_sqeezed)
    temp = tf.add(temp1, b) # Ax + b
    return(tf.sigmoid(temp))
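
A hedged call of the layer above (assumes TF 1.x; the input is shaped so the squeeze leaves a 2x2 matrix for the Ax + b computation):

import numpy as np
import tensorflow as tf

x_val = np.array([[[[1.], [2.]], [[3.], [4.]]]], dtype=np.float32)  # shape (1, 2, 2, 1)
x = tf.constant(x_val)

with tf.Session() as sess:
    print(sess.run(custom_layer(x)))  # 2x2 matrix of sigmoid(A @ x_squeezed + b)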
Example #18
  def testResumeTrainAchievesRoughlyTheSameLoss(self):
    logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
                          'tmp_logs')
    number_of_steps = [300, 301, 305]

    for i in range(len(number_of_steps)):
      with tf.Graph().as_default():
        tf.set_random_seed(i)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)

        tf_predictions = LogisticClassifier(tf_inputs)
        slim.losses.log_loss(tf_predictions, tf_labels)
        total_loss = slim.losses.get_total_loss()

        optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)

        train_op = slim.learning.create_train_op(
            total_loss, optimizer)

        loss = slim.learning.train(
            train_op, logdir, number_of_steps=number_of_steps[i],
            log_every_n_steps=10)
        self.assertIsNotNone(loss)
        self.assertLess(loss, .015)
Example #19
  def ModelLoss(self):
    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
    tf_labels = tf.constant(self._labels, dtype=tf.float32)

    tf_predictions = LogisticClassifier(tf_inputs)
    slim.losses.log_loss(tf_predictions, tf_labels)
    return slim.losses.get_total_loss()
Example #20
  def testNoneGlobalStep(self):
    with tf.Graph().as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      tf_predictions = BatchNormClassifier(tf_inputs)
      slim.losses.log_loss(tf_predictions, tf_labels)
      total_loss = slim.losses.get_total_loss()
      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)

      train_op = slim.learning.create_train_op(total_loss,
                                               optimizer,
                                               global_step=None)

      global_step = slim.get_or_create_global_step()

      with tf.Session() as sess:
        # Initialize all variables
        sess.run(tf.global_variables_initializer())

        for _ in range(10):
          sess.run([train_op])
        global_step = global_step.eval()
        # Since train_op doesn't use global_step, it shouldn't change.
        self.assertAllClose(global_step, 0)
Example #21
  def testTrainWithTrace(self):
    logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
                          'tmp_logs')
    with tf.Graph().as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      slim.losses.log_loss(tf_predictions, tf_labels)
      total_loss = slim.losses.get_total_loss()
      tf.summary.scalar('total_loss', total_loss)

      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)

      train_op = slim.learning.create_train_op(total_loss, optimizer)

      loss = slim.learning.train(
          train_op,
          logdir,
          number_of_steps=300,
          log_every_n_steps=10,
          trace_every_n_steps=100)
    self.assertIsNotNone(loss)
    for trace_step in [1, 101, 201]:
      trace_filename = 'tf_trace-%d.json' % trace_step
      self.assertTrue(
          os.path.isfile(os.path.join(logdir, trace_filename)))
Example #22
def iris_input_fn(num_epochs=None):
  iris = tf.contrib.learn.datasets.load_iris()
  features = tf.reshape(tf.constant(iris.data), [-1, 4])
  if num_epochs:
    features = tf.train.limit_epochs(features, num_epochs=num_epochs)
  target = tf.reshape(tf.constant(iris.target), [-1])
  return features, target
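
Evaluating the tensors this input_fn returns, as a hedged sketch (assumes TF 1.x with tf.contrib.learn and its bundled Iris dataset):

import tensorflow as tf

features, target = iris_input_fn()
with tf.Session() as sess:
    f, t = sess.run([features, target])
    print(f.shape, t.shape)  # (150, 4) (150,)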
Example #23
def net(file_name, x, pooling_function='MAX'):
    mat_dict = scipy.io.loadmat(file_name)
    img_mean = mat_dict['meta'][0][0][1][0][0][0][0][0]
    layers = mat_dict['layers'][0]
    vgg = x
    content_activations = {}
    relu_num = 1
    pool_num = 1
    for layer_data in layers:
        layer = layer_data[0][0]
        layer_type = layer[1][0]
        if layer_type == 'conv':
            weights, biases, *rest = layer[2][0]
            # permute `weights` elements for input to TensorFlow
            weights = np.transpose(weights, (1, 0, 2, 3))
            W_conv = tf.constant(weights)
            # convert `biases` shape from [n,1] to [n]
            biases = biases.reshape(-1)
            b_conv = tf.constant(biases)
            vgg = conv2d(vgg, W_conv, 1) + b_conv
        elif layer_type == 'relu':
            vgg = tf.nn.relu(vgg)
            content_activations["relu"+str(pool_num)+"_"+str(relu_num)] = vgg
            relu_num += 1
        elif layer_type == 'pool':
            if pooling_function == 'AVG':
                vgg = avg_pool(vgg, 2)
            else:
                vgg = max_pool(vgg, 2)
            pool_num += 1
            relu_num = 1
    return vgg, content_activations, img_mean
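
A hedged usage sketch (assumes TF 1.x, the conv2d/avg_pool/max_pool helpers used above, and a local copy of the VGG MatConvNet weights; the filename below is only illustrative):

import tensorflow as tf

x = tf.placeholder(tf.float32, [1, 224, 224, 3])
vgg_out, content_activations, img_mean = net('imagenet-vgg-verydeep-19.mat', x)
print(sorted(content_activations.keys()))  # e.g. relu1_1, relu1_2, relu2_1, ...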
Example #24
 def testShapeWrong(self):
   with tf.Graph().as_default():
     with self.assertRaisesWithPredicateMatch(
         ValueError,
         lambda e: ("Too many elements provided. Needed at most 5, "
                    "but received 7" == str(e))):
       tf.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
Example #25
def boston_input_fn():
    boston = tf.contrib.learn.datasets.load_boston()
    features = tf.cast(
        tf.reshape(tf.constant(boston.data), [-1, 13]), tf.float32)
    labels = tf.cast(
        tf.reshape(tf.constant(boston.target), [-1, 1]), tf.float32)
    return features, labels
Example #26
def convert_data_to_tensors(x, y):
    inputs = tf.constant(x)
    inputs.set_shape([None, 1])
    
    outputs = tf.constant(y)
    outputs.set_shape([None, 1])
    return inputs, outputs
Example #27
  def test_draw_bounding_boxes_on_image_tensors(self):
    """Tests that bounding box utility produces reasonable results."""
    category_index = {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}

    fname = os.path.join(_TESTDATA_PATH, 'image1.jpg')
    image_np = np.array(Image.open(fname))
    images_np = np.stack((image_np, image_np), axis=0)

    with tf.Graph().as_default():
      images_tensor = tf.constant(value=images_np, dtype=tf.uint8)
      boxes = tf.constant([[[0.4, 0.25, 0.75, 0.75], [0.5, 0.3, 0.6, 0.9]],
                           [[0.25, 0.25, 0.75, 0.75], [0.1, 0.3, 0.6, 1.0]]])
      classes = tf.constant([[1, 1], [1, 2]], dtype=tf.int64)
      scores = tf.constant([[0.8, 0.1], [0.6, 0.5]])
      images_with_boxes = (
          visualization_utils.draw_bounding_boxes_on_image_tensors(
              images_tensor,
              boxes,
              classes,
              scores,
              category_index,
              min_score_thresh=0.2))

      with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())

        # Write output images for visualization.
        images_with_boxes_np = sess.run(images_with_boxes)
        self.assertEqual(images_np.shape, images_with_boxes_np.shape)
        for i in range(images_with_boxes_np.shape[0]):
          img_name = 'image_' + str(i) + '.png'
          output_file = os.path.join(self.get_temp_dir(), img_name)
          logging.info('Writing output image %d to %s', i, output_file)
          image_pil = Image.fromarray(images_with_boxes_np[i, ...])
          image_pil.save(output_file)
Example #28
  def encode_coordinates_alt(self, net):
    """An alternative implemenation for the encoding coordinates.

    Args:
      net: a tensor of shape=[batch_size, height, width, num_features]

    Returns:
      a list of tensors with encoded image coordinates in them.
    """
    batch_size, h, w, _ = net.shape.as_list()
    h_loc = [
      tf.tile(
          tf.reshape(
              tf.contrib.layers.one_hot_encoding(
                  tf.constant([i]), num_classes=h), [h, 1]), [1, w])
      for i in xrange(h)
    ]
    h_loc = tf.concat([tf.expand_dims(t, 2) for t in h_loc], 2)
    w_loc = [
      tf.tile(
          tf.contrib.layers.one_hot_encoding(tf.constant([i]), num_classes=w),
          [h, 1]) for i in xrange(w)
    ]
    w_loc = tf.concat([tf.expand_dims(t, 2) for t in w_loc], 2)
    loc = tf.concat([h_loc, w_loc], 2)
    loc = tf.tile(tf.expand_dims(loc, 0), [batch_size, 1, 1, 1])
    return tf.concat([net, loc], 3)
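
A hedged shape check (assumes TF 1.x, Python 2's xrange or `from six.moves import xrange`, and that `model` is a hypothetical instance of the class defining this method):

import tensorflow as tf

net = tf.zeros([2, 8, 10, 16])               # [batch, height, width, num_features]
encoded = model.encode_coordinates_alt(net)  # hypothetical instance call
print(encoded.get_shape())                   # (2, 8, 10, 34): 16 features + 8 + 10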
Example #29
 def bn_variables(self, size, name):
     weights = OrderedDict()
     weights[name+'_mean'] = tf.Variable(tf.constant(0.0, shape=size))
     weights[name +'_variance'] = tf.Variable(tf.constant(1.0, shape=size))
     weights[name + '_offset'] = tf.Variable(tf.constant(0.0, shape=size))
     weights[name + '_scale'] = tf.Variable(tf.constant(1.0, shape=size))
     return weights
Example #30
  def testEmptyUpdateOps(self):
    with tf.Graph().as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      tf_predictions = BatchNormClassifier(tf_inputs)
      slim.losses.log_loss(tf_predictions, tf_labels)
      total_loss = slim.losses.get_total_loss()
      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)

      train_op = slim.learning.create_train_op(total_loss, optimizer,
                                               update_ops=[])

      moving_mean = tf.contrib.framework.get_variables_by_name('moving_mean')[0]
      moving_variance = tf.contrib.framework.get_variables_by_name(
          'moving_variance')[0]

      with tf.Session() as sess:
        # Initialize all variables
        sess.run(tf.global_variables_initializer())
        mean, variance = sess.run([moving_mean, moving_variance])
        # After initialization moving_mean == 0 and moving_variance == 1.
        self.assertAllClose(mean, [0] * 4)
        self.assertAllClose(variance, [1] * 4)

        for _ in range(10):
          sess.run([train_op])
        mean = moving_mean.eval()
        variance = moving_variance.eval()
        # Since we skip update_ops the moving_vars are not updated.
        self.assertAllClose(mean, [0] * 4)
        self.assertAllClose(variance, [1] * 4)
Example #31
def deconv2d(x,
             num_filters,
             filter_size=[3, 3],
             stride=[1, 1],
             pad='SAME',
             nonlinearity=None,
             init_scale=1.,
             counters={},
             init=False,
             ema=None,
             **kwargs):
    ''' transposed convolutional layer '''
    name = get_name('deconv2d', counters)
    xs = int_shape(x)
    if pad == 'SAME':
        target_shape = [
            xs[0], xs[1] * stride[0], xs[2] * stride[1], num_filters
        ]
    else:
        target_shape = [
            xs[0], xs[1] * stride[0] + filter_size[0] - 1,
            xs[2] * stride[1] + filter_size[1] - 1, num_filters
        ]
    with tf.variable_scope(name):
        if init:
            # data based initialization of parameters
            V = tf.get_variable(
                'V',
                filter_size +
                [num_filters, int(x.get_shape()[-1])],
                tf.float32,
                tf.random_normal_initializer(0, 0.05),
                trainable=True)
            V_norm = tf.nn.l2_normalize(V.initialized_value(), [0, 1, 3])
            x_init = tf.nn.conv2d_transpose(x,
                                            V_norm,
                                            target_shape, [1] + stride + [1],
                                            padding=pad)
            m_init, v_init = tf.nn.moments(x_init, [0, 1, 2])
            scale_init = init_scale / tf.sqrt(v_init + 1e-8)
            # g = tf.get_variable('g', dtype=tf.float32, initializer=scale_init, trainable=True)
            # b = tf.get_variable('b', dtype=tf.float32,initializer=-m_init*scale_init, trainable=True)
            g = tf.get_variable('g',
                                dtype=tf.float32,
                                initializer=tf.constant(
                                    np.ones(num_filters), tf.float32),
                                trainable=True)
            b = tf.get_variable('b',
                                dtype=tf.float32,
                                initializer=tf.constant(
                                    np.zeros(num_filters), tf.float32),
                                trainable=True)
            # print(b)
            x_init = tf.reshape(scale_init, [1, 1, 1, num_filters]) * (
                x_init - tf.reshape(m_init, [1, 1, 1, num_filters]))
            if nonlinearity is not None:
                x_init = nonlinearity(x_init)
            return x_init

        else:
            V, g, b = get_vars_maybe_avg(['V', 'g', 'b'], ema)
            # tf.assert_variables_initialized is deprecated as of TF 1.3

            # use weight normalization (Salimans & Kingma, 2016)
            W = tf.reshape(g, [1, 1, num_filters, 1]) * tf.nn.l2_normalize(
                V, [0, 1, 3])

            # calculate convolutional layer output
            x = tf.nn.conv2d_transpose(x,
                                       W,
                                       target_shape, [1] + stride + [1],
                                       padding=pad)
            x = tf.nn.bias_add(x, b)

            # apply nonlinearity
            if nonlinearity is not None:
                x = nonlinearity(x)
            return x
Example #32
    def build(self, input_shape):
        # We just change the way the bias is added and remove it from the trainable variables.
        input_shape = tf.TensorShape(input_shape)
        if input_shape[-1] is None:
            raise ValueError('The last dimension of the inputs to `Dense` '
                             'should be defined. Found `None`.')
        if not input_shape.is_fully_defined():
            print("the input shape used for build is not fully defined")

        self.kernel = self.add_weight(
            'kernel',
            shape=[input_shape[-1], self.units],
            initializer=self.sparseInitializer,
            regularizer=self.kernel_regularizer,
            constraint=weightFixedAndClippedConstraint(self.sparseInitializer),
            dtype=self.dtype,
            trainable=True)
        self.bias = None

        variableShape = (input_shape[-1], self.units)
        self.mask = tf.Variable(tf.zeros(variableShape, dtype=tf.float64),
                                trainable=False)

        self.k1 = tf.Variable(tf.zeros(variableShape, dtype=tf.float64),
                              trainable=False)
        self.k1n = tf.Variable(tf.zeros(variableShape, dtype=tf.float64),
                               trainable=False)
        self.k2 = tf.Variable(tf.zeros(variableShape, dtype=tf.float64),
                              trainable=False)
        self.k3 = tf.Variable(tf.zeros(variableShape, dtype=tf.float64),
                              trainable=False)
        self.k3n = tf.Variable(tf.zeros(variableShape, dtype=tf.float64),
                               trainable=False)
        self.k4 = tf.Variable(tf.zeros(variableShape, dtype=tf.float64),
                              trainable=False)
        self.TA0 = tf.Variable(tf.zeros(variableShape, dtype=tf.float64),
                               trainable=False)
        self.TI0 = tf.Variable(tf.zeros(variableShape, dtype=tf.float64),
                               trainable=False)

        # only one inhibition per output (unit):
        self.k5 = tf.Variable(tf.zeros(variableShape[-1], dtype=tf.float64),
                              trainable=False)
        self.k5n = tf.Variable(tf.zeros(variableShape[-1], dtype=tf.float64),
                               trainable=False)
        self.k6 = tf.Variable(tf.zeros(variableShape[-1], dtype=tf.float64),
                              trainable=False)
        self.kdI = tf.Variable(tf.zeros(variableShape[-1], dtype=tf.float64),
                               trainable=False)
        self.kdT = tf.Variable(tf.zeros(variableShape[-1], dtype=tf.float64),
                               trainable=False)

        self.E0 = tf.Variable(tf.constant(1, dtype=tf.float64),
                              trainable=False,
                              dtype=tf.float64)
        self.rescaleFactor = tf.Variable(1, dtype=tf.float64, trainable=False)

        # other intermediate variables:
        self.k1M = tf.Variable(tf.zeros(variableShape, dtype=tf.float64),
                               trainable=False)
        self.Cactiv = tf.Variable(tf.zeros(variableShape, dtype=tf.float64),
                                  trainable=False)
        self.k3M = tf.Variable(tf.zeros(variableShape, dtype=tf.float64),
                               trainable=False)
        self.Cinhib = tf.Variable(tf.zeros(variableShape, dtype=tf.float64),
                                  trainable=False)
        self.Kactiv = tf.Variable(tf.zeros(variableShape, dtype=tf.float64),
                                  trainable=False)
        self.Kinhib = tf.Variable(tf.zeros(variableShape, dtype=tf.float64),
                                  trainable=False)

        self.k5M = tf.Variable(tf.zeros(variableShape[-1], dtype=tf.float64),
                               trainable=False)
        self.firstLayerTA0 = tf.Variable(tf.zeros(variableShape[0],
                                                  dtype=tf.float64),
                                         trainable=False)
        self.firstLayerK1M = tf.Variable(tf.zeros(variableShape[0],
                                                  dtype=tf.float64),
                                         trainable=False)
        self.firstLayerkdT = tf.Variable(tf.zeros(variableShape[0],
                                                  dtype=tf.float64),
                                         trainable=False)
        self.firstLayerk2 = tf.Variable(tf.zeros(variableShape[0],
                                                 dtype=tf.float64),
                                        trainable=False)

        self.built = True
        print("Layer successfully built")
Example #33
    def set_constants(self, constantArray, enzymeInit, activInitC, inhibInitC,
                      computedRescaleFactor):
        """
            Define the ops assigning the values for the network constants.
        :return:
        """
        self.rescaleFactor.assign(computedRescaleFactor)
        enzymeInitTensor = enzymeInit * (computedRescaleFactor**0.5)
        self.k1.assign(
            tf.cast(tf.fill(self.k1.shape, constantArray[0]),
                    dtype=tf.float64))
        self.k1n.assign(
            tf.cast(tf.fill(self.k1n.shape, constantArray[1]),
                    dtype=tf.float64))
        self.k2.assign(
            tf.cast(tf.fill(self.k2.shape, constantArray[2]),
                    dtype=tf.float64))
        self.k3.assign(
            tf.cast(tf.fill(self.k3.shape, constantArray[3]),
                    dtype=tf.float64))
        self.k3n.assign(
            tf.cast(tf.fill(self.k3n.shape, constantArray[4]),
                    dtype=tf.float64))
        self.k4.assign(
            tf.cast(tf.fill(self.k4.shape, constantArray[5]),
                    dtype=tf.float64))
        self.k5.assign(
            tf.cast(tf.fill(self.k5.shape, constantArray[6]),
                    dtype=tf.float64))
        self.k5n.assign(
            tf.cast(tf.fill(self.k5n.shape, constantArray[7]),
                    dtype=tf.float64))
        self.k6.assign(
            tf.cast(tf.fill(self.k6.shape, constantArray[8]),
                    dtype=tf.float64))
        self.kdI.assign(
            tf.cast(tf.fill(self.kdI.shape, constantArray[9]),
                    dtype=tf.float64))
        self.kdT.assign(
            tf.cast(tf.fill(self.kdT.shape, constantArray[10]),
                    dtype=tf.float64))
        self.TA0.assign(
            tf.cast(tf.fill(self.TA0.shape, activInitC), dtype=tf.float64))
        self.TI0.assign(
            tf.cast(tf.fill(self.TI0.shape, inhibInitC), dtype=tf.float64))
        self.E0.assign(tf.constant(enzymeInitTensor, dtype=tf.float64))

        #used in the first layer:
        self.firstLayerTA0.assign(
            tf.cast(tf.fill(self.firstLayerTA0.shape, activInitC),
                    dtype=tf.float64))
        self.firstLayerK1M.assign(
            tf.cast(tf.fill(
                self.firstLayerK1M.shape,
                constantArray[0] / (constantArray[1] + constantArray[2])),
                    dtype=tf.float64))
        self.firstLayerkdT.assign(
            tf.cast(tf.fill(self.firstLayerkdT.shape, constantArray[10]),
                    dtype=tf.float64))
        self.firstLayerk2.assign(
            tf.cast(tf.fill(self.firstLayerk2.shape, constantArray[2]),
                    dtype=tf.float64))

        #intermediate values for faster computations:
        self.k1M.assign(self.k1 / (self.k1n + self.k2))
        self.Cactiv.assign(self.k2 * self.k1M * self.E0 * self.TA0)
        self.k5M.assign(self.k5 / (self.k5n + self.k6))
        self.k3M.assign(self.k3 / (self.k3n + self.k4))
        self.Cinhib.assign(
            tf.stack([self.k6 * self.k5M] * (self.k4.shape[0]), axis=0) *
            self.k4 * self.k3M * self.E0 * self.E0 * self.TI0)
        self.Kactiv.assign(self.k1M * self.TA0)
        self.Kinhib.assign(self.k3M * self.TI0)

        self.mask.assign(
            tf.cast(tf.where(tf.less(self.kernel, -0.2), -1.,
                             tf.where(tf.less(0.2, self.kernel), 1., 0.)),
                    dtype=tf.float64))

        self.cstList = [
            self.k1, self.k1n, self.k2, self.k3, self.k3n, self.k4, self.k5,
            self.k5n, self.k6, self.kdI, self.kdT, self.TA0, self.E0, self.k1M,
            self.Cactiv, self.Cinhib, self.Kactiv, self.Kinhib, self.k5M,
            self.k3M, self.firstLayerTA0, self.firstLayerK1M,
            self.firstLayerkdT, self.firstLayerk2
        ]
        self.cstListName = [
            "self.k1", "self.k1n", "self.k2", "self.k3", "self.k3n", "self.k4",
            "self.k5", "self.k5n", "self.k6", "self.kdI", "self.kdT",
            "self.TA0", "self.E0", "self.k1M", "self.Cactiv", "self.Cinhib",
            "self.Kactiv", "self.Kinhib", "self.k5M", "self.k3M",
            "self.firstLayerTA0", "self.firstLayerK1M", "self.firstLayerkdT",
            "self.firstLayerk2"
        ]
Example #34
 def biasVariable(self, shape):
     bias = tf.constant(0.01, shape=shape)
     return tf.Variable(bias, name='b')
Example #35
    """ pred: B*NUM_CLASSES,
        label: B, """
    # classification
    classify_loss = tf.losses.sparse_softmax_cross_entropy(logits=pred, labels=label, weights=smpw)
    # classify_loss = tf.reduce_mean(loss)
    tf.summary.scalar('classify loss', classify_loss)
    # separate each other in keypoints
    keypoints = end_points['keypoints']
    separation_loss = losses.Separation_loss(keypoints,delta = 0.05)
    tf.summary.scalar('separation_loss', separation_loss)
    # close
    grouped_key = end_points['grouped_xyz']
    capture_loss = losses.Capture_loss(keypoints, grouped_key, theta = 0.05) 
    tf.summary.scalar('capture_loss', capture_loss)
  
    total_loss = classify_loss + separation_loss + capture_loss 

    tf.add_to_collection('losses', total_loss)
    return total_loss


if __name__ == '__main__':
    with tf.Graph().as_default():
        inputs = tf.zeros((32, 1024, 3))
        output, _ = get_model(inputs, tf.constant(True))
        #print(output)
Example #36
def create_vocabulary_lookup_table(filename, default_value=None):
    """Creates a lookup table for a vocabulary file.

    Args:
      filename: Path to a vocabulary file containing one word per line.
        Each word is mapped to its line number.
      default_value: UNK tokens will be mapped to this id.
        If None, UNK tokens will be mapped to [vocab_size].

    Returns:
      A tuple (vocab_to_id_table, id_to_vocab_table,
      word_to_count_table, vocab_size). The vocab size does not include
      the UNK token.
    """
    if not gfile.Exists(filename):
        raise ValueError("File does not exist: {}".format(filename))

    # Load vocabulary into memory
    with gfile.GFile(filename) as file:
        vocab = list(line.strip("\n") for line in file)
    vocab_size = len(vocab)

    has_counts = len(vocab[0].split("\t")) == 2
    if has_counts:
        vocab, counts = zip(*[_.split("\t") for _ in vocab])
        counts = [float(_) for _ in counts]
        vocab = list(vocab)
    else:
        counts = [-1. for _ in vocab]

    # Add special vocabulary items
    special_vocab = get_special_vocab(vocab_size)
    vocab += list(special_vocab._fields)
    vocab_size += len(special_vocab)
    counts += [-1. for _ in list(special_vocab._fields)]

    if default_value is None:
        default_value = special_vocab.UNK

    tf.logging.info("Creating vocabulary lookup table of size %d", vocab_size)

    vocab_tensor = tf.constant(vocab)
    count_tensor = tf.constant(counts, dtype=tf.float32)
    vocab_idx_tensor = tf.range(vocab_size, dtype=tf.int64)

    # Create ID -> word mapping
    id_to_vocab_init = tf.contrib.lookup.KeyValueTensorInitializer(
        vocab_idx_tensor, vocab_tensor, tf.int64, tf.string)
    id_to_vocab_table = tf.contrib.lookup.HashTable(id_to_vocab_init, "UNK")

    # Create word -> id mapping
    vocab_to_id_init = tf.contrib.lookup.KeyValueTensorInitializer(
        vocab_tensor, vocab_idx_tensor, tf.string, tf.int64)
    vocab_to_id_table = tf.contrib.lookup.HashTable(vocab_to_id_init,
                                                    default_value)

    # Create word -> count mapping
    word_to_count_init = tf.contrib.lookup.KeyValueTensorInitializer(
        vocab_tensor, count_tensor, tf.string, tf.float32)
    word_to_count_table = tf.contrib.lookup.HashTable(word_to_count_init, -1)

    return vocab_to_id_table, id_to_vocab_table, word_to_count_table, vocab_size
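
A hedged usage sketch (assumes TF 1.x, the gfile/get_special_vocab helpers used above, and a plain-text vocabulary file; the path below is only illustrative):

import tensorflow as tf

vocab_to_id, id_to_vocab, word_to_count, vocab_size = \
    create_vocabulary_lookup_table("data/vocab.txt")

ids = vocab_to_id.lookup(tf.constant(["the", "definitely-not-in-vocab"]))
with tf.Session() as sess:
    sess.run(tf.tables_initializer())
    print(sess.run(ids))  # the second entry maps to the UNK id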
Example #37
import tensorflow as tf
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

x = tf.constant([23, 34, 56], name='x')
y = tf.Variable(x + 5, name='y')

model = tf.global_variables_initializer()

with tf.Session() as session:
    session.run(model)
    print(session.run(y))
    y = y + 15
    y = y + 100
    print(session.run(y))
Example #38
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
Example #39
 def _event_shape_tensor(self):
     return tf.constant([], dtype=tf.int32)
Example #40
def bias_variable(shape):
    initial = tf.constant(0.0, shape=shape)
    #     initial = tf.truncated_normal(shape, stddev=0.01)
    return tf.Variable(initial, 'b')
Example #41
    print('val shape: ' + str(val.get_shape()))
    print('val shape[0]: ' + str(val.get_shape()[0]))
    print('target shape: ' + str(target.get_shape()))
    print('target shape[0]: ' + str(target.get_shape()[0]))
    print('target shape[1]: ' + str(target.get_shape()[1]))


val = tf.transpose(val, [1,0,2])
last = tf.gather(val, int(val.get_shape()[0]) - 1)

if debug_mode:
    print('val shape after transpose: ' + str(val.get_shape()))
    print('last value: ' + str(last))

weight = tf.Variable(tf.truncated_normal([num_hidden, int(target.get_shape()[1])]))
bias = tf.Variable(tf.constant(0.1, shape=[target.get_shape()[1]]))

prediction = tf.nn.softmax(tf.matmul(last, weight) + bias)
cross_entropy = -tf.reduce_sum(target * tf.log(tf.clip_by_value(prediction, 1e-10, 1.0)))

optimizer = tf.train.AdamOptimizer()
minimize = optimizer.minimize(cross_entropy)

mistakes = tf.not_equal(tf.argmax(target,1), tf.argmax(prediction,1))
error = tf.reduce_mean(tf.cast(mistakes, tf.float32))
if debug_mode:
    print('mistakes: ' + str(mistakes))
    print('error: ' + str(error))


init_op = tf.global_variables_initializer()
Example #42
def net(x,
        feed_dict_seq,
        seq_length,
        batch_size,
        vocab_size,
        embd,
        starter,
        mode="train"):

    with tf.variable_scope(name_or_scope='RNN',
                           values=[x],
                           reuse=tf.AUTO_REUSE):

        if mode == "train" or mode == "eval":
            inputs = x
            c0 = tf.zeros([batch_size, rnn_size[0]], tf.float32)
            h0 = tf.zeros([batch_size, rnn_size[0]], tf.float32)
            c1 = tf.zeros([batch_size, rnn_size[1]], tf.float32)
            h1 = tf.zeros([batch_size, rnn_size[1]], tf.float32)
        elif mode == "export":
            inputs = x
            c0 = tf.placeholder(tf.float32,
                                shape=(batch_size, rnn_size[0]),
                                name="c0")
            h0 = tf.placeholder(tf.float32,
                                shape=(batch_size, rnn_size[0]),
                                name="h0")
            c1 = tf.placeholder(tf.float32,
                                shape=(batch_size, rnn_size[1]),
                                name="c1")
            h1 = tf.placeholder(tf.float32,
                                shape=(batch_size, rnn_size[1]),
                                name="h1")
        else:
            # Use placeholder in inference mode for both input and states
            # This allows taking the previous batch (step)'s output
            # as the input for the next batch.
            inputs = tf.placeholder(tf.int32,
                                    shape=(batch_size, seq_length),
                                    name="inputs")
            initial_value = np.array(starter, dtype=np.int32)
            feed_dict_seq[inputs] = initial_value

            c0 = tf.placeholder(tf.float32,
                                shape=(batch_size, rnn_size[0]),
                                name="c0")
            h0 = tf.placeholder(tf.float32,
                                shape=(batch_size, rnn_size[0]),
                                name="h0")
            c1 = tf.placeholder(tf.float32,
                                shape=(batch_size, rnn_size[1]),
                                name="c1")
            h1 = tf.placeholder(tf.float32,
                                shape=(batch_size, rnn_size[1]),
                                name="h1")

            feed_dict_seq[c0] = np.zeros((batch_size, rnn_size[0]),
                                         dtype=float)
            feed_dict_seq[h0] = np.zeros((batch_size, rnn_size[0]),
                                         dtype=float)
            feed_dict_seq[c1] = np.zeros((batch_size, rnn_size[1]),
                                         dtype=float)
            feed_dict_seq[h1] = np.zeros((batch_size, rnn_size[1]),
                                         dtype=float)

        initial_state = (rnn.LSTMStateTuple(c0,
                                            h0), rnn.LSTMStateTuple(c1, h1))

        cell = rnn.MultiRNNCell([
            rnn.LSTMBlockCell(num_units=rnn_size[i])
            for i in range(num_rnn_layer)
        ])

        if len(embd) > 0:
            embeddingW = tf.get_variable('embedding',
                                         initializer=tf.constant(embd),
                                         trainable=True)
        else:
            embeddingW = tf.get_variable('embedding',
                                         [vocab_size, rnn_size[0]])

        input_feature = tf.nn.embedding_lookup(embeddingW, inputs)

        input_list = tf.unstack(input_feature, axis=1)

        outputs, last_state = tf.nn.static_rnn(cell, input_list, initial_state)

        output = tf.reshape(tf.concat(outputs, 1), [-1, rnn_size[1]])

        logits = tf.layers.dense(
            inputs=tf.layers.flatten(output),
            units=vocab_size,
            activation=tf.identity,
            use_bias=True,
            kernel_initializer=tf.contrib.layers.variance_scaling_initializer(
                2.0),
            bias_initializer=tf.zeros_initializer())

        return logits, last_state, inputs
Example #43
    def __init__(
      self, sequence_length, num_classes, vocab_size,
      embedding_size, filter_sizes, num_filters, l2_reg_lambda=0.0):

        # Placeholders for input, output and dropout
        self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
        self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0)

        # Embedding layer
        with tf.device('/gpu:0'), tf.name_scope("embedding"):
            W = tf.Variable(
                tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
                name="W")
            self.embedded_chars = tf.nn.embedding_lookup(W, self.input_x)
            # insert another dimension of 1 at the end
            # channel is 1
            # (batch_size, max_num_word, word_embedding_size,1) ?
            self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)
            print('embedding shape: {}'.format(self.embedded_chars_expanded.get_shape()))
        # Create a convolution + maxpool layer for each filter size
        pooled_outputs = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.name_scope("conv-maxpool-%s" % filter_size):
                # Convolution Layer
                filter_shape = [filter_size, embedding_size, 1, num_filters]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
                b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
                print('conv2d W shape: {}'.format(W.get_shape()))
                conv = tf.nn.conv2d(
                    self.embedded_chars_expanded,
                    W,
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv")
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                # Maxpooling over the outputs
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, sequence_length - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="pool")
                pooled_outputs.append(pooled)

        # Combine all the pooled features
        num_filters_total = num_filters * len(filter_sizes)
        self.h_pool = tf.concat(3, pooled_outputs)
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])

        # Add dropout
        with tf.name_scope("dropout"):
            self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)

        # Final (unnormalized) scores and predictions
        with tf.name_scope("output"):
            W = tf.get_variable(
                "W",
                shape=[num_filters_total, num_classes],
                initializer=tf.contrib.layers.xavier_initializer())
            b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
            self.predictions = tf.argmax(self.scores, 1, name="predictions")

        # Calculate mean cross-entropy loss
        with tf.name_scope("loss"):
            losses = tf.nn.softmax_cross_entropy_with_logits(self.scores, self.input_y)
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss

        # Accuracy
        with tf.name_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
            self.wrong_predictions = tf.not_equal(self.predictions, tf.argmax(self.input_y, 1))

        with tf.name_scope("error"):
            self.error = 1-self.accuracy
Example #44
 def comp():
   return tf.constant(10)
Example #45
from BertMulticlassClassifier import CustomBertForSequenceClassification
from utils import update_bert_config

RUN_SPECIAL_LAYER_TEST = False
RUN_BERT_TEST = True

if (RUN_BERT_TEST):
  import tensorflow as tf
  from transformers import BertTokenizer, TFBertModel, BertConfig

  # Tokenizer
  tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-cased')
  input_ids = tf.constant(tokenizer.encode("men trekanten havde ikke noget tøj på"))[None, :]  # Batch size 1
  print(input_ids)

  # Set config
  config = BertConfig.from_pretrained('bert-base-multilingual-cased')
  updates = {"scale_logits": True,
             "num_labels": 3,
             "scf_min": 0.3,
             "scf_max": 2.0,
            #  "apply_dropconnect": True,
             "dropconnect_prob": 0.8,
             "noise_distribution": "normal",
             "noise_amount": 0.025}
  config = update_bert_config(
    config,
    updates
  )

  # Instantiate model
                input_tensor=input_tensor,
                name='attentive_rnn_loss/attentive_inference'
            )

            attentive_autoencoder_input = tf.concat((attentive_rnn_out['final_attention_map'],
                                                     input_tensor), axis=-1)

            output = self._attentive_gan.build_autoencoder(
                input_tensor=attentive_autoencoder_input,
                name='attentive_autoencoder_loss/autoencoder_inference'
            )

        return output['skip_3'], attentive_rnn_out['attention_map_list']


if __name__ == '__main__':
    """
    test
    """
    input_tensor = tf.placeholder(dtype=tf.float32, shape=[5, 256, 256, 3])
    label_tensor = tf.placeholder(dtype=tf.float32, shape=[5, 256, 256, 3])
    mask_tensor = tf.placeholder(dtype=tf.float32, shape=[5, 256, 256, 1])

    net = DeRainNet(tf.constant('train', tf.string))

    g_loss, d_loss, _ = net.compute_loss(input_tensor, label_tensor, mask_tensor, 'loss')
    g_loss2, d_loss2, _ = net.compute_loss(input_tensor, label_tensor, mask_tensor, 'loss', reuse=True)

    for vv in tf.trainable_variables():
        print(vv.name)
    def _init(self,
              ob_space,
              ac_space,
              hid_size,
              num_hid_layers,
              gaussian_fixed_var=True):
        assert isinstance(ob_space, gym.spaces.Box)

        self.pdtype = pdtype = make_pdtype(ac_space)
        sequence_length = None

        ob = U.get_placeholder(name="ob",
                               dtype=tf.float32,
                               shape=[sequence_length] + list(ob_space.shape))

        with tf.variable_scope("obfilter"):
            self.ob_rms = RunningMeanStd(shape=ob_space.shape)

        with tf.variable_scope('vf'):
            obz = tf.clip_by_value((ob - self.ob_rms.mean) / self.ob_rms.std,
                                   -5.0, 5.0)
            last_out = obz
            # for i in range(num_hid_layers):
            # last_out = tf.nn.tanh(tf.layers.dense(last_out, hid_size, name = "fc%i" % (i + 1),
            #                                       kernel_initializer = U.normc_initializer(1.0)))
            self.vpred = tf.layers.dense(
                last_out,
                1,
                name='final',
                kernel_initializer=U.normc_initializer(1.0))[:, 0]

        with tf.variable_scope('pol'):
            last_out = obz
            # for i in range(num_hid_layers):
            #     last_out = tf.nn.tanh(tf.layers.dense(last_out, hid_size, name = 'fc%i' % (i + 1), kernel_initializer = U.normc_initializer(1.0)))
            if gaussian_fixed_var and isinstance(ac_space, gym.spaces.Box):
                mean = tf.layers.dense(
                    last_out,
                    pdtype.param_shape()[0] // 2,
                    name='final',
                    kernel_initializer=U.normc_initializer(0.01))
                # logstd = tf.get_variable(name="logstd", shape=[1, pdtype.param_shape()[0]//2], initializer=tf.zeros_initializer())
                # pdparam = tf.concat([mean, mean * 0.0 + tf.ones(pdtype.param_shape()[0])//2], axis = 1)
                logstd = tf.multiply(
                    tf.ones(shape=[1, pdtype.param_shape()[0] // 2]),
                    tf.constant(1.0 / ac_space.shape[0]))
                pdparam = tf.concat([mean, mean * 0.0 + logstd], axis=1)
            else:
                pdparam = tf.layers.dense(
                    last_out,
                    pdtype.param_shape()[0],
                    name='final',
                    kernel_initializer=U.normc_initializer(0.01))

        pdparam = tf.clip_by_value(pdparam, -10.0, 10.0)
        self.pd = pdtype.pdfromflat(pdparam)

        self.state_in = []
        self.state_out = []

        stochastic = tf.placeholder(dtype=tf.bool, shape=())
        ac = U.switch(stochastic, self.pd.sample(), self.pd.mode())
        self._act = U.function([stochastic, ob], [ac, self.vpred])
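For context, a policy built with an _init like the one above is usually queried through the compiled _act function. A minimal usage sketch, assuming pi is an instance of the surrounding policy class and env is a gym environment with a Box observation space (these names are illustrative, not part of the snippet above):

# Hypothetical call into the policy defined above (OpenAI-baselines style usage).
ob = env.reset()                       # single observation from the environment
ac, vpred = pi._act(True, ob[None])    # stochastic=True samples from pd; False takes the mode
ac, vpred = ac[0], vpred[0]            # drop the batch dimension added by ob[None]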
def bias_variable(shape):
    """ Return a tensorflow.Variable for bias. """
    return tf.Variable(tf.constant(0.1, shape=shape))
Example #49
0
 def _forward_log_det_jacobian(self, x):
     return tf.constant(-2., x.dtype)
Example #50
0
import tensorflow as tf

node1 = tf.constant(3.0, dtype=tf.float32)
node2 = tf.constant(4.0) # also tf.float32 implicitly

sess = tf.Session()
print(sess.run([node1, node2]))
  def test_outputs(self, strategy):

    # 0 represents the void class label
    thing_class_ids = [0, 1, 2, 3, 4]
    stuff_class_ids = [0, 5, 6, 7, 8, 9, 10]
    all_class_ids = set(thing_class_ids + stuff_class_ids)

    num_thing_classes = len(thing_class_ids)
    num_stuff_classes = len(stuff_class_ids)
    num_classes_for_segmentation = num_stuff_classes + 1

    # All thing classes are mapped to class_id=1, and stuff class ids are offset
    # so that they start from 2. This means the semantic segmentation head will
    # have ground truths with class_ids belonging to [0, 1, 2, 3, 4, 5, 6, 7].

    config = {
        'output_size': [640, 640],
        'max_num_detections': 100,
        'stuff_classes_offset': 3,
        'mask_binarize_threshold': 0.5,
        'score_threshold': 0.005,
        'things_class_label': 1,
        'void_class_label': 0,
        'void_instance_id': -1
    }
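    # With this config: stuff ids 5..10 minus stuff_classes_offset=3 become 2..7,
    # every thing class collapses to things_class_label=1, and void stays at
    # void_class_label=0, giving the [0, 1, ..., 7] semantic ids described in the
    # comment above (num_stuff_classes + 1 = 8 classes for the segmentation head).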
    generator = PANOPTIC_SEGMENTATION_GENERATOR(**config)

    crop_height = 112
    crop_width = 112

    boxes = tf.constant([[
        [167, 398, 342, 619],
        [192, 171, 363, 449],
        [211, 1, 382, 74]
    ]])

    num_detections = boxes.get_shape().as_list()[1]
    scores = tf.random.uniform([1, num_detections], 0, 1)
    classes = tf.random.uniform(
        [1, num_detections],
        1, num_thing_classes, dtype=tf.int32)
    masks = tf.random.normal(
        [1, num_detections, crop_height, crop_width])

    segmentation_mask = tf.random.uniform(
        [1, *config['output_size']],
        0, num_classes_for_segmentation, dtype=tf.int32)
    segmentation_mask_one_hot = tf.one_hot(
        segmentation_mask, depth=num_stuff_classes + 1)

    inputs = {
        'detection_boxes': boxes,
        'detection_scores': scores,
        'detection_classes': classes,
        'detection_masks': masks,
        'num_detections': tf.constant([num_detections]),
        'segmentation_outputs': segmentation_mask_one_hot
        }

    def _run(inputs):
      return generator(inputs=inputs)

    @tf.function
    def _distributed_run(inputs):
      outputs = strategy.run(_run, args=(inputs,))
      return strategy.gather(outputs, axis=0)

    outputs = _distributed_run(inputs)

    self.assertIn('category_mask', outputs)
    self.assertIn('instance_mask', outputs)

    self.assertAllEqual(
        outputs['category_mask'][0].get_shape().as_list(),
        config['output_size'])

    self.assertAllEqual(
        outputs['instance_mask'][0].get_shape().as_list(),
        config['output_size'])

    for category_id in np.unique(outputs['category_mask']):
      self.assertIn(category_id, all_class_ids)
Example #52
0
 def testComposeFromTensor(self):
     x = tf.constant([-5., 0., 5.])
     self.assertAllClose(*self.evaluate([tf.exp(x), tfb.Exp()(x)]),
                         atol=0,
                         rtol=1e-3)
Example #53
0
 def _make_column(value):
   value = tf.constant(value, dtype=ids.dtype)
   if batch_size is not None:
     value = tf.fill([batch_size], value)
   return tf.expand_dims(value, -1)
Example #54
0
 def _inverse_log_det_jacobian(self, y):
     return tf.constant(2., y.dtype)
Example #55
0
    def _generate_examples(self, label_images: Union[str, dict]):
        """Generate example for each image in the dict."""

        temp_dir = mkdtemp(prefix=self.name)

        if isinstance(label_images, str):
            assert path.isdir(label_images)
            print("label_images:", label_images, ";")
            (
                self._split_examples,
                labels,
            ) = tfds.folder_dataset.image_folder._get_split_label_images(
                path.dirname(label_images)
            )
            self.info.features["label"].names = sorted(labels)
            split_dict = tfds.core.SplitDict(self.name)

            label_images = {label: [] for label in self.info.features["label"].names}

            for split_name, examples in self._split_examples.items():
                split_dict.add(
                    tfds.core.SplitInfo(
                        name=split_name,
                        shard_lengths=[len(examples)],
                    )
                )

                # TODO: do this in a generator so it doesn't fill memory
                for example in examples:
                    label_images[example.label].append(example.image_path)
            self.info.update_splits_if_different(split_dict)

        for label, image_paths in label_images.items():
            for image_path in image_paths:
                key = posixpath.sep.join((label, posixpath.basename(image_path)))

                temp_image_filename = os.path.join(
                    temp_dir,
                    key.replace(posixpath.sep, "_").replace(os.path.sep, "_"),
                )

                if BaseImageLabelFolder.session._closed:
                    BaseImageLabelFolder.session = tf.compat.v1.Session()
                    BaseImageLabelFolder.session.__enter__()

                image_decoded = tf.image.decode_jpeg(
                    tf.io.read_file(image_path), channels=3 if self.rgb else 1
                )
                resized = tf.image.resize(image_decoded, self.resolution)
                enc = tf.image.encode_jpeg(
                    tf.cast(resized, tf.uint8),
                    "rgb" if self.rgb else "grayscale",
                    quality=100,
                    chroma_downsampling=False,
                )
                fwrite = tf.io.write_file(tf.constant(temp_image_filename), enc)
                result = BaseImageLabelFolder.session.run(fwrite)

                yield key, {
                    "image/filename": temp_image_filename,
                    "image": temp_image_filename,
                    "label": label,
                }

        print("resolved all files, now you should delete: {!r}".format(temp_dir))
        if not BaseImageLabelFolder.session._closed:
            BaseImageLabelFolder.session.__exit__(None, None, None)
Example #56
0
def init_bias_variable(shape):
	initial = tf.constant(0.1, shape=shape, dtype=tf.float32)
	return tf.Variable(initial)
Example #57
0
def main():
    if len(sys.argv) > 1:
        test_image_fn = sys.argv[1]
        if not os.path.exists(test_image_fn):
            print("Not found:", test_image_fn)
            sys.exit(-1)
    else:
        # Select a test image from a test directory
        test_dirs = [
            os.path.join(CROPPED_AUG_IMAGE_DIR, class_name, 'test')
            for class_name in common.CLASS_NAME
        ]
        test_dir = np.random.choice(test_dirs)
        test_images_fn = [test_image for test_image in os.listdir(test_dir)]
        test_image_fn = np.random.choice(test_images_fn, 1)[0]
        test_image_fn = os.path.join(test_dir, test_image_fn)
    print("Test image:", test_image_fn)

    # Open and resize a test image
    test_image_org = (ndimage.imread(test_image_fn).astype(np.float32) -
                      PIXEL_DEPTH / 2) / PIXEL_DEPTH
    test_image_org.resize((CNN_IN_HEIGHT, CNN_IN_WIDTH, CNN_IN_CH))
    test_image = test_image_org.reshape(
        (1, CNN_IN_WIDTH, CNN_IN_HEIGHT, CNN_IN_CH))

    # Training model
    graph = tf.Graph()
    with graph.as_default():
        # Variables
        w_conv1 = tf.Variable(
            tf.truncated_normal(
                [FLAGS.patch_size, FLAGS.patch_size, FLAGS.num_channels, 48],
                stddev=0.1))
        b_conv1 = tf.Variable(tf.constant(0.1, shape=[48]))

        w_conv2 = tf.Variable(
            tf.truncated_normal(
                [FLAGS.patch_size, FLAGS.patch_size, 48, 64], stddev=0.1))
        b_conv2 = tf.Variable(tf.constant(0.1, shape=[64]))

        w_conv3 = tf.Variable(
            tf.truncated_normal(
                [FLAGS.patch_size, FLAGS.patch_size, 64, 128], stddev=0.1))
        b_conv3 = tf.Variable(tf.constant(0.1, shape=[128]))

        w_fc1 = tf.Variable(
            tf.truncated_normal([16 * 4 * 128, 2048], stddev=0.1))
        b_fc1 = tf.Variable(tf.constant(0.1, shape=[2048]))

        w_fc2 = tf.Variable(tf.truncated_normal([2048, FLAGS.num_classes]))
        b_fc2 = tf.Variable(tf.constant(0.1, shape=[FLAGS.num_classes]))

        params = [
            w_conv1, b_conv1, w_conv2, b_conv2, w_conv3, b_conv3, w_fc1, b_fc1,
            w_fc2, b_fc2
        ]

        # restore weights
        f = "weights.npz"
        if os.path.exists(f):
            initial_weights = load_initial_weights(f)
        else:
            initial_weights = None

        if initial_weights is not None:
            assert len(initial_weights) == len(params)
            assign_ops = [w.assign(v) for w, v in zip(params, initial_weights)]

        # A constant tensor holding the test image
        tf_test_image = tf.constant(test_image)

        # model
        logits = model(tf_test_image, w_conv1, b_conv1, w_conv2, b_conv2,
                       w_conv3, b_conv3, w_fc1, b_fc1, w_fc2, b_fc2)
        test_pred = tf.nn.softmax(logits)

        # Restore ops
        saver = tf.train.Saver()

    # Recognize a brand logo of test image
    with tf.Session(graph=graph) as session:
        tf.global_variables_initializer().run()
        if initial_weights is not None:
            session.run(assign_ops)
            print('initialized by pre-learned weights')
        elif os.path.exists("models"):
            save_path = "models/deep_logo_model"
            saver.restore(session, save_path)
            print('Model restored')
        else:
            print('initialized')
        pred = session.run([test_pred])
        print("Class name:", common.CLASS_NAME[np.argmax(pred)])
        print("Probability:", np.max(pred))
Example #58
0
# n-D
X = tf.placeholder(tf.float64, [None, DIM_NUM])


W = tf.Variable(tf.zeros([DIM_NUM, 1], dtype=tf.float64), dtype = tf.float64)
b = tf.Variable(tf.zeros([1], dtype=tf.float64), dtype = tf.float64)

logits = tf.matmul(X, W)+b
sig_v = tf.sigmoid(logits)
# loss =  tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels = Y,
#                                                 logits = logits,
#                                                 name="mpc_sce"))
# binary classes [0, 1]
Y = tf.placeholder(tf.float64, [None, 1])
#loss = tf.reduce_mean(-Y*tf.log(sig_v) -(1 - Y) * tf.log(1 - sig_v))
ONE = tf.constant(1.0, dtype=tf.float64)
loss = tf.reduce_mean(-Y*tf.log(sig_v) - tf.subtract(ONE, Y) * tf.log(1 - sig_v))
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

init = tf.global_variables_initializer()

file_name_prefix = str("../datasets/") + str(DIM_NUM) + "D/" + str(DIM_NUM) + "d"

import argparse

parser = argparse.ArgumentParser(description="MPC Logistic Regression with SCE loss demo")
parser.add_argument('--party_id', type=int, help="Party ID")
args = parser.parse_args()
my_party_id = args.party_id

if my_party_id == 2:
    my_party_id = 0
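For reference, a graph like the one defined in this example is driven in the usual TF 1.x fashion: run init once, then repeatedly feed X/Y batches into the optimizer op. A minimal, non-MPC sketch, assuming x_batch and y_batch are NumPy arrays of shape [n, DIM_NUM] and [n, 1] (both are illustrative names, not defined above):

# Hypothetical training loop for the graph above (plain TF 1.x, not the MPC runtime).
with tf.Session() as sess:
    sess.run(init)
    for step in range(1000):
        _, cur_loss = sess.run([optimizer, loss], feed_dict={X: x_batch, Y: y_batch})
        if step % 100 == 0:
            print(step, cur_loss)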
Example #59
0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2018/3/16 1:28 PM
# @Author  : guifeng([email protected])
# @File    : tensorflowapi.py
import tensorflow as tf

a = tf.constant([1,1,1,0,0,0,1,1,1,0, 0,0,1,1,1,0,0,1,1,0,0,1,1,0,0],dtype=tf.float32,shape=[1,5,5,1])
b = tf.constant([1,0,1,0,1,0,1,0,1],dtype=tf.float32,shape=[3,3,1,1])
c = tf.nn.conv2d(a,b,strides=[1, 2, 2, 1],padding='VALID')
d = tf.nn.conv2d(a,b,strides=[1, 2, 2, 1],padding='SAME')
with tf.Session() as sess:
    print ("c shape:")
    print (c.shape)
    print ("c value:")
    print (sess.run(c))
    print ("d shape:")
    print (d.shape)
    print ("d value:")
    print (sess.run(d))
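The shapes printed by this script follow directly from the conv2d padding rules: with a 5x5 input, a 3x3 filter and stride 2, VALID gives ceil((5 - 3 + 1) / 2) = 2 while SAME gives ceil(5 / 2) = 3. A small helper that reproduces the arithmetic (illustrative only):

import math

def conv_out_size(in_size, filter_size, stride, padding):
    # Spatial output size of tf.nn.conv2d for the given padding mode.
    if padding == 'VALID':
        return math.ceil((in_size - filter_size + 1) / stride)
    return math.ceil(in_size / stride)  # 'SAME'

print(conv_out_size(5, 3, 2, 'VALID'))  # 2 -> c has shape (1, 2, 2, 1)
print(conv_out_size(5, 3, 2, 'SAME'))   # 3 -> d has shape (1, 3, 3, 1)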
def FCN_32s(image_batch_tensor, number_of_classes, is_training, reuse=None):
    """Returns the FCN-32s model definition.
    The function returns the model definition of a network that was described
    in 'Fully Convolutional Networks for Semantic Segmentation' by Long et al.
    The network subsamples the input by a factor of 32 and uses a bilinear
    upsampling kernel to upsample the prediction by a factor of 32. This means
    that if the image size is not a multiple of 32, the prediction will have a
    different size than the input. To adapt the network for inputs of any size,
    use adapt_network_for_any_size_input(FCN_32s, 32). Note: the upsampling
    kernel is fixed in this model definition, because learning it did not give
    significant improvements according to the aforementioned paper.
    
    Parameters
    ----------
    image_batch_tensor : [batch_size, height, width, depth] Tensor
        Tensor specifying input image batch
    number_of_classes : int
        An argument specifying the number of classes to be predicted.
        For example, for PASCAL VOC it is 21.
    is_training : boolean
        An argument specifying whether the network is being trained or evaluated.
        It affects the behaviour of the underlying VGG-16 dropout layers.
    reuse : bool or None
        Whether to reuse the variables of an already constructed "fcn_32s" scope.
    
    Returns
    -------
    upsampled_logits : [batch_size, height, width, number_of_classes] Tensor
        Tensor with logits representing predictions for each class.
        Be careful: the output can have a different size than the input; use
        adapt_network_for_any_size_input to adapt the network for any input size.
        Otherwise, the input image sizes should be multiples of 32.
    vgg_16_variables_mapping : dict {string: variable}
        Dict which maps the FCN-32s model's variables to VGG-16 checkpoint variable
        names. We need this to initialize the weights of the FCN-32s model with VGG-16
        from a checkpoint file. Look at the ipython notebook for examples.
    """

    with tf.variable_scope("fcn_32s", reuse=reuse) as fcn_32s_scope:
        #with tf.variable_scope("fcn_32s") as fcn_32s_scope:
        upsample_factor = 32

        # Convert image to float32 before subtracting the
        # mean pixel value
        image_batch_float = tf.to_float(image_batch_tensor)

        # Subtract the mean pixel value from each pixel
        mean_centered_image_batch = image_batch_float - [
            _R_MEAN, _G_MEAN, _B_MEAN
        ]

        upsample_filter_np = bilinear_upsample_weights(upsample_factor,
                                                       number_of_classes)

        upsample_filter_tensor = tf.constant(upsample_filter_np)

        # TODO: make pull request to get this custom vgg feature accepted
        # to avoid using custom slim repo.
        with slim.arg_scope(vgg.vgg_arg_scope()):

            logits, end_points = vgg.vgg_16(mean_centered_image_batch,
                                            num_classes=number_of_classes,
                                            is_training=is_training,
                                            spatial_squeeze=False,
                                            fc_conv_padding='SAME')

        downsampled_logits_shape = tf.shape(logits)

        # Calculate the output size of the upsampled tensor
        #upsampled_logits_shape = tf.pack([
        #                                  downsampled_logits_shape[0],
        #                                  downsampled_logits_shape[1] * upsample_factor,
        #                                  downsampled_logits_shape[2] * upsample_factor,
        #                                  downsampled_logits_shape[3]
        #                                 ])

        upsampled_logits_shape = tf.stack([
            downsampled_logits_shape[0],
            downsampled_logits_shape[1] * upsample_factor,
            downsampled_logits_shape[2] * upsample_factor,
            downsampled_logits_shape[3]
        ])

        # Perform the upsampling
        upsampled_logits = tf.nn.conv2d_transpose(
            logits,
            upsample_filter_tensor,
            output_shape=upsampled_logits_shape,
            strides=[1, upsample_factor, upsample_factor, 1])

        # Map the original vgg-16 variable names
        # to the variables in our model. This is done
        # to make it possible to use assign_from_checkpoint_fn()
        # while providing this mapping.
        # TODO: make it cleaner
        vgg_16_variables_mapping = {}

        vgg_16_variables = slim.get_variables(fcn_32s_scope)

        for variable in vgg_16_variables:

            # Here we remove the part of a name of the variable
            # that is responsible for the current variable scope
            # original_vgg_16_checkpoint_string = variable.name[len(fcn_32s_scope.original_name_scope):-2]

            # Updated: changed .name_scope to .name because name_scope only affects operations
            # and variable scope is actually represented by .name
            original_vgg_16_checkpoint_string = variable.name[
                len(fcn_32s_scope.name) + 1:-2]
            vgg_16_variables_mapping[
                original_vgg_16_checkpoint_string] = variable

    return upsampled_logits, vgg_16_variables_mapping
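As the docstring notes, vgg_16_variables_mapping exists so the VGG-16 part of the graph can be initialized from a vanilla VGG-16 checkpoint. A minimal usage sketch, assuming TF 1.x with slim available and a local vgg_16.ckpt (the placeholder shape and checkpoint path are illustrative):

# Hypothetical wiring of FCN_32s with VGG-16 checkpoint initialization.
image_batch = tf.placeholder(tf.uint8, shape=[1, 384, 384, 3], name='image_batch')

upsampled_logits, vgg_16_variables_mapping = FCN_32s(
    image_batch_tensor=image_batch,
    number_of_classes=21,            # e.g. PASCAL VOC
    is_training=False)

# The mapping returned above goes from checkpoint variable names to FCN-32s variables.
init_fn = slim.assign_from_checkpoint_fn('vgg_16.ckpt', vgg_16_variables_mapping)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    init_fn(sess)   # load the VGG-16 weights into the FCN-32s variables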