Example no. 1
  def _meshgrid(depth, height, width, z_near, z_far):
    with tf.variable_scope('_meshgrid'):
      x_t = tf.reshape(
          tf.tile(tf.linspace(-1.0, 1.0, width), [height * depth]),
          [depth, height, width])
      y_t = tf.reshape(
          tf.tile(tf.linspace(-1.0, 1.0, height), [width * depth]),
          [depth, width, height])
      y_t = tf.transpose(y_t, [0, 2, 1])
      sample_grid = tf.tile(
          tf.linspace(float(z_near), float(z_far), depth), [width * height])
      z_t = tf.reshape(sample_grid, [height, width, depth])
      z_t = tf.transpose(z_t, [2, 0, 1])

      z_t = 1 / z_t
      d_t = 1 / z_t
      x_t /= z_t
      y_t /= z_t

      x_t_flat = tf.reshape(x_t, (1, -1))
      y_t_flat = tf.reshape(y_t, (1, -1))
      d_t_flat = tf.reshape(d_t, (1, -1))

      ones = tf.ones_like(x_t_flat)
      grid = tf.concat([d_t_flat, y_t_flat, x_t_flat, ones], 0)
      return grid
Example no. 2
def meshgrid(batch, height, width, is_homogeneous=True):
  """Construct a 2D meshgrid.

  Args:
    batch: batch size
    height: height of the grid
    width: width of the grid
    is_homogeneous: whether to return in homogeneous coordinates
  Returns:
    x,y grid coordinates [batch, 2 (3 if homogeneous), height, width]
  """
  x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])),
                  tf.transpose(tf.expand_dims(
                      tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
  y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
                  tf.ones(shape=tf.stack([1, width])))
  x_t = (x_t + 1.0) * 0.5 * tf.cast(width - 1, tf.float32)
  y_t = (y_t + 1.0) * 0.5 * tf.cast(height - 1, tf.float32)
  if is_homogeneous:
    ones = tf.ones_like(x_t)
    coords = tf.stack([x_t, y_t, ones], axis=0)
  else:
    coords = tf.stack([x_t, y_t], axis=0)
  coords = tf.tile(tf.expand_dims(coords, 0), [batch, 1, 1, 1])
  return coords
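
A minimal usage sketch, added for illustration and assuming TF 1.x graph mode with the meshgrid function above in scope; it simply confirms the documented output shape.

# Hypothetical usage (not part of the original example).
grid = meshgrid(batch=4, height=8, width=8, is_homogeneous=True)
with tf.Session() as sess:
    print(sess.run(tf.shape(grid)))  # expected: [4 3 8 8]
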
Example no. 3
    def _meshgrid(height, width, fp):
        x_t = tf.matmul(
            tf.ones(shape=tf.stack([height, 1])),
            tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
        y_t = tf.matmul(
            tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
            tf.ones(shape=tf.stack([1, width])))

        x_t_flat = tf.reshape(x_t, (1, -1))
        y_t_flat = tf.reshape(y_t, (1, -1))

        x_t_flat_b = tf.expand_dims(x_t_flat, 0) # [1, 1, h*w]
        y_t_flat_b = tf.expand_dims(y_t_flat, 0) # [1, 1, h*w]

        num_batch = tf.shape(fp)[0]
        px = tf.expand_dims(fp[:,:,0], 2) # [n, nx*ny, 1]
        py = tf.expand_dims(fp[:,:,1], 2) # [n, nx*ny, 1]
        d = tf.sqrt(tf.pow(x_t_flat_b - px, 2.) + tf.pow(y_t_flat_b - py, 2.))
        r = tf.pow(d, 2) * tf.log(d + 1e-6) # [n, nx*ny, h*w]
        x_t_flat_g = tf.tile(x_t_flat_b, tf.stack([num_batch, 1, 1])) # [n, 1, h*w]
        y_t_flat_g = tf.tile(y_t_flat_b, tf.stack([num_batch, 1, 1])) # [n, 1, h*w]
        ones = tf.ones_like(x_t_flat_g) # [n, 1, h*w]

        grid = tf.concat([ones, x_t_flat_g, y_t_flat_g, r], 1) # [n, nx*ny+3, h*w]
        return grid
Example no. 4
def gabor(n_values=32, sigma=1.0, mean=0.0):
	x = tf.linspace(-3.0, 3.0, n_values)
	z = (tf.exp(tf.negative(tf.pow(x - mean, 2.0)/ (2.0 * tf.pow(sigma, 2.0)))) * (1.0 / (sigma * tf.sqrt(2.0 * 3.145))))
	gauss_kernel = tf.matmul(tf.reshape(z, [n_values, 1]), tf.reshape(z,[1, n_values]))
	x = tf.reshape(tf.sin(tf.linspace(-3.0, 3.0, n_values)), [n_values, 1])
	y = tf.reshape(tf.ones_like(x), [1, n_values])
	gabor_kernel = tf.multiply(tf.matmul(x ,y), gauss_kernel)
	return gabor_kernel
Example no. 5
def get_input_vectors(shape, phases, scaling, offset):
    x = tf.reshape(tf_repeat(offset[0] + tf.linspace(0.0, tf.to_float(shape[0] - 1), shape[0]) / scaling,
                             shape[1] * phases),
                   [shape[0], shape[1], phases]) * tf.pow(2.0, tf.linspace(0.0, tf.to_float(phases - 1), phases))
    y = tf.reshape(tf_repeat(tf.tile(
        offset[1] + tf.linspace(0.0, tf.to_float(shape[1] - 1), shape[1]) / scaling,
        [shape[0]]
    ), phases), [shape[0], shape[1], phases]) * tf.pow(2.0, tf.linspace(0.0, tf.to_float(phases - 1), phases))
    z = tf.reshape(
        tf.tile(offset[2] + 10 * tf.linspace(0.0, tf.to_float(phases - 1), phases), [shape[0] * shape[1]]),
        [shape[0], shape[1], phases, 1])
    x = tf.reshape(x, [shape[0], shape[1], phases, 1])
    y = tf.reshape(y, [shape[0], shape[1], phases, 1])
    return tf.reshape(tf.concat(3, [x, y, z]), [shape[0] * shape[1] * phases, 3])
Example no. 6
def _meshgrid_abs(height, width):
  """Meshgrid in the absolute coordinates."""
  x_t = tf.matmul(
      tf.ones(shape=tf.stack([height, 1])),
      tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
  y_t = tf.matmul(
      tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
      tf.ones(shape=tf.stack([1, width])))
  x_t = (x_t + 1.0) * 0.5 * tf.cast(width - 1, tf.float32)
  y_t = (y_t + 1.0) * 0.5 * tf.cast(height - 1, tf.float32)
  x_t_flat = tf.reshape(x_t, (1, -1))
  y_t_flat = tf.reshape(y_t, (1, -1))
  ones = tf.ones_like(x_t_flat)
  grid = tf.concat([x_t_flat, y_t_flat, ones], axis=0)
  return grid
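
Another small check, added here under the same TF 1.x assumption: the absolute-coordinate grid stacks the flattened x, y and ones rows, so its shape is [3, height * width].

# Hypothetical usage of _meshgrid_abs() above.
grid = _meshgrid_abs(height=4, width=6)
with tf.Session() as sess:
    print(sess.run(tf.shape(grid)))  # expected: [3 24]
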
Example no. 7
def mgrid(*args, **kwargs):
    """
    create orthogonal grid
    similar to np.mgrid

    Parameters
    ----------
    args : int
        number of points on each axis
    low : float
        minimum coordinate value
    high : float
        maximum coordinate value

    Returns
    -------
    grid : tf.Tensor [len(args), args[0], ...]
        orthogonal grid
    """
    low = kwargs.pop("low", -1)
    high = kwargs.pop("high", 1)
    low = tf.to_float(low)
    high = tf.to_float(high)
    coords = (tf.linspace(low, high, arg) for arg in args)
    grid = tf.pack(tf.meshgrid(*coords, indexing='ij'))
    return grid
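
A usage sketch, added for illustration and assuming a pre-1.0 TensorFlow where tf.pack still exists (it was renamed tf.stack in TF 1.0):

# Hypothetical usage of mgrid() above: a 3-D orthogonal grid of 8x8x8 points in [-1, 1].
grid = mgrid(8, 8, 8, low=-1., high=1.)  # shape: [3, 8, 8, 8]
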
Example no. 8
  def testNanFromGradsDontPropagate(self):
    """Test that update with NaN gradients does not cause NaN in results."""
    def _nan_log_prob_with_nan_gradient(x):
      return np.nan * tf.reduce_sum(x)

    initial_x = tf.linspace(0.01, 5, 10)
    hmc = tfp.mcmc.HamiltonianMonteCarlo(
        target_log_prob_fn=_nan_log_prob_with_nan_gradient,
        step_size=2.,
        num_leapfrog_steps=5,
        seed=_set_seed(47))
    updated_x, kernel_results = hmc.one_step(
        current_state=initial_x,
        previous_kernel_results=hmc.bootstrap_results(initial_x))
    initial_x_, updated_x_, log_accept_ratio_ = self.evaluate(
        [initial_x, updated_x, kernel_results.log_accept_ratio])
    acceptance_probs = np.exp(np.minimum(log_accept_ratio_, 0.))

    tf.logging.vlog(1, 'initial_x = {}'.format(initial_x_))
    tf.logging.vlog(1, 'updated_x = {}'.format(updated_x_))
    tf.logging.vlog(1, 'log_accept_ratio = {}'.format(log_accept_ratio_))

    self.assertAllEqual(initial_x_, updated_x_)
    self.assertEqual(acceptance_probs, 0.)

    self.assertAllFinite(
        self.evaluate(tf.gradients(updated_x, initial_x)[0]))
    self.assertAllEqual(
        [True],
        [g is None for g in tf.gradients(
            kernel_results.proposed_results.grads_target_log_prob,
            initial_x)])
Example no. 9
 def _LinSpace(self, start, stop, num):
   # NOTE(touts): Needs to pass a graph to get a new session each time.
   with tf.Graph().as_default() as graph:
     with self.test_session(graph=graph, force_gpu=self.force_gpu):
       tf_ans = tf.linspace(start, stop, num, name="linspace")
       self.assertEqual([num], tf_ans.get_shape())
       return tf_ans.eval()
Example no. 10
    def __init__(self, config):
        self.dim_img = config.dim_img
        self.dim_wav = config.dim_wav
        self.dim_mem = config.dim_mem
        self.mem_size = self.steps = config.mem_size
        
        self.n_hop = config.n_hop
        self.batch_size = config.batch_size
        self.do_prob = config.do_prob # keep prob
        self.nl = config.nl
        self.learning_rate = config.learning_rate

        self.global_step = tf.Variable(0, name='g_step')
        self.A = tf.Variable(tf.random_normal([self.dim_wav, self.dim_mem]), name='A')
        self.b_A = tf.Variable(tf.random_normal([self.dim_mem]), name='b_A')
        self.B = tf.Variable(tf.random_normal([self.dim_img, self.dim_mem]), name='B')
        self.b_B = tf.Variable(tf.random_normal([self.dim_mem]), name='b_B')
        self.C = tf.Variable(tf.random_normal([self.dim_wav, self.dim_mem]), name='C')
        self.b_C = tf.Variable(tf.random_normal([self.dim_mem]), name='b_C')
        
        self._temporal = tf.linspace(0.0, np.float32(self.mem_size-1), self.mem_size)
        self.T_A = self.T_C = tf.Variable(self._temporal/tf.reduce_sum(self._temporal))
        
        self.W_o = tf.Variable(tf.random_normal([self.dim_mem, 1]))
        self.b_o = tf.Variable(tf.random_normal([1]))
Example no. 11
    def compute_center_coords(self, y_true, y_pred):
        batch_size = tf.shape(y_pred)[0]
        h = tf.shape(y_pred)[1]
        w = tf.shape(y_pred)[2]
        n_chans = tf.shape(y_pred)[3]
        n_dims = 5

        # weighted center of mass
        x = tf.cast(tf.tile(tf.reshape(self.xs, [1, h, w]), [batch_size, 1, 1]), tf.float32)
        y = tf.cast(tf.tile(tf.reshape(self.ys, [1, h, w]), [batch_size, 1, 1]), tf.float32)

        eps = 1e-8
        # grayscale
        pred_gray = tf.reduce_mean(y_pred, axis=-1)  # should be batch_size x h x w
        # normalize
        pred_gray = pred_gray - tf.reduce_min(pred_gray, axis=[1, 2], keepdims=True)
        pred_gray = pred_gray / (eps + tf.reduce_max(pred_gray, axis=[1, 2], keepdims=True))
        pred_gray = tf.clip_by_value(pred_gray, 0., 1.)

        # make each of these (batch_size, 1)
        weighted_x = tf.round(tf.expand_dims(
            tf.reduce_sum(x * pred_gray, axis=[1, 2]) / (eps + tf.reduce_sum(pred_gray, axis=[1, 2])), axis=-1))
        weighted_y = tf.round(tf.expand_dims(
            tf.reduce_sum(y * pred_gray, axis=[1, 2]) / (eps + tf.reduce_sum(pred_gray, axis=[1, 2])), axis=-1))
        batch_indices = tf.reshape(tf.linspace(0., tf.cast(batch_size, tf.float32) - 1., batch_size), [batch_size, 1])
        indices = tf.cast(tf.concat([batch_indices, weighted_y, weighted_x], axis=-1), tf.int32)
        #center_rgb = transform_network_utils.interpolate([y_true,  weighted_x, weighted_y], constant_vals=1.)
        center_rgb = tf.gather_nd(y_true, indices)
        center_rgb = tf.reshape(center_rgb, [batch_size, n_chans])

        center_point_xyrgb = tf.concat([
                        weighted_x, weighted_y, center_rgb
                    ], axis=-1)

        return pred_gray, center_point_xyrgb
Example no. 12
 def compute_auc(tp, fn, tn, fp, name):
   """Computes the roc-auc or pr-auc based on confusion counts."""
   rec = tf.div(tp + epsilon, tp + fn + epsilon)
   if curve == 'ROC':
     fp_rate = tf.div(fp, fp + tn + epsilon)
     x = fp_rate
     y = rec
   elif curve == 'R':  # recall auc
     x = tf.linspace(1., 0., num_thresholds)
     y = rec
   else:  # curve == 'PR'.
     prec = tf.div(tp + epsilon, tp + fp + epsilon)
     x = rec
     y = prec
   if summation_method == 'trapezoidal':
     return tf.reduce_sum(
       tf.multiply(x[:num_thresholds - 1] - x[1:],
                   (y[:num_thresholds - 1] + y[1:]) / 2.),
       name=name)
   elif summation_method == 'minoring':
     return tf.reduce_sum(
       tf.multiply(x[:num_thresholds - 1] - x[1:],
                   tf.minimum(y[:num_thresholds - 1], y[1:])),
       name=name)
   elif summation_method == 'majoring':
     return tf.reduce_sum(
       tf.multiply(x[:num_thresholds - 1] - x[1:],
                   tf.maximum(y[:num_thresholds - 1], y[1:])),
       name=name)
   else:
     raise ValueError('Invalid summation_method: %s' % summation_method)
Example no. 13
  def testRWM1DNNormal(self):
    """Sampling from the Standard Normal Distribution."""
    dtype = np.float32

    with self.test_session(graph=tf.Graph()) as sess:
      target = tfd.Normal(loc=dtype(0), scale=dtype(1))

      def make_kernel_fn(target_log_prob_fn, seed):
        return tfp.mcmc.HamiltonianMonteCarlo(
            target_log_prob_fn=target_log_prob_fn,
            seed=seed, step_size=1.0, num_leapfrog_steps=3)

      remc = tfp.mcmc.ReplicaExchangeMC(
          target_log_prob_fn=target.log_prob,
          inverse_temperatures=10.**tf.linspace(0., -2., 5),
          make_kernel_fn=make_kernel_fn,
          seed=42)

      samples, _ = tfp.mcmc.sample_chain(
          num_results=1000,
          current_state=dtype(1),
          kernel=remc,
          num_burnin_steps=500,
          parallel_iterations=1)  # For determinism.

      sample_mean = tf.reduce_mean(samples, axis=0)
      sample_std = tf.sqrt(
          tf.reduce_mean(tf.squared_difference(samples, sample_mean),
                         axis=0))
      [sample_mean_, sample_std_] = sess.run([sample_mean, sample_std])

    self.assertAllClose(sample_mean_, 0., atol=0.1, rtol=0.1)
    self.assertAllClose(sample_std_, 1., atol=0.1, rtol=0.1)
Example no. 14
 def testBijector(self, lower, upper):
   bijector = tfb.Reciprocal()
   self.assertEqual('reciprocal', bijector.name)
   x = tf.linspace(lower, upper, 100)
   y = 1. / x
   self.assertAllClose(self.evaluate(y), self.evaluate(bijector.forward(x)))
   self.assertAllClose(self.evaluate(x), self.evaluate(bijector.inverse(y)))
Example no. 15
def w(input_data, cu, kappas_t_1, config):
	
	batch_size = config.batch_size
	mixture_size = config.mixture_size
	vocab_length = config.vocab_length

	# split along dim of mixture size * 3
	hat_alphas_t, hat_betas_t, hat_kappas_t = tf.split(1, 3, input_data)

	alphas_t = tf.exp(hat_alphas_t)
	betas_t = tf.exp(hat_betas_t)
	kappas_t = tf.add(kappas_t_1, tf.exp(hat_kappas_t))

	speech_length = tf.shape(cu)[1]

	u = tf.linspace(1.0, tf.cast(speech_length,tf.float32) , speech_length)
	u = tf.expand_dims(u, 0)
	u = tf.expand_dims(u, 0)
	u = tf.tile(u, [batch_size, mixture_size, 1])

	alphas_t_expanded = tf.tile(tf.expand_dims(alphas_t, -1), [1, 1, speech_length])
	betas_t_expanded = tf.tile(tf.expand_dims(betas_t, -1), [1, 1, speech_length])
	kappas_t_expanded = tf.tile(tf.expand_dims(kappas_t, -1), [1, 1, speech_length])

	calc = tf.square(tf.sub(kappas_t_expanded, u))
	calc = tf.mul(calc, tf.neg(betas_t_expanded))
	calc = tf.exp(calc)
	calc = tf.mul(calc, alphas_t_expanded)

	phi_t = tf.expand_dims(tf.reduce_sum(calc, 1), 1)

	output = tf.squeeze(tf.batch_matmul(phi_t, cu), [1])

	return output, kappas_t, phi_t
Example no. 16
    def _meshgrid(self, height, width, depth):
        x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])),
                        tf.transpose(tf.expand_dims(tf.linspace(0.0,
                                                                tf.cast(width, tf.float32)-1.0, width), 1), [1, 0]))
        y_t = tf.matmul(tf.expand_dims(tf.linspace(0.0,
                                                   tf.cast(height, tf.float32)-1.0, height), 1),
                        tf.ones(shape=tf.stack([1, width])))

        x_t = tf.tile(tf.expand_dims(x_t, 2), [1, 1, depth])
        y_t = tf.tile(tf.expand_dims(y_t, 2), [1, 1, depth])

        z_t = tf.linspace(0.0, tf.cast(depth, tf.float32)-1.0, depth)
        z_t = tf.expand_dims(tf.expand_dims(z_t, 0), 0)
        z_t = tf.tile(z_t, [height, width, 1])

        return x_t, y_t, z_t
Example no. 17
 def _lin_space_weights(num, img_size):
   if num > 1:
     start_weights = tf.linspace(img_size - 1.0, 0.0, num)
     stop_weights = img_size - 1 - start_weights
   else:
     start_weights = tf.constant(num * [.5 * (img_size - 1)], dtype=tf.float32)
     stop_weights = tf.constant(num * [.5 * (img_size - 1)], dtype=tf.float32)
   return (start_weights, stop_weights)
Example no. 18
 def test_finds_max_of_long_array(self):
   # d - 1 == d in float32 and d = 3e7.
   # So this test only passes if we use double for the percentile indices.
   # If float is used, it fails with InvalidArgumentError about an index out of
   # bounds.
   x = tf.linspace(0., 3e7, num=int(3e7))
   minval = tfd.percentile(x, q=0, validate_args=True)
   self.assertAllEqual(0, self.evaluate(minval))
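
A quick NumPy illustration, added here to make the float32 claim in the comment above concrete:

import numpy as np
d = np.float32(3e7)
print(d - np.float32(1.0) == d)                   # True: d - 1 == d in float32
print(np.float64(3e7) - 1.0 == np.float64(3e7))   # False: float64 still resolves the difference
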
Example no. 19
 def _multi_gamma_sequence(self, a, p, name="multi_gamma_sequence"):
   """Creates sequence used in multivariate (di)gamma; shape = shape(a)+[p]."""
   with self._name_scope(name, values=[a, p]):
     # Linspace only takes scalars, so we'll add in the offset afterwards.
     seq = tf.linspace(
         tf.constant(0., dtype=self.dtype), 0.5 - 0.5 * p, tf.cast(
             p, tf.int32))
     return seq + tf.expand_dims(a, [-1])
Example no. 20
def calculate_image(noise_values, phases, shape):
    val = tf.floor((tf.add_n(tf.split(
        2,
        phases,
        tf.reshape(noise_values, [shape[0], shape[1], phases]) / tf.pow(
            2.0,
            tf.linspace(0.0, tf.to_float(phases - 1), phases))
    )) + 1.0) * 128)
    return tf.concat(2, [val, val, val])
Example no. 21
    def _meshgrid(height, width):
        # This should be equivalent to:
        #  x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),
        #                         np.linspace(-1, 1, height))
        #  ones = np.ones(np.prod(x_t.shape))
        #  grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])
        x_t = tf.matmul(
            tf.ones(shape=tf.stack([height, 1])),
            tf.transpose(a=tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), perm=[1, 0])
        )
        y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1), tf.ones(shape=tf.stack([1, width])))

        x_t_flat = tf.reshape(x_t, (1, -1))
        y_t_flat = tf.reshape(y_t, (1, -1))

        ones = tf.ones_like(x_t_flat)
        grid = tf.concat(axis=0, values=[x_t_flat, y_t_flat, ones])
        return grid
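
A small NumPy cross-check, added to mirror the equivalence described in the comment at the top of this example:

import numpy as np
h, w = 3, 4
x_np, y_np = np.meshgrid(np.linspace(-1, 1, w), np.linspace(-1, 1, h))
grid_np = np.vstack([x_np.flatten(), y_np.flatten(), np.ones(h * w)])
print(grid_np.shape)  # (3, 12), matching the TF grid above for height=3, width=4
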
Example no. 22
 def make_variable(self):
   n = 256
   shape = (n, n, n)
   items = n**3
   var = tf.Variable(
       tf.reshape(
           tf.linspace(1., float(items), items), shape),
       dtype=tf.float32)
   return var
Example no. 23
 def test_new_style_audio(self):
   audio = tf.reshape(tf.linspace(0.0, 100.0, 4 * 10 * 2), (4, 10, 2))
   op = audio_summary.op('k488',
                         tf.cast(audio, tf.float32),
                         sample_rate=44100,
                         display_name='Piano Concerto No.23',
                         description='In **A major**.')
   value = self._value_from_op(op)
   assert value.HasField('tensor'), value
   self._assert_noop(value)
Example no. 24
 def _create_features(self):
   """Creates the coordinates for resampling. If field_transform is
   None, these are constant and are created in field space; otherwise,
   the final coordinates will be transformed by an input tensor
   representing a transform from output coordinates to field
    coordinates, so they are created in output coordinate space.
   """
   embedded_output_shape = list(self._output_shape)+[1]*(len(self._source_shape) - len(self._output_shape))
   embedded_coeff_shape = list(self._coeff_shape)+[1]*(len(self._source_shape) - len(self._output_shape))
   if self._field_transform==None and self._interpolation == 'BSPLINE':
     range_func= lambda f,x: tf.linspace(1.,f-2.,x)
   elif self._field_transform==None and self._interpolation != 'BSPLINE':
     range_func= lambda f,x: tf.linspace(0.,f-1.,x)
   else:
     range_func= lambda f,x: np.arange(x,dtype=np.float32)
     embedded_output_shape+=[1] # make homogeneous
     embedded_coeff_shape+=[1]
   ranges = [range_func(f,x) for f,x in zip(embedded_coeff_shape,embedded_output_shape)]
   coords= tf.stack([tf.reshape(x,[1,-1]) for x in tf.meshgrid(*ranges, indexing='ij')],2)
   return coords
Example no. 25
  def test_on_1d_array_log_spaced_grid(self):
    x_min = 1.
    x_max = 100000.
    num_pts = 10

    # With only 10 interpolating points between x_ref = 1 and 100000,
    # and y_ref = log(x_ref), we better use a log-spaced grid, or else error
    # will be very bad.
    implied_x_ref = tf.exp(tf.linspace(tf.log(x_min), tf.log(x_max), num_pts))
    y_ref = tf.log(implied_x_ref)

    x = tf.linspace(x_min + 0.123, x_max, 20)
    y_expected = tf.log(x)

    with self.test_session():
      y = tfp.math.interp_regular_1d_grid(
          x, x_min, x_max, y_ref, grid_regularizing_transform=tf.log)
      self.assertAllEqual(y_expected.shape, y.shape)
      y_ = self.evaluate(y)
      # Super duper accuracy!  Note accuracy was not good if I did not use the
      # grid_regularizing_transform.
      self.assertAllClose(y_, y_expected, atol=0, rtol=1e-6)
Example no. 26
 def generate_boxes(fm_side, scale, aspect_ratios=[]):
     """generates a regular grid fm_size * fm_size of bboxes
     corresponding to the current scale"""
     stride_space = tf.linspace(0.5 / fm_side, 1 - 0.5 / fm_side, fm_side)
     yv, xv = tf.meshgrid(stride_space, stride_space, indexing='ij')
     h_s, w_s = tf.zeros_like(xv) + scale, tf.zeros_like(xv) + scale
     xywh_space = tf.stack([xv, yv, w_s, h_s], 2)
     bbox_set = tf.reshape(xywh_space, [fm_side, fm_side, 1, 4])
     priors = [bbox_set]
     for aspect_ratio in aspect_ratios:
         # using a reference grid of square bboxes generates asymmetric bboxes
         priors.append(adjust_for_aspect_ratio(bbox_set, aspect_ratio))
     return priors
Example no. 27
  def testRWM2DMixNormal(self):
    """Sampling from a 2-D Mixture Normal Distribution."""
    dtype = np.float32

    # By symmetry, target has mean [0, 0]
    # Therefore, Var = E[X^2] = E[E[X^2 | c]], where c is the component.
    # Now..., for the first component,
    #   E[X1^2] =  Var[X1] + Mean[X1]^2
    #           =  0.1^2 + 1^2,
    # and similarly for the second.  As a result,
    # Var[mixture] = 1.01.
    target = tfd.MixtureSameFamily(
        mixture_distribution=tfd.Categorical(probs=[0.5, 0.5]),
        components_distribution=tfd.MultivariateNormalDiag(
            # Mixture components are 20 standard deviations apart!
            loc=[[-1., -1], [1., 1.]],
            scale_identity_multiplier=[0.1, 0.1]))

    def make_kernel_fn(target_log_prob_fn, seed):
      return tfp.mcmc.HamiltonianMonteCarlo(
          target_log_prob_fn=target_log_prob_fn,
          seed=seed,
          step_size=0.3,
          num_leapfrog_steps=3)

    remc = tfp.mcmc.ReplicaExchangeMC(
        target_log_prob_fn=target.log_prob,
        # Verified that test fails if inverse_temperatures = [1]
        inverse_temperatures=10.**tf.linspace(0., -2., 5),
        make_kernel_fn=make_kernel_fn,
        seed=_set_seed(888))

    samples, _ = tfp.mcmc.sample_chain(
        num_results=2000,
        # Start at one of the modes, in order to make mode jumping necessary
        # if we want to pass test.
        current_state=np.ones(2, dtype=dtype),
        kernel=remc,
        num_burnin_steps=500,
        parallel_iterations=1)  # For determinism.
    self.assertAllEqual((2000, 2), samples.shape)

    sample_mean = tf.reduce_mean(samples, axis=0)
    sample_std = tf.sqrt(
        tf.reduce_mean(tf.squared_difference(samples, sample_mean), axis=0))
    [sample_mean_, sample_std_] = self.evaluate([sample_mean, sample_std])

    self.assertAllClose(sample_mean_, [0., 0.], atol=0.3, rtol=0.3)
    self.assertAllClose(
        sample_std_, [np.sqrt(1.01), np.sqrt(1.01)], atol=0.1, rtol=0.1)
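
A quick arithmetic check, added for clarity, of the Var[mixture] = 1.01 claim in the comment at the top of this test: each component contributes E[X^2 | c] = Var + Mean^2 = 0.1**2 + 1**2 per coordinate.

import numpy as np
assert np.isclose(0.5 * (0.1**2 + 1.0**2) + 0.5 * (0.1**2 + 1.0**2), 1.01)
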
Example no. 28
def main(_):
  sns.set(color_codes=True)
  ed.set_seed(42)

  # DATA. We use a placeholder to represent a minibatch. During
  # inference, we generate data on the fly and feed `x_ph`.
  x_ph = tf.placeholder(tf.float32, [FLAGS.M, 1])

  # MODEL
  with tf.variable_scope("Gen"):
    eps = tf.linspace(-8.0, 8.0, FLAGS.M) + 0.01 * tf.random_normal([FLAGS.M])
    eps = tf.reshape(eps, [FLAGS.M, 1])
    x = generative_network(eps)

  # INFERENCE
  optimizer = tf.train.GradientDescentOptimizer(0.03)
  optimizer_d = tf.train.GradientDescentOptimizer(0.03)

  inference = ed.WGANInference(
      data={x: x_ph}, discriminator=discriminative_network)
  inference.initialize(
      optimizer=optimizer, optimizer_d=optimizer_d, penalty=0.1,
      n_iter=1000)
  tf.global_variables_initializer().run()

  for _ in range(inference.n_iter):
    x_data = next_batch(FLAGS.M).reshape([FLAGS.M, 1])
    for _ in range(5):
      info_dict_d = inference.update(feed_dict={x_ph: x_data}, variables="Disc")

    info_dict = inference.update(feed_dict={x_ph: x_data}, variables="Gen")
    info_dict['t'] = info_dict['t'] // 6  # say set of 6 updates is 1 iteration
    info_dict['loss_d'] = info_dict_d['loss_d']  # get disc loss from update
    inference.print_progress(info_dict)

  # CRITICISM
  db, pd, pg = get_samples(x_ph)
  db_x = np.linspace(-8, 8, len(db))
  p_x = np.linspace(-8, 8, len(pd))
  f, ax = plt.subplots(1)
  ax.plot(db_x, db, label="Decision boundary")
  ax.set_ylim(0, 1)
  plt.plot(p_x, pd, label="Real data")
  plt.plot(p_x, pg, label="Generated data")
  plt.title("1D Generative Adversarial Network")
  plt.xlabel("Data values")
  plt.ylabel("Probability density")
  plt.legend()
  plt.show()
Example no. 29
  def testLogProbSameFor1D(self):
    # 1D MVT is exactly a regular Student's T distribution.
    t_dist = student_t.StudentT(
        df=self._input(5.), loc=self._input(2.), scale=self._input(3.))
    scale = tf.linalg.LinearOperatorDiag([self._input(3.)])
    mvt_dist = mvt.MultivariateStudentTLinearOperator(
        loc=[self._input(2.)], df=self._input(5.), scale=scale)

    test_points = tf.cast(tf.linspace(-10.0, 10.0, 100), self.dtype)

    t_log_probs = self.evaluate(t_dist.log_prob(test_points))
    mvt_log_probs = self.evaluate(
        mvt_dist.log_prob(test_points[..., tf.newaxis]))

    self.assertAllClose(t_log_probs, mvt_log_probs)
Example no. 30
 def _compute_quantiles():
   """Helper to build quantiles."""
   # Omit {0, 1} since they might lead to Inf/NaN.
   zero = tf.zeros([], dtype=dist.dtype)
   edges = tf.linspace(zero, 1., quadrature_size + 3)[1:-1]
   # Expand edges so its broadcast across batch dims.
   edges = tf.reshape(
       edges,
       shape=tf.concat(
           [[-1], tf.ones([batch_ndims], dtype=tf.int32)], axis=0))
   quantiles = dist.quantile(edges)
   # Cyclically permute left by one.
   perm = tf.concat([tf.range(1, 1 + batch_ndims), [0]], axis=0)
   quantiles = tf.transpose(quantiles, perm)
   return quantiles
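
An illustration, added here, of the cyclic permutation built above: for batch_ndims = 2 the permutation is [1, 2, 0], i.e. the leading quadrature axis is moved to the last position.

# Hypothetical stand-alone check (batch_ndims chosen arbitrarily).
batch_ndims = 2
perm = tf.concat([tf.range(1, 1 + batch_ndims), [0]], axis=0)  # -> [1, 2, 0]
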
Example no. 31
import tensorflow as tf
import os # suppress the CPU AVX2 info messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
a=tf.zeros([2, 3], tf.int32)
b=tf.zeros_like(a, dtype=None, name=None) # a zero matrix with the same shape as the given tensor
c=tf.fill([2, 3], 8)
d=tf.linspace(10.0, 13.0, 10) # start, stop, number of points
e=tf.range(1, limit=10, delta=1, dtype=None,name='range') # start, limit, step size
a1=tf.constant([[1,2,3],[4,5,6],[7,8,9]], tf.float32)
a2=tf.constant([[1,2,3],[4,5,6],[7,8,9]], tf.float32)
f=tf.matmul(a1,a2)
g=tf.linalg.det(f).numpy() # det requires float32 input

print(g)
Example no. 32
zero_similar = tf.Variable(tf.zeros_like(zero_var))
ones_similar = tf.Variable(tf.ones_like(ones_var))

sess.run(ones_similar.initializer)
sess.run(zero_similar.initializer)

# Fill shape with a constant
fill_var = tf.Variable(tf.fill([row_dim, col_dim], -1))

# Create a variable from a constant
const_var = tf.Variable(tf.constant([8, 6, 7, 5, 3, 0, 9]))
# This can also be used to fill an array:
const_fill_var = tf.Variable(tf.constant(-1, shape=[row_dim, col_dim]))

# Sequence generation
linear_var = tf.Variable(tf.linspace(
    start=0.0, stop=1.0, num=3))  # Generates [0.0, 0.5, 1.0]; the endpoint is included

sequence_var = tf.Variable(
    tf.range(start=6, limit=15,
             delta=3))  # Generates [6, 9, 12]; the limit is not included

# Random Numbers

# Random Normal
rnorm_var = tf.random_normal([row_dim, col_dim], mean=0.0, stddev=1.0)

# Add summaries to tensorboard
merged = tf.summary.merge_all()

# Initialize graph writer:
Example no. 33
 def _LinSpace(self, start, stop, num):
     with self.test_session():
         tf_ans = tf.linspace(start, stop, num, name="linspace")
         self.assertEqual([num], tf_ans.get_shape())
         return tf_ans.eval()
Example no. 34
"""## `linear_lookup()`

Synthesize audio with an array of sinusoidal oscillators. Frequencies and amplitudes must be provided at audio rate.

### Ex: Sinusoidal lookup

As a simple example, a lookup from a sine-wave wavetable produces a sine wave at the lookup frequency.
"""

n_samples = int(sample_rate * 1.0)
n_wavetable = 2048
n_cycles = 440

# Sin wave
wavetable = tf.sin(tf.linspace(0.0, 2.0 * np.pi, n_wavetable))
wavetable = wavetable[tf.newaxis, tf.newaxis, :]

phase = tf.linspace(0.0, n_cycles, n_samples) % 1.0
phase = phase[tf.newaxis, :, tf.newaxis]

output = ddsp.core.linear_lookup(phase, wavetable)
target = np.sin(np.linspace(0.0, 2.0 * np.pi * n_cycles, n_samples))

# For plotting.
output = output[0]
phase = phase[0, :, 0]

# Plot the results
plt.figure(figsize=(12, 6))
Example no. 35
batch_size = 2
s = tf.placeholder(tf.float32, [1])
gt = tf.placeholder(tf.float32, [2, 3, 3, 7])
# dtype must be passed by keyword; the second positional argument of tf.Variable is `trainable`.
t = tf.Variable([[4., 9., 16., 25., 30.], [5., 10., 17., 26., 31.]],
                dtype=tf.float32)
print("t.shape:", t.shape)

d = 3
w = 3

z_b = tf.tile(t, [1, d * w])
print("z_b.shape:", z_b.shape)
matrix = tf.reshape(z_b, [batch_size, d, w, 5])
print("matrix.shape: ", matrix.shape)

x = tf.linspace(tf.constant(-1, tf.float32), tf.constant(1, tf.float32), w)
print('x:', x)
y = tf.linspace(tf.constant(-1, tf.float32), tf.constant(1, tf.float32), w)
print('y:', y)

xb, yb = tf.meshgrid(x, y)
print("xb: ", xb)

xb_dim1 = tf.expand_dims(xb, 2)
print("xb_dim1.shape: ", xb_dim1.shape)
#xb_const = tf.stop_gradient(xb_dim1)
xb_const = xb_dim1
print("xb_const: ", xb_const)

yb_dim1 = tf.expand_dims(yb, 2)
print("yb_dim1.shape: ", yb_dim1.shape)
def main():

    # the data is assumed to have been sampled from a two-component Gaussian mixture model (GMM);
    # infer the parameters of that GMM.
    # download observation data and convert to tensor
    url = 'https://raw.githubusercontent.com/CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers/master/Chapter3_MCMC/data/mixture_data.csv'
    filename = wget.download(url)
    data = np.loadtxt(filename, delimiter=',')
    data = tf.constant(data, dtype=tf.float32)

    step_size = tf.Variable(0.5, dtype=tf.float32, trainable=False)
    bijectors = [
        tfp.bijectors.Identity(),
        tfp.bijectors.Identity(),
        tfp.bijectors.Identity()
    ]
    # infer the posterior from the observations with MCMC
    [model1_probs, mus, sigmas], kernel_results = tfp.mcmc.sample_chain(
        num_results=25000,
        num_burnin_steps=1000,
        current_state=[
            tf.constant(0.5),
            tf.constant([120., 190.]),
            tf.constant([10., 10.])
        ],
        kernel=tfp.mcmc.TransformedTransitionKernel(
            inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
                target_log_prob_fn=log_prob_generator(data),
                num_leapfrog_steps=2,
                step_size=step_size,
                step_size_update_fn=tfp.mcmc.
                make_simple_step_size_update_policy(num_adaptation_steps=1000),
                state_gradients_are_stopped=True),
            bijector=bijectors))

    print('acceptance rate: %f' % tf.math.reduce_mean(
        tf.cast(kernel_results.inner_results.is_accepted, dtype=tf.float32)))
    print('final step size: %f' % tf.math.reduce_mean(
        kernel_results.inner_results.extra.step_size_assign[-100:]))

    # plot the samples
    # for pretty colors later in the book.
    colors = ['#5DA5DA', '#F15854'
              ] if mus[-1, 0] > mus[-1, 1] else ['#F15854', '#5DA5DA']
    plt.figure(figsize(12.5, 9))

    # plot means of two models.
    plt.subplot(311)
    plt.plot(mus[:, 0], label="trace of center 0", c=colors[0], lw=1)
    plt.plot(mus[:, 1], label="trace of center 1", c=colors[1], lw=1)
    plt.title("Traces of unknown parameters")
    leg = plt.legend(loc="upper right")
    leg.get_frame().set_alpha(0.7)

    # plot sigmas of two models.
    plt.subplot(312)
    plt.plot(sigmas[:, 0],
             label="trace of standard deviation of cluster 0",
             c=colors[0],
             lw=1)
    plt.plot(sigmas[:, 1],
             label="trace of standard deviation of cluster 1",
             c=colors[1],
             lw=1)
    plt.legend(loc="upper left")

    # plot mixture probability of GMM.
    plt.subplot(313)
    plt.plot(model1_probs,
             label="$p$: frequency of assignment to cluster 0",
             c='#60BD68',
             lw=1)
    plt.xlabel("Steps")
    plt.ylim(0, 1)
    plt.legend()

    plt.show()

    # sample 50000 extra samples.
    # infer the posterior from the observations with MCMC
    [
        model1_probs_extended, mus_extended, sigmas_extended
    ], kernel_results = tfp.mcmc.sample_chain(
        num_results=50000,
        num_burnin_steps=0,
        current_state=[model1_probs[-1], mus[-1], sigmas[-1]],
        kernel=tfp.mcmc.TransformedTransitionKernel(
            inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
                target_log_prob_fn=log_prob_generator(data),
                num_leapfrog_steps=2,
                step_size=step_size,
                step_size_update_fn=tfp.mcmc.
                make_simple_step_size_update_policy(num_adaptation_steps=1000),
                state_gradients_are_stopped=True),
            bijector=bijectors))

    print('acceptance rate: %f' % tf.math.reduce_mean(
        tf.cast(kernel_results.inner_results.is_accepted, dtype=tf.float32)))
    print('final step size: %f' % tf.math.reduce_mean(
        kernel_results.inner_results.extra.step_size_assign[-100:]))

    plt.figure(figsize(12.5, 4))

    # draw the first 25000 samples
    x = tf.range(25000)
    plt.plot(x,
             mus[:, 0],
             label='previous trace of center 0',
             lw=1,
             alpha=0.4,
             c=colors[1])
    plt.plot(x,
             mus[:, 1],
             label='previous trace of center 1',
             lw=1,
             alpha=0.4,
             c=colors[0])
    # draw the following 50000 samples
    x = tf.range(25000, 75000)
    plt.plot(x,
             mus_extended[:, 0],
             label='new trace of center 0',
             lw=1,
             c='#5DA5DA')
    plt.plot(x,
             mus_extended[:, 1],
             label='new trace of center 1',
             lw=1,
             c='#F15854')

    plt.title('Traces of unknown center parameters')
    leg = plt.legend(loc='upper right')
    leg.get_frame().set_alpha(0.8)
    plt.xlabel('Steps')

    plt.show()

    plt.figure(figsize(12.5, 8))
    for i in range(2):
        plt.subplot(2, 2, 2 * i + 1)
        plt.title('Posterior of center of cluster %d' % i)
        plt.hist(mus_extended[:, i],
                 color=colors[i],
                 bins=30,
                 histtype='stepfilled')

        plt.subplot(2, 2, 2 * i + 2)
        plt.title('Posterior of standard deviation of cluster %d' % i)
        plt.hist(sigmas_extended[:, i],
                 color=colors[i],
                 bins=30,
                 histtype='stepfilled')
    plt.tight_layout()

    plt.show()

    dist_0 = tfp.distributions.Normal(
        loc=tf.math.reduce_mean(mus_extended[:, 0]),
        scale=tf.math.reduce_mean(sigmas_extended[:, 0]))
    dist_1 = tfp.distributions.Normal(
        loc=tf.math.reduce_mean(mus_extended[:, 1]),
        scale=tf.math.reduce_mean(sigmas_extended[:, 1]))
    prob_assignment_1 = dist_0.prob(data)
    prob_assignment_2 = dist_1.prob(data)
    probs_assignments = 1. - (prob_assignment_1 /
                              (prob_assignment_1 + prob_assignment_2))
    probs_assignments_inv = 1. - probs_assignments
    # cluster_probs.shape = (data.shape[0], 2)
    cluster_probs = tf.transpose(
        tf.stack([probs_assignments, probs_assignments_inv]))
    # burned_assignment_trace.shape = (300, data.shape[0])
    burned_assignment_trace = tfp.distributions.Categorical(
        probs=cluster_probs).sample(sample_shape=300)

    plt.figure(figsize(12.5, 5))
    plt.cmap = mpl.colors.ListedColormap(colors)
    output = tf.stack([
        tf.gather(burned_assignment_trace[i, ...], tf.argsort(data))
        for i in tf.range(burned_assignment_trace.shape[0])
    ])
    plt.imshow(output.numpy(), cmap=plt.cmap, aspect=.4, alpha=.9)
    plt.xticks(
        tf.range(0, data.shape[0], 40).numpy(),
        ["%.2f" % s for s in tf.sort(data)[::40]])
    plt.ylabel('posterior sample')
    plt.xlabel('value of $i$th data point')
    plt.title('Posterior labels of data points')

    plt.show()

    plt.figure(figsize(12.5, 5))
    assign_trace = tf.gather(probs_assignments, tf.argsort(data))
    plt.scatter(tf.gather(data, tf.argsort(data)).numpy(),
                assign_trace.numpy(),
                cmap=mpl.colors.LinearSegmentedColormap.from_list(
                    'BMH', colors),
                c=(1 - assign_trace).numpy(),
                s=50)
    plt.ylim(-.05, 1.05)
    plt.title('Probability of data point belonging to cluster 0')
    plt.ylabel('probability')
    plt.xlabel('value of data point')

    plt.show()

    x = tf.linspace(20., 300., 500)
    # posterior means of the mixture parameters (reduce over the sample axis)
    mu_mean = tf.math.reduce_mean(mus_extended, axis=0)
    sigma_mean = tf.math.reduce_mean(sigmas_extended, axis=0)
    prob_mean = tf.math.reduce_mean(model1_probs_extended, axis=0)

    plt.hist(data.numpy(),
             bins=20,
             histtype='step',
             density=True,
             color='k',
             lw=2,
             label='histogram of data')
    y = prob_mean * tfp.distributions.Normal(loc=mu_mean[0],
                                             scale=sigma_mean[0]).prob(x)
    plt.plot(x.numpy(),
             y.numpy(),
             label='Cluster 0 (using posterior-mean parameters)',
             lw=3)
    plt.fill_between(x.numpy(), y.numpy(), color=colors[1], alpha=0.3)
    y = (1 - prob_mean) * tfp.distributions.Normal(loc=mu_mean[1],
                                                   scale=sigma_mean[1]).prob(x)
    plt.plot(x.numpy(),
             y.numpy(),
             label='Cluster 1 (using posterior-mean parameters)',
             lw=3)
    plt.fill_between(x.numpy(), y.numpy(), color=colors[0], alpha=0.3)
    plt.legend(loc='upper left')
    plt.title('Visualizing Clusters using posterior-mean parameters')
    plt.show()
Example no. 37
def lat_long_grid(shape, epsilon=1.0e-12):
    return tf.meshgrid(
        tf.linspace(-np.pi + np.pi / shape[1], np.pi - np.pi / shape[1],
                    shape[1]),
        tf.linspace(-np.pi / 2.0 + np.pi / (2 * shape[0]),
                    np.pi / 2.0 - np.pi / (2 * shape[0]), shape[0]))
Example no. 38
def linspace(*args, **kwargs):
    a = _tf.linspace(*args, **kwargs)
    if a.dtype is float64:
        a = cast(a, dtype=float32)

    return a
Example no. 39
    def __init__(self,
                 sess,
                 num_actions,
                 num_atoms=51,
                 vmax=10.,
                 gamma=0.99,
                 update_horizon=1,
                 min_replay_history=20000,
                 update_period=4,
                 target_update_period=8000,
                 epsilon_fn=dqn_agent.linearly_decaying_epsilon,
                 epsilon_train=0.01,
                 epsilon_eval=0.001,
                 epsilon_decay_period=250000,
                 replay_scheme='prioritized',
                 tf_device='/cpu:*',
                 use_staging=True,
                 optimizer=tf.train.AdamOptimizer(learning_rate=0.00025,
                                                  epsilon=0.0003125),
                 summary_writer=None,
                 summary_writing_frequency=500,
                 hsv_color=False):

        print('--------in RainbowRGBAgent------')
        print('min_replay_history = ', min_replay_history)
        # tf.logging.info('Creating %s agent with the following parameters:',
        #                 self.__class__.__name__)
        # tf.logging.info('\t gamma: %f', gamma)
        # tf.logging.info('\t update_horizon: %f', update_horizon)
        # tf.logging.info('\t min_replay_history: %d', min_replay_history)
        # tf.logging.info('\t update_period: %d', update_period)
        # tf.logging.info('\t target_update_period: %d', target_update_period)
        # tf.logging.info('\t epsilon_train: %f', epsilon_train)
        # tf.logging.info('\t epsilon_eval: %f', epsilon_eval)
        # tf.logging.info('\t epsilon_decay_period: %d', epsilon_decay_period)
        # tf.logging.info('\t tf_device: %s', tf_device)
        # tf.logging.info('\t use_staging: %s', use_staging)
        # tf.logging.info('\t optimizer: %s', optimizer)
        # We need this because some tools convert round floats into ints.
        vmax = float(vmax)
        self._num_atoms = num_atoms
        self._support = tf.linspace(-vmax, vmax, num_atoms)
        self._replay_scheme = replay_scheme
        # TODO(b/110897128): Make agent optimizer attribute private.
        self.optimizer = optimizer

        self.num_actions = num_actions
        self.gamma = gamma
        self.update_horizon = update_horizon
        self.cumulative_gamma = math.pow(gamma, update_horizon)
        self.min_replay_history = min_replay_history
        self.target_update_period = target_update_period
        self.epsilon_fn = epsilon_fn
        self.epsilon_train = epsilon_train
        self.epsilon_eval = epsilon_eval
        self.epsilon_decay_period = epsilon_decay_period
        self.update_period = update_period
        self.eval_mode = False
        self.training_steps = 0
        self.optimizer = optimizer
        self.summary_writer = summary_writer
        self.summary_writing_frequency = summary_writing_frequency

        self.hsv_color = hsv_color
        with tf.device(tf_device):
            state_shape = [
                1, dqn_agent.OBSERVATION_SHAPE[0],
                dqn_agent.OBSERVATION_SHAPE[1], dqn_agent.OBSERVATION_SHAPE[2],
                dqn_agent.OBSERVATION_SHAPE[3] * dqn_agent.STACK_SIZE
            ]
            self.state = np.zeros(state_shape)
            self.state_ph = tf.placeholder(tf.uint8,
                                           state_shape,
                                           name='state_ph')

            self._replay = self._build_replay_buffer(use_staging)

            self._build_networks()

            self._train_op = self._build_train_op()
            self._sync_qt_ops = self._build_sync_op()

            print('self.state_ph= ', self.state_ph)

        if self.summary_writer is not None:
            # All tf.summaries should have been defined prior to running this.
            self._merged_summaries = tf.summary.merge_all()
        self._sess = sess
        self._saver = tf.train.Saver(max_to_keep=3)

        # Variables to be initialized by the agent once it interacts with the
        # environment.
        self._observation = None
        self._last_observation = None
Example no. 40
def theta_y_grid(shape):
    return tf.meshgrid(tf.linspace(-np.pi, np.pi, shape[1]),
                       tf.linspace(-1., 1., shape[0]))
Example no. 41
def uv_grid(shape):
    return tf.meshgrid(
        tf.linspace(-1. + 1. / shape[1], 1. - 1. / shape[1], shape[1]),
        tf.linspace(-1. + 1. / shape[0], 1. - 1. / shape[0], shape[0]))
Example no. 42
def main(n):
    cache = "set-systems/%d-%d.npy" % (n, n)
    slices_np = np.load(open(cache, "rb"))
    slices_np = slices_np.reshape((-1, n * n))
    print(slices_np.shape)
    slices = tf.constant(slices_np, dtype=tf.float32)

    def constraint(x):
        # x /= tf.reduce_sum(x)
        x = tf.clip_by_value(x, 0., 1.)
        return x

    coeffs = tf.Variable(np.ones(10), dtype=tf.float32)

    parametric = False
    if parametric:
        xvar = tf.expand_dims(tf.linspace(-1., 1., n), 0)
        yvar = tf.expand_dims(tf.linspace(-1., 1., n), 1)

        # these are our building blocks:
        X1 = tf.abs(xvar)
        X2 = xvar * xvar
        Y1 = tf.abs(yvar)
        Y2 = yvar * yvar
        D1 = X1 + Y1
        D2 = X2 + Y2
        Dinf = tf.maximum(X1, Y1)

        c0, c1, c2, c3, c4, c5, c6, c7, c8, c9 = coeffs[0], coeffs[1], coeffs[
            2], coeffs[3], coeffs[4], coeffs[5], coeffs[6], coeffs[7], coeffs[
                8], coeffs[9]

        V = D1
        # for n=20: poly(Dinf) ~ 13.01, poly(D1) ~ 13.23, poly(D2) ~ 13.43.
        lag = c0 + c1 * V + c2 * V * V + c3 * V * V * V + c4 * V * V * V * V

        lag = c0 + c1 * X2 + c2 * Y2 + c3 * X2 * Y2

        # lag = c0 + c1 * R2 + c2 * R2 * R2 + c3 * R2 * R2 * R2 + c4 * R2 * R2 * R2 * R2
        lag = c0 + c1 * X2 + c2 * Y2 + c3 * X2 * Y2 + c4 * X2 * X2 + c5 * Y2 * Y2 + c6 * X2 * X2 * Y2 + c7 * X2 * Y2 * Y2 + c8 * X2 * X2 * Y2 * Y2
        # lag = c0 + c1 * X2 + c1 * Y2 + c3 * X2 * Y2
        # lag = c0 * 0 + X2 + Y2 - 2 * X2 * Y2

        lag = c0 + c1 * X1 + c2 * Y1 + c3 * X1 * Y1 + c4 * X1 * X1 + c5 * Y1 * Y1 + c6 * X1 * X1 * Y1 + c7 * X1 * Y1 * Y1 + c8 * X1 * X1 * Y1 * Y1

        # lag = c0 + c1 * Dinf + c2 * tf.sqrt(D2) + c3 * D1 + c4 * X2 + c5 * Y2 + c6 * X2 * Y2

        lag = tf.reshape(lag, [-1])
    else:
        # this is the nonparametric solution:
        lag = tf.Variable(np.ones(n * n),
                          dtype=tf.float32,
                          constraint=constraint)

    target = tf.reduce_max(tf.tensordot(slices, lag, axes=1)) / tf.reduce_sum(
        tf.nn.relu(lag))

    global_step = tf.Variable(0, trainable=False)
    starter_learning_rate = 0.001
    learning_rate = tf.compat.v1.train.exponential_decay(starter_learning_rate,
                                                         global_step,
                                                         10000,
                                                         0.6,
                                                         staircase=True)

    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    train_step = optimizer.minimize(target, global_step=global_step)
    with tf.Session() as sess:
        init = tf.initialize_all_variables()
        sess.run(init)

        for i in range(100000):
            sess.run(train_step)
            if i % 1000 == 0:
                target_val, lag_val = sess.run([target, lag])
                print(i, 1.0 / target_val, lag_val.min(), lag_val.max(),
                      lag_val.sum())
        best_lag = sess.run(lag).reshape((n, n))
        np.save(open("lagrangian-tensorflow.%d-%d.npy" % (n, n), "wb"),
                best_lag)
        best_coeffs = sess.run(coeffs)
        print("best_coeffs", best_coeffs)
        plt.imshow(best_lag)
        plt.show()
Example no. 43
import matplotlib.pyplot as plt


def func(x):
    """

    :param x: [b, 2]
    :return:
    """
    z = tf.math.sin(x[..., 0]) + tf.math.sin(x[..., 1])

    return z


x = tf.linspace(0., 2 * 3.14, 500)
y = tf.linspace(0., 2 * 3.14, 500)
# [500, 500]
point_x, point_y = tf.meshgrid(x, y)
# [500, 500, 2]
points = tf.stack([point_x, point_y], axis=2)
# points = tf.reshape(points, [-1, 2])
print('points:', points.shape)
z = func(points)
print('z:', z.shape)

plt.figure('plot 2d func value')
plt.imshow(z, origin='lower', interpolation='none')
plt.colorbar()

plt.figure('plot 2d func contour')
Example no. 44
def inference(images,
              cams,
              depth_num,
              depth_start,
              depth_interval,
              network_mode,
              is_master_gpu=True,
              trainable=True,
              inverse_depth=False):
    """ infer depth image from multi-view images and cameras """

    # dynamic gpu params
    depth_end = depth_start + \
        (tf.cast(depth_num, tf.float32) - 1) * depth_interval

    # reference image
    ref_image = tf.squeeze(tf.slice(images, [0, 0, 0, 0, 0],
                                    [-1, 1, -1, -1, 3]),
                           axis=1)
    ref_cam = tf.squeeze(tf.slice(cams, [0, 0, 0, 0, 0], [-1, 1, 2, 4, 4]),
                         axis=1)

    # image feature extraction
    reuse = not is_master_gpu
    ref_tower = UNetDS2GN({'data': ref_image},
                          trainable=trainable,
                          mode=network_mode,
                          reuse=reuse)
    view_towers = []
    for view in range(1, FLAGS.view_num):
        view_image = tf.squeeze(tf.slice(images, [0, view, 0, 0, 0],
                                         [-1, 1, -1, -1, -1]),
                                axis=1)
        view_tower = UNetDS2GN({'data': view_image},
                               trainable=trainable,
                               mode=network_mode,
                               reuse=True)
        view_towers.append(view_tower)
    """
    cam_residuals = []
    for view in range(1, FLAGS.view_num): 
        view_cam = tf.squeeze(
            tf.slice(cams, [0, view, 0, 0, 0], [-1, 1, 2, 4, 4]), axis=1)
        view_image = tf.squeeze(
            tf.slice(images, [0, view, 0, 0, 0], [-1, 1, -1, -1, -1]), axis=1)
        cam_residual = OdometryNet(view_image, ref_image, view_cam)
        cam_residuals.append(view_image, ref_image)  
    how should we encode the view_cam into the camera network? I could do an encoding similar 
    to the one used in the gqn, where you basically concatenate the images and have a few layers
    with convolution and downsampling which then outputs multiple channels that are the size of 
    a camera pose matrix, and you concatenate those layers with the view_cam pose estimate and 
    then have a few densely connected layers to regress a final pose residual. Another option is to rectify the images
    with the estimate and then regress the residual. If there is a clean way to do this then I think this is the best option. I might be able to do the 
    rectification using a homography at a fixed depth, say 1.0 meters out.

    """

    # get all homographies
    view_homographies = []
    for view in range(1, FLAGS.view_num):
        view_cam = tf.squeeze(tf.slice(cams, [0, view, 0, 0, 0],
                                       [-1, 1, 2, 4, 4]),
                              axis=1)
        if inverse_depth:
            homographies = get_homographies_inv_depth(ref_cam,
                                                      view_cam,
                                                      depth_num=depth_num,
                                                      depth_start=depth_start,
                                                      depth_end=depth_end)
        else:
            homographies = get_homographies(ref_cam,
                                            view_cam,
                                            depth_num=depth_num,
                                            depth_start=depth_start,
                                            depth_interval=depth_interval)
        view_homographies.append(homographies)

    # build cost volume by differentialble homography
    with tf.name_scope('cost_volume_homography'):
        depth_costs = []
        for d in range(depth_num):
            # compute cost (variation metric)
            ave_feature = ref_tower.get_output()
            ave_feature2 = tf.square(ref_tower.get_output())
            for view in range(0, FLAGS.view_num - 1):
                homography = tf.slice(view_homographies[view],
                                      begin=[0, d, 0, 0],
                                      size=[-1, 1, 3, 3])
                homography = tf.squeeze(homography, axis=1)
                # warped_view_feature = homography_warping(view_towers[view].get_output(), homography)
                warped_view_feature = tf_transform_homography(
                    view_towers[view].get_output(), homography)
                ave_feature = ave_feature + warped_view_feature
                ave_feature2 = ave_feature2 + tf.square(warped_view_feature)
            ave_feature = ave_feature / FLAGS.view_num
            ave_feature2 = ave_feature2 / FLAGS.view_num
            cost = ave_feature2 - tf.square(ave_feature)
            depth_costs.append(cost)
        cost_volume = tf.stack(depth_costs, axis=1)

    # filtered cost volume, size of (B, D, H, W, 1)
    filtered_cost_volume_tower = RegNetUS0({'data': cost_volume},
                                           trainable=trainable,
                                           mode=network_mode,
                                           reuse=reuse)
    filtered_cost_volume = tf.squeeze(filtered_cost_volume_tower.get_output(),
                                      axis=-1)

    # depth map by softArgmin
    with tf.name_scope('soft_arg_min'):
        # probability volume by soft max
        probability_volume = tf.nn.softmax(tf.scalar_mul(
            -1, filtered_cost_volume),
                                           axis=1,
                                           name='prob_volume')
        # depth image by soft argmin
        volume_shape = tf.shape(probability_volume)
        soft_2d = []
        for i in range(FLAGS.batch_size):
            if inverse_depth:
                inv_depth_start = tf.reshape(tf.div(1.0, depth_start[i]), [])
                inv_depth_end = tf.reshape(tf.div(1.0, depth_end[i]), [])
                inv_depth = tf.lin_space(inv_depth_start, inv_depth_end,
                                         tf.cast(depth_num, tf.int32))
                soft_1d = tf.div(1.0, inv_depth)
            else:
                soft_1d = tf.linspace(depth_start[i], depth_end[i],
                                      tf.cast(depth_num, tf.int32))
            soft_2d.append(soft_1d)
        soft_2d = tf.reshape(tf.stack(soft_2d, axis=0),
                             [volume_shape[0], volume_shape[1], 1, 1])
        soft_4d = tf.tile(soft_2d, [1, 1, volume_shape[2], volume_shape[3]])
        estimated_depth_map = tf.reduce_sum(soft_4d * probability_volume,
                                            axis=1)
        estimated_depth_map = tf.expand_dims(estimated_depth_map, axis=3)

    # probability map
    prob_map = get_probability_map(probability_volume,
                                   estimated_depth_map,
                                   depth_start,
                                   depth_interval,
                                   inverse_depth=inverse_depth)

    return estimated_depth_map, prob_map  #, filtered_depth_map, probability_volume
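
A minimal 1-D illustration, added here, of the soft-argmin idea used in the block above: the depth estimate is the probability-weighted average of the candidate depths.

# Hypothetical stand-alone sketch, not part of the original network.
depth_values = tf.linspace(1.0, 4.0, 4)                   # candidate depths
probs = tf.nn.softmax(tf.constant([0.1, 2.0, 0.5, 0.1]))  # per-depth scores -> probabilities
depth_estimate = tf.reduce_sum(depth_values * probs)      # scalar expected depth
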
Example no. 45
    def __init__(self,
                 sess,
                 num_actions,
                 num_atoms=51,
                 vmax=10.,
                 gamma=0.99,
                 update_horizon=1,
                 min_replay_history=20000,
                 update_period=4,
                 target_update_period=8000,
                 epsilon_fn=dqn_agent.linearly_decaying_epsilon,
                 epsilon_train=0.01,
                 epsilon_eval=0.001,
                 epsilon_decay_period=250000,
                 replay_scheme='prioritized',
                 tf_device='/cpu:*',
                 use_staging=True,
                 optimizer=tf.train.AdamOptimizer(learning_rate=0.00025,
                                                  epsilon=0.0003125),
                 summary_writer=None,
                 summary_writing_frequency=500):
        """Initializes the agent and constructs the components of its graph.

    Args:
      sess: `tf.Session`, for executing ops.
      num_actions: int, number of actions the agent can take at any state.
      num_atoms: int, the number of buckets of the value function distribution.
      vmax: float, the value distribution support is [-vmax, vmax].
      gamma: float, discount factor with the usual RL meaning.
      update_horizon: int, horizon at which updates are performed, the 'n' in
        n-step update.
      min_replay_history: int, number of transitions that should be experienced
        before the agent begins training its value function.
      update_period: int, period between DQN updates.
      target_update_period: int, update period for the target network.
      epsilon_fn: function expecting 4 parameters:
        (decay_period, step, warmup_steps, epsilon). This function should return
        the epsilon value used for exploration during training.
      epsilon_train: float, the value to which the agent's epsilon is eventually
        decayed during training.
      epsilon_eval: float, epsilon used when evaluating the agent.
      epsilon_decay_period: int, length of the epsilon decay schedule.
      replay_scheme: str, 'prioritized' or 'uniform', the sampling scheme of the
        replay memory.
      tf_device: str, Tensorflow device on which the agent's graph is executed.
      use_staging: bool, when True use a staging area to prefetch the next
        training batch, speeding training up by about 30%.
      optimizer: `tf.train.Optimizer`, for training the value function.
      summary_writer: SummaryWriter object for outputting training statistics.
        Summary writing disabled if set to None.
      summary_writing_frequency: int, frequency with which summaries will be
        written. Lower values will result in slower training.
    """
        # We need this because some tools convert round floats into ints.
        vmax = float(vmax)
        self._num_atoms = num_atoms
        self._support = tf.linspace(-vmax, vmax, num_atoms)
        self._replay_scheme = replay_scheme
        # TODO(b/110897128): Make agent optimizer attribute private.
        self.optimizer = optimizer

        super(RainbowAgent, self).__init__(
            sess=sess,
            num_actions=num_actions,
            gamma=gamma,
            update_horizon=update_horizon,
            min_replay_history=min_replay_history,
            update_period=update_period,
            target_update_period=target_update_period,
            epsilon_fn=epsilon_fn,
            epsilon_train=epsilon_train,
            epsilon_eval=epsilon_eval,
            epsilon_decay_period=epsilon_decay_period,
            tf_device=tf_device,
            use_staging=use_staging,
            optimizer=self.optimizer,
            summary_writer=summary_writer,
            summary_writing_frequency=summary_writing_frequency)
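A hedged aside, not part of the agent code above: the support built with tf.linspace holds the atom locations of the C51 value distribution, and combining it with per-atom probabilities recovers scalar Q-values. A minimal sketch, assuming TF 2.x eager execution and made-up shapes:

import tensorflow as tf

num_actions, num_atoms, vmax = 4, 51, 10.0
support = tf.linspace(-vmax, vmax, num_atoms)                 # [num_atoms]
logits = tf.random.normal([1, num_actions, num_atoms])        # hypothetical network output
probabilities = tf.nn.softmax(logits, axis=-1)                # per-atom probabilities
q_values = tf.reduce_sum(probabilities * support, axis=-1)    # [1, num_actions]
print(q_values)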
Exemplo n.º 46
0
import tensorflow as tf

a = tf.linspace(-10., 10., 10)

with tf.GradientTape() as tape:
    tape.watch(a)
    y = tf.sigmoid(a)

grads = tape.gradient(y, [a])
print('x:', a.numpy())
print('y:', y.numpy())
print('grad:', grads[0].numpy())
Exemplo n.º 47
0
    def __init__(
            self,
            time_step_spec,
            action_spec,
            categorical_q_network,
            optimizer,
            min_q_value=-10.0,
            max_q_value=10.0,
            epsilon_greedy=0.1,
            n_step_update=1,
            boltzmann_temperature=None,
            # Params for target network updates
            target_update_tau=1.0,
            target_update_period=1,
            # Params for training.
            td_errors_loss_fn=None,
            gamma=1.0,
            reward_scale_factor=1.0,
            gradient_clipping=None,
            # Params for debugging
            debug_summaries=False,
            summarize_grads_and_vars=False,
            train_step_counter=None,
            name=None):
        """Creates a Categorical DQN Agent.

    Args:
      time_step_spec: A `TimeStep` spec of the expected time_steps.
      action_spec: A `BoundedTensorSpec` representing the actions.
      categorical_q_network: A categorical_q_network.CategoricalQNetwork that
        returns the q_distribution for each action.
      optimizer: The optimizer to use for training.
      min_q_value: A float specifying the minimum Q-value, used for setting up
        the support.
      max_q_value: A float specifying the maximum Q-value, used for setting up
        the support.
      epsilon_greedy: probability of choosing a random action in the default
        epsilon-greedy collect policy (used only if a wrapper is not provided to
        the collect_policy method).
      n_step_update: The number of steps to consider when computing TD error and
        TD loss. Defaults to single-step updates. Note that this requires the
        user to call train on Trajectory objects with a time dimension of
        `n_step_update + 1`. However, note that we do not yet support
        `n_step_update > 1` in the case of RNNs (i.e., non-empty
        `q_network.state_spec`).
      boltzmann_temperature: Temperature value to use for Boltzmann sampling of
        the actions during data collection. The closer to 0.0, the higher the
        probability of choosing the best action.
      target_update_tau: Factor for soft update of the target networks.
      target_update_period: Period for soft update of the target networks.
      td_errors_loss_fn: A function for computing the TD errors loss. If None, a
        default value of huber_loss is used. This function takes as input the
        target and the estimated Q values and returns the loss for each element
        of the batch.
      gamma: A discount factor for future rewards.
      reward_scale_factor: Multiplicative scale for the reward.
      gradient_clipping: Norm length to clip gradients.
      debug_summaries: A bool to gather debug summaries.
      summarize_grads_and_vars: If True, gradient and network variable summaries
        will be written during training.
      train_step_counter: An optional counter to increment every time the train
        op is run.  Defaults to the global_step.
      name: The name of this agent. All variables in this module will fall
        under that name. Defaults to the class name.

    Raises:
      TypeError: If the action spec contains more than one action.
    """
        num_atoms = getattr(categorical_q_network, 'num_atoms', None)
        if num_atoms is None:
            raise TypeError(
                'Expected categorical_q_network to have property '
                '`num_atoms`, but it doesn\'t (note: you likely want to '
                'use a CategoricalQNetwork). Network is: %s' %
                (categorical_q_network, ))

        self._num_atoms = num_atoms
        self._min_q_value = min_q_value
        self._max_q_value = max_q_value
        self._support = tf.linspace(min_q_value, max_q_value, num_atoms)

        super(CategoricalDqnAgent,
              self).__init__(time_step_spec,
                             action_spec,
                             categorical_q_network,
                             optimizer,
                             epsilon_greedy=epsilon_greedy,
                             n_step_update=n_step_update,
                             boltzmann_temperature=boltzmann_temperature,
                             target_update_tau=target_update_tau,
                             target_update_period=target_update_period,
                             td_errors_loss_fn=td_errors_loss_fn,
                             gamma=gamma,
                             reward_scale_factor=reward_scale_factor,
                             gradient_clipping=gradient_clipping,
                             debug_summaries=debug_summaries,
                             summarize_grads_and_vars=summarize_grads_and_vars,
                             train_step_counter=train_step_counter,
                             name=name)

        policy = categorical_q_policy.CategoricalQPolicy(
            min_q_value, max_q_value, self._q_network, self._action_spec)
        if boltzmann_temperature is not None:
            self._collect_policy = boltzmann_policy.BoltzmannPolicy(
                policy, temperature=self._boltzmann_temperature)
        else:
            self._collect_policy = epsilon_greedy_policy.EpsilonGreedyPolicy(
                policy, epsilon=self._epsilon_greedy)
        self._policy = greedy_policy.GreedyPolicy(policy)
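A hedged aside (the 51-atom count is borrowed from the Rainbow example above, not from this class): since tf.linspace spaces the atoms evenly, the gap used by the categorical projection is simply the support width divided by num_atoms - 1.

delta_z = (10.0 - (-10.0)) / (51 - 1)   # 0.4 for the default -10/10 bounds and a hypothetical 51-atom network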
Exemplo n.º 48
0
# scatter: purposefully updates values at given coordinates
'''
    tf.scatter_nd(
    indices,      coordinates on the base tensor whose values are changed
    updates,      values written at those coordinates
    shape)        shape of the base tensor (1-D, 2-D, or higher); all values default to 0
'''
indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])
# on the base tensor, index 4 is updated to 9, index 3 to 10, index 1 to 11, and index 7 to 12
shape = tf.constant([8])  # the base is a 1-D, all-zero tensor of length 8
tf.scatter_nd(indices, updates, shape)
# returns [0, 11, 0, 10, 9, 0, 0, 12]
# tf.scatter_nd can only update values on an all-zero base tensor
# scatter usage is more involved; see lessons 47 and 48 for details
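
# A hedged follow-up sketch, not from the lesson: the same idea on a 2-D base
# tensor, assuming TF 2.x eager execution.
indices_2d = tf.constant([[0, 0], [1, 2]])   # (row, column) positions to update
updates_2d = tf.constant([5, 7])
shape_2d = tf.constant([2, 3])               # 2x3 base tensor, all zeros
tf.scatter_nd(indices_2d, updates_2d, shape_2d)
# returns [[5, 0, 0],
#          [0, 0, 7]]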

# meshgrid: used in lesson 48; hard to describe in writing, remember to review it
'''
Given ranges for x and y, generate the coordinates [x1,y1], [x2,y2], [x3,y3], ... covering those ranges
'''
y = tf.linspace(-2., 2., 5)  # five evenly spaced values from -2. to 2.
x = tf.linspace(-2., 2., 5)
point_x, point_y = tf.meshgrid(x, y)
point_x.shape  # [5, 5], 25 values in total; within each column the values are equal
point_y.shape  # [5, 5], 25 values in total; within each row the values are equal

points = tf.stack([point_x, point_y], axis=2)
# stack point_x and point_y into coordinate pairs, adding a new dimension
points.shape  # [5, 5, 2]: 5 rows, 5 columns, 2 channels, i.e. x and y combined into one coordinate point
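
# A hedged illustration, not from the lesson: a common use of the [5, 5] grids is
# evaluating a function at every (x, y) coordinate, e.g. a 2-D Gaussian bump.
z = tf.exp(-(point_x ** 2 + point_y ** 2))  # shape [5, 5], one value per grid point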
Exemplo n.º 49
0
print(zero_tensor)

ones_tensor = tf.ones([1, 1])

print(ones_tensor)

fill_tensor = tf.fill([4, 4], 16)
print(fill_tensor)

first_constant = tf.constant([[1, 2, 3], [3, 4, 5]])
print(first_constant)

mess_constant = tf.constant(['Kaiser'])
print(mess_constant)

lins_tensor = tf.linspace(0., 9., 6)
print(lins_tensor)

range_tensor = tf.range(3., 6., 0.5)
print(range_tensor)

mat = tf.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])

slic = tf.slice(mat, [1, 1], [2, 2])
print(slic)

mat = tf.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])
rev = tf.reverse(mat, [0])
print(rev)

x = tf.constant([1., 2., 3.])
Exemplo n.º 50
0
            def regDLF(y_true,
                       y_pred,
                       alpha=1,
                       beta=1,
                       gamma=0.01,
                       delta_v=0.5,
                       delta_d=1.5,
                       name='loss_discrim'):
                def tf_norm(inputs, axis=1, epsilon=1e-7, name='safe_norm'):
                    squared_norm = tf.reduce_sum(tf.square(inputs),
                                                 axis=axis,
                                                 keep_dims=True)
                    safe_norm = tf.sqrt(squared_norm + epsilon)
                    return tf.identity(safe_norm, name=name)

                ###

                lins = tf.linspace(0.0,
                                   DIMZ * DIMY * DIMX,
                                   DIMZ * DIMY * DIMX,
                                   name='lins')
                lins = lins / tf.reduce_max(lins) * 255
                lins = cvt2tanh(lins)
                lins = tf.reshape(lins, tf.shape(y_true), name='lins_3d')
                print(lins)

                y_true = tf.reshape(y_true, [DIMZ * DIMY * DIMX])
                y_pred = tf.concat([y_pred, lins], axis=-1)

                nDim = tf.shape(y_pred)[-1]
                X = tf.reshape(y_pred, [DIMZ * DIMY * DIMX, nDim])
                uniqueLabels, uniqueInd = tf.unique(y_true)

                numUnique = tf.size(
                    uniqueLabels)  # Get the number of connected components

                Sigma = tf.unsorted_segment_sum(X, uniqueInd, numUnique)
                # ones_Sigma = tf.ones((tf.shape(X)[0], 1))
                ones_Sigma = tf.ones_like(X)
                ones_Sigma = tf.unsorted_segment_sum(ones_Sigma, uniqueInd,
                                                     numUnique)
                mu = tf.divide(Sigma, ones_Sigma)

                Lreg = tf.reduce_mean(tf.norm(mu, axis=1, ord=1))

                T = tf.norm(tf.subtract(tf.gather(mu, uniqueInd), X),
                            axis=1,
                            ord=1)
                T = tf.divide(T, Lreg)
                T = tf.subtract(T, delta_v)
                T = tf.clip_by_value(T, 0, T)
                T = tf.square(T)

                ones_Sigma = tf.ones_like(uniqueInd, dtype=tf.float32)
                ones_Sigma = tf.unsorted_segment_sum(ones_Sigma, uniqueInd,
                                                     numUnique)
                clusterSigma = tf.unsorted_segment_sum(T, uniqueInd, numUnique)
                clusterSigma = tf.divide(clusterSigma, ones_Sigma)

                # Lvar = tf.reduce_mean(clusterSigma, axis=0)
                Lvar = tf.reduce_mean(clusterSigma)

                mu_interleaved_rep = tf.tile(mu, [numUnique, 1])
                mu_band_rep = tf.tile(mu, [1, numUnique])
                mu_band_rep = tf.reshape(mu_band_rep,
                                         (numUnique * numUnique, nDim))

                mu_diff = tf.subtract(mu_band_rep, mu_interleaved_rep)
                # Remove zero vector
                # intermediate_tensor = reduce_sum(tf.abs(x), 1)
                # zero_vector = tf.zeros(shape=(1,1), dtype=tf.float32)
                # bool_mask = tf.not_equal(intermediate_tensor, zero_vector)
                # omit_zeros = tf.boolean_mask(x, bool_mask)
                intermediate_tensor = tf.reduce_sum(tf.abs(mu_diff), 1)
                zero_vector = tf.zeros(shape=(1, 1), dtype=tf.float32)
                bool_mask = tf.not_equal(intermediate_tensor, zero_vector)
                omit_zeros = tf.boolean_mask(mu_diff, bool_mask)
                mu_diff = tf.expand_dims(omit_zeros, axis=1)
                print(mu_diff)
                mu_diff = tf.norm(mu_diff, ord=1)
                # squared_norm = tf.reduce_sum(tf.square(s), axis=axis,keep_dims=True)
                # safe_norm = tf.sqrt(squared_norm + epsilon)
                # squared_norm = tf.reduce_sum(tf.square(omit_zeros), axis=-1,keep_dims=True)
                # safe_norm = tf.sqrt(squared_norm + 1e-6)
                # mu_diff = safe_norm

                mu_diff = tf.divide(mu_diff, Lreg)

                mu_diff = tf.subtract(2 * delta_d, mu_diff)
                mu_diff = tf.clip_by_value(mu_diff, 0, mu_diff)
                mu_diff = tf.square(mu_diff)

                numUniqueF = tf.cast(numUnique, tf.float32)
                Ldist = tf.reduce_mean(mu_diff)

                # L = alpha * Lvar + beta * Ldist + gamma * Lreg
                # L = tf.reduce_mean(L, keep_dims=True)
                L = tf.reduce_sum([alpha * Lvar, beta * Ldist, gamma * Lreg],
                                  keep_dims=False)
                print(L)
                print(Ldist)
                print(Lvar)
                print(Lreg)
                return tf.identity(L, name=name)
Exemplo n.º 51
0
my_filter = tf.constant(0.25,shape=[2,2,1,1])
my_strides = [1,2,2,1]
mov_avg_layer = tf.nn.conv2d(x_data,my_filter,my_strides,padding="SAME",name="Moving_Avg_Window")
def custom_layer(input_matrix):
    input_matrix_squeezed = tf.squeeze(input_matrix)
    A = tf.constant([[1.,2.],[-1.,3.]])
    b = tf.constant(1.,shape=[2,2])
    temp1 = tf.matmul(A,input_matrix_squeezed)
    temp = tf.add(temp1,b)  # Ax+b
    return (tf.sigmoid(temp))
with tf.name_scope('Custom_Layer') as scope:
    custom_layer1 = custom_layer(mov_avg_layer)
print(sess.run(custom_layer1,feed_dict={x_data:x_val}))

import matplotlib.pyplot as plt
x_vals = tf.linspace(-1.,1.,500)
target = tf.constant(0.0)

l2_y_vals = tf.square(target-x_vals)
l2_y_out = sess.run(l2_y_vals)

l1_y_vals = tf.abs(target-x_vals)
l1_y_out = sess.run(l1_y_vals)
x_array = sess.run(x_vals)
plt.plot(x_array,l2_y_out,'b-',label='L2 Loss')
plt.plot(x_array,l1_y_out,'g:',label='L1 Loss')
plt.ylim(-0.2,0.4)
plt.legend(loc="lower right",prop={'size':11})
plt.show()

delta1 = tf.constant(0.25)
Exemplo n.º 52
0
def affine_grid_generator(height, width, theta):
    """
    This function returns a sampling grid, which when
    used with the bilinear sampler on the input feature
    map, will create an output feature map that is an
    affine transformation [1] of the input feature map.

    Input
    -----
    - height: desired height of grid/output. Used
      to downsample or upsample.

    - width: desired width of grid/output. Used
      to downsample or upsample.

    - theta: affine transform matrices of shape (num_batch, 2, 3).
      For each image in the batch, we have 6 theta parameters of
      the form (2x3) that define the affine transformation T.

    Returns
    -------
    - normalized grid (-1, 1) of shape (num_batch, 2, H, W).
      The 2nd dimension has 2 components: (x, y) which are the
      sampling points of the original image for each point in the
      target image.

    Note
    ----
    [1]: the affine transformation allows cropping, translation,
         and isotropic scaling.
    """
    num_batch = tf.shape(theta)[0]

    # create normalized 2D grid
    x = tf.linspace(-1.0, 1.0, width)
    y = tf.linspace(-1.0, 1.0, height)
    x_t, y_t = tf.meshgrid(x, y)

    # flatten
    x_t_flat = tf.reshape(x_t, [-1])
    y_t_flat = tf.reshape(y_t, [-1])

    # reshape to [x_t, y_t , 1] - (homogeneous form)
    ones = tf.ones_like(x_t_flat)
    sampling_grid = tf.stack([x_t_flat, y_t_flat, ones])

    # repeat grid num_batch times
    sampling_grid = tf.expand_dims(sampling_grid, axis=0)
    sampling_grid = tf.tile(sampling_grid, tf.stack([num_batch, 1, 1]))

    # cast to float32 (required for matmul)
    theta = tf.cast(theta, 'float32')
    sampling_grid = tf.cast(sampling_grid, 'float32')

    # transform the sampling grid - batch multiply
    batch_grids = tf.matmul(theta, sampling_grid)
    # batch grid has shape (num_batch, 2, H*W)

    # reshape to (num_batch, H, W, 2)
    batch_grids = tf.reshape(batch_grids, [num_batch, 2, height, width])

    return batch_grids
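A hedged usage sketch, not part of the original snippet: with identity affine matrices the generator should reproduce the normalized meshgrid itself, which makes an easy sanity check.

theta_identity = tf.constant([[[1., 0., 0.],
                               [0., 1., 0.]]])  # batch of one identity transform
grid = affine_grid_generator(height=4, width=4, theta=theta_identity)
# grid has shape (1, 2, 4, 4); grid[0, 0] is x_t and grid[0, 1] is y_t, both in [-1, 1]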
Exemplo n.º 53
0
X_train, X_test = X_train / 255.0, X_test / 255.0
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

#one-hot
Y_train = tf.keras.utils.to_categorical(Y_train, NB_CLASSES)
Y_test = tf.keras.utils.to_categorical(Y_test, NB_CLASSES)

# Training the model for each combination of the hyperparameters.

#A unique number for each training session
session_num = 0

# Nested for-loop training over all possible combinations of hyperparameters
for num_units in HP_NUM_UNITS.domain.values:
    for dropout_rate in tf.linspace(HP_DROPOUT.domain.min_value,
                                    HP_DROPOUT.domain.max_value, 3):
        for optimizer in HP_OPTIMIZER.domain.values:
            for batchsize in HP_BATCHSIZE.domain.values:
                hparams = {
                    HP_NUM_UNITS: num_units,
                    HP_DROPOUT: float(
                        "%.2f" % float(dropout_rate)
                    ),  # float("%.2f" % float(dropout_rate)) limits the decimal places to 2
                    HP_OPTIMIZER: optimizer,
                    HP_BATCHSIZE: batchsize,
                }
                run_name = "run-%d" % session_num
                print('--- Starting trial: %s' % run_name)
                print({h.name: hparams[h] for h in hparams})
                run('logs/hparam_tuning/' + run_name, hparams)
                session_num += 1
Exemplo n.º 54
0
def sin_phase(mod_rate):
  phase = tf.sin(tf.linspace(0.0, mod_rate * n_seconds * 2.0 * np.pi, n_samples))
  phase = (phase[tf.newaxis, :, tf.newaxis] + 1.0) / 2.0  # Scale to [0, 1.0]
  return phase
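A hedged usage sketch, assuming the module-level names the snippet relies on (tensorflow as tf, numpy as np, and hypothetical values for n_seconds and n_samples):

n_seconds, n_samples = 1.0, 16000   # assumed globals read by sin_phase
phase = sin_phase(mod_rate=2.0)     # shape (1, 16000, 1), values scaled to [0.0, 1.0]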
Exemplo n.º 55
0
def inference_mem(images,
                  cams,
                  depth_num,
                  depth_start,
                  depth_interval,
                  network_mode,
                  is_master_gpu=True,
                  training=True,
                  trainable=True,
                  inverse_depth=False):
    """ inference of depth image from multi-view images and cameras """

    # dynamic gpu params
    depth_end = depth_start + \
        (tf.cast(depth_num, tf.float32) - 1) * depth_interval
    feature_c = 32
    feature_h = FLAGS.height // 4  # integer division so the feature-map shape stays an int
    feature_w = FLAGS.width // 4

    # reference image
    ref_image = tf.squeeze(tf.slice(images, [0, 0, 0, 0, 0],
                                    [-1, 1, -1, -1, 3]),
                           axis=1)
    ref_cam = tf.squeeze(tf.slice(cams, [0, 0, 0, 0, 0], [-1, 1, 2, 4, 4]),
                         axis=1)

    # image feature extraction
    reuse = tf.app.flags.FLAGS.reuse_vars  #not is_master_gpu
    ref_tower = UNetDS2GN({'data': ref_image},
                          trainable=trainable,
                          training=training,
                          mode=network_mode,
                          reuse=reuse)
    base_divisor = ref_tower.base_divisor
    feature_c //= base_divisor  # keep the channel count an integer
    ref_feature = ref_tower.get_output()
    ref_feature2 = tf.square(ref_feature)

    view_features = []
    for view in range(1, FLAGS.view_num):
        view_image = tf.squeeze(tf.slice(images, [0, view, 0, 0, 0],
                                         [-1, 1, -1, -1, -1]),
                                axis=1)
        view_tower = UNetDS2GN({'data': view_image},
                               trainable=trainable,
                               training=training,
                               mode=network_mode,
                               reuse=True)
        view_features.append(view_tower.get_output())
    view_features = tf.stack(view_features, axis=0)

    # get all homographies
    view_homographies = []
    for view in range(1, FLAGS.view_num):
        view_cam = tf.squeeze(tf.slice(cams, [0, view, 0, 0, 0],
                                       [-1, 1, 2, 4, 4]),
                              axis=1)
        if inverse_depth:
            homographies = get_homographies_inv_depth(ref_cam,
                                                      view_cam,
                                                      depth_num=depth_num,
                                                      depth_start=depth_start,
                                                      depth_end=depth_end)
        else:
            homographies = get_homographies(ref_cam,
                                            view_cam,
                                            depth_num=depth_num,
                                            depth_start=depth_start,
                                            depth_interval=depth_interval)
        view_homographies.append(homographies)
    view_homographies = tf.stack(view_homographies, axis=0)

    # build cost volume by differentialble homography
    with tf.name_scope('cost_volume_homography'):
        depth_costs = []

        for d in range(depth_num):
            # compute cost (standard deviation feature)
            ave_feature = tf.Variable(
                tf.zeros([FLAGS.batch_size, feature_h, feature_w, feature_c]),
                name='ave',
                trainable=False,
                collections=[tf.GraphKeys.LOCAL_VARIABLES])
            ave_feature2 = tf.Variable(
                tf.zeros([FLAGS.batch_size, feature_h, feature_w, feature_c]),
                name='ave2',
                trainable=False,
                collections=[tf.GraphKeys.LOCAL_VARIABLES])
            ave_feature = tf.assign(ave_feature, ref_feature)
            ave_feature2 = tf.assign(ave_feature2, ref_feature2)

            def body(view, ave_feature, ave_feature2):
                """Loop body."""
                homography = tf.slice(view_homographies[view],
                                      begin=[0, d, 0, 0],
                                      size=[-1, 1, 3, 3])
                homography = tf.squeeze(homography, axis=1)
                # warped_view_feature = homography_warping(view_features[view], homography)
                warped_view_feature = tf_transform_homography(
                    view_features[view], homography)
                ave_feature = tf.assign_add(ave_feature, warped_view_feature)
                ave_feature2 = tf.assign_add(ave_feature2,
                                             tf.square(warped_view_feature))
                view = tf.add(view, 1)
                return view, ave_feature, ave_feature2

            view = tf.constant(0)
            cond = lambda view, *_: tf.less(view, FLAGS.view_num - 1)
            _, ave_feature, ave_feature2 = tf.while_loop(
                cond,
                body, [view, ave_feature, ave_feature2],
                back_prop=False,
                parallel_iterations=1)

            ave_feature = tf.assign(
                ave_feature,
                tf.square(ave_feature) / (FLAGS.view_num * FLAGS.view_num))
            ave_feature2 = tf.assign(
                ave_feature2, ave_feature2 / FLAGS.view_num - ave_feature)
            depth_costs.append(ave_feature2)
        cost_volume = tf.stack(depth_costs, axis=1)

    # filtered cost volume, size of (B, D, H, W, 1)
    filtered_cost_volume_tower = RegNetUS0({'data': cost_volume},
                                           trainable=trainable,
                                           training=training,
                                           mode=network_mode,
                                           reuse=reuse)
    filtered_cost_volume = tf.squeeze(filtered_cost_volume_tower.get_output(),
                                      axis=-1)

    # depth map by softArgmin
    with tf.name_scope('soft_arg_min'):
        # probability volume by soft max
        probability_volume = tf.nn.softmax(tf.scalar_mul(
            -1, filtered_cost_volume),
                                           axis=1,
                                           name='prob_volume')
        # depth image by soft argmin
        volume_shape = tf.shape(probability_volume)
        soft_2d = []
        for i in range(FLAGS.batch_size):
            if inverse_depth:
                inv_depth_start = tf.reshape(tf.div(1.0, depth_start[i]), [])
                inv_depth_end = tf.reshape(tf.div(1.0, depth_end[i]), [])
                inv_depth = tf.lin_space(inv_depth_start, inv_depth_end,
                                         tf.cast(depth_num, tf.int32))
                soft_1d = tf.div(1.0, inv_depth)
            else:
                soft_1d = tf.linspace(depth_start[i], depth_end[i],
                                      tf.cast(depth_num, tf.int32))
            soft_2d.append(soft_1d)
        soft_2d = tf.reshape(tf.stack(soft_2d, axis=0),
                             [volume_shape[0], volume_shape[1], 1, 1])
        soft_4d = tf.tile(soft_2d, [1, 1, volume_shape[2], volume_shape[3]])
        estimated_depth_map = tf.reduce_sum(soft_4d * probability_volume,
                                            axis=1)
        estimated_depth_map = tf.expand_dims(estimated_depth_map, axis=3)

    # probability map
    prob_map = get_probability_map(probability_volume,
                                   estimated_depth_map,
                                   depth_start,
                                   depth_interval,
                                   inverse_depth=inverse_depth)

    # return filtered_depth_map,
    return estimated_depth_map, prob_map
Exemplo n.º 56
0
print(sess.run(constant_tsr))

# 2. Tensors with a similar shape

zeros_similar = tf.zeros_like(constant_tsr)
print(zeros_similar)
print(sess.run(zeros_similar))

ones_similar = tf.ones_like(constant_tsr)
print(ones_similar)
print(sess.run(ones_similar))

# 3. Sequence tensors

# Linspace in TensorFlow
linear_tsr = tf.linspace(start=0.0, stop=1.0,
                         num=3)  # produces [0.0, 0.5, 1.0]; the stop value is included
print(linear_tsr)
print(sess.run(linear_tsr))
print('-----------------------------')
# Range in TensorFlow
sequence_var = tf.Variable(tf.range(start=6.0, limit=15.0,
                                    delta=3.24))  # produces [6.0, 9.24, 12.48]; the limit is excluded
sess.run(sequence_var.initializer)
print(sequence_var)
print(sess.run(sequence_var))
# sequence_var is a variable here; tf.Variable wraps the tensor into a variable
# a variable must be initialized after it is declared before it can be used
# you can also use the approach shown below
# initialize_op = tf.global_variables_initializer()
# sess.run(initialize_op)
Exemplo n.º 57
0
def fft_frequencies(sample_rate, n_fft):
    # Equal to librosa.core.fft_frequencies
    begin = 0.0
    end = tf.cast(sample_rate // 2, tf.float32)
    step = 1 + n_fft // 2
    return tf.linspace(begin, end, step)
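A hedged check with made-up arguments, assuming TF 2.x eager execution: the output is just the evenly spaced bin frequencies from 0 Hz up to the Nyquist frequency.

print(fft_frequencies(sample_rate=8000, n_fft=4))   # -> [0., 2000., 4000.]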
Exemplo n.º 58
0
    def call(self, inputs, training=None, mask=None):
        pixels, word_indices, pattern_indices, char_indices, memory_mask, parses = inputs

        # pixels: (bs, h, w)
        # word_indices: (bs, h, w)
        # pattern_indices: (bs, h, w)
        # char_indices: (bs, h, w)
        # memory_mask: (bs, h, w, m, l, d)
        # parses: (bs, h, w, 4, 2)

        bs = tf.shape(pixels)[0]
        h, w = InvoiceData.im_size[0], InvoiceData.im_size[1]

        X, Y = tf.meshgrid(tf.linspace(0.0, 1.0, InvoiceData.im_size[1]), tf.linspace(0.0, 1.0, InvoiceData.im_size[0]))
        X = tf.tile(X[None, ..., None], (bs, 1, 1, 1))
        Y = tf.tile(Y[None, ..., None], (bs, 1, 1, 1))

        word_embeddings = tf.reshape(
            self.word_embed(tf.reshape(word_indices, (bs, -1))),
            (bs, h, w, self.embed_size)
        )

        pattern_embeddings = tf.reshape(
            self.pattern_embed(tf.reshape(pattern_indices, (bs, -1))),
            (bs, h, w, self.embed_size)
        )

        char_embeddings = tf.reshape(
            self.char_embed(tf.reshape(char_indices, (bs, -1))),
            (bs, h, w, self.embed_size)
        )

        pixels = tf.reshape(pixels, (bs, h, w, 3))
        parses = tf.reshape(parses, (bs, h, w, InvoiceData.n_memories * 2))
        memory_mask = tf.reshape(memory_mask, (bs, h, w, 1))
        x = tf.concat([pixels, word_embeddings, pattern_embeddings, char_embeddings, parses, X, Y, memory_mask],
                      axis=3)

        x = self.conv_block(x)
        x = self.dropout(x, training=training)

        pre_att_logits = x
        att_logits = self.conv_att(x)  # (bs, h, w, n_memories)
        att_logits = memory_mask * att_logits - (
                1.0 - memory_mask) * 1000  # TODO only sum the memory_mask idx, in the softmax

        logits = tf.reshape(att_logits, (bs, -1))  # (bs, h * w * n_memories)
        logits -= tf.reduce_max(logits, axis=1, keepdims=True)
        lp = tf.math.log_softmax(logits, axis=1)  # (bs, h * w * n_memories)
        p = tf.math.softmax(logits, axis=1)  # (bs, h * w * n_memories)

        spatial_attention = tf.reshape(p, (bs, h * w * InvoiceData.n_memories, 1, 1))  # (bs, h * w * n_memories, 1, 1)

        p_uniform = memory_mask / tf.reduce_sum(memory_mask, axis=(1, 2, 3), keepdims=True)
        cross_entropy_uniform = -tf.reduce_sum(p_uniform * tf.reshape(lp, (bs, h, w, InvoiceData.n_memories)),
                                               axis=(1, 2, 3))  # (bs, 1)

        cp = tf.reduce_sum(tf.reshape(p, (bs, h, w, InvoiceData.n_memories)), axis=3, keepdims=True)

        context = tf.reduce_sum(cp * pre_att_logits, axis=(1, 2))  # (bs, 4*n_hidden)

        self.add_loss(self.frac_ce_loss * tf.reduce_mean(cross_entropy_uniform))

        return spatial_attention, context
Exemplo n.º 59
0
import tensorflow as tf

a = tf.linspace(0., 1., 10)
a = tf.expand_dims(a, 0)

b = tf.transpose(a)

print(tf.matmul(a, b))
Exemplo n.º 60
0
    var2 = tf.Variable(tf.constant(2.3), dtype=tf.float32)
    # var2=tf.Variable(tf.constant([2.0,3,4,5,6]),dtype=tf.float32)
    mul = tf.multiply(var4, var3)
    sess = tf.Session()
    print(sess.run(mul))

# print (g.as_graph_element())
# '______________________________________________________________________________________
# matrix multiplication

mat1 = tf.constant([7, 8, 9, 12, 34, 21], dtype=tf.float32, shape=[2, 3])

mat2 = tf.constant([1, 2, 3, 4, 5, 6], dtype=tf.float32,
                   shape=[3,
                          2])  # shape matters a lot for matrix multiplication
mat3 = tf.linspace(start=1.0, stop=10,
                   num=6)  # create your own evenly spaced list of constants
mul = tf.matmul(mat1, mat2)
new_dat = tf.range(10)  # a range of values, just like Python's built-in range
shuffle = tf.random_shuffle(new_dat)
sess = tf.Session()
print(sess.run(mul))
print(sess.run(mat3))
print(sess.run(new_dat))
print(sess.run(shuffle))

# '______________________________________________________________________________________
# further functions on matrices
identy = tf.eye(3)  # create an identity matrix

b = tf.Variable(tf.random_uniform(
    [5, 10], 0, 2,