Example #1
  def _extend_support_with_default_value(self, x, f, default_value):
    """Returns `f(x)` if x is in the support, and `default_value` otherwise.

    Given `f` which is defined on the support of this distribution
    (`x >= loc`), extend the function definition to the real line
    by defining `f(x) = default_value` for `x < loc`.

    Args:
      x: Floating-point `Tensor` to evaluate `f` at.
      f: Callable that takes in a `Tensor` and returns a `Tensor`. This
        represents the function whose domain of definition we want to extend.
      default_value: Python or numpy literal representing the value to use for
        extending the domain.
    Returns:
      `Tensor` representing an extension of `f(x)`.
    """
    with tf.name_scope(name="extend_support_with_default_value", values=[x]):
      x = tf.convert_to_tensor(x, dtype=self.dtype, name="x")
      loc = self.loc + tf.zeros_like(self.scale) + tf.zeros_like(x)
      x = x + tf.zeros_like(loc)
      # Substitute out-of-support values in x with values that are in the
      # support of the distribution before applying f.
      y = f(tf.where(x < loc, self._inv_z(0.5), x))
      if default_value == 0.:
        default_value = tf.zeros_like(y)
      elif default_value == 1.:
        default_value = tf.ones_like(y)
      else:
        default_value = tf.fill(
            dims=tf.shape(y),
            value=np.array(default_value, dtype=self.dtype.as_numpy_dtype))
      return tf.where(x < loc, default_value, y)
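Note: the pattern above substitutes in-support values before calling `f` so that `f` never produces NaN/Inf values (or NaN gradients) that `tf.where` would otherwise propagate. A minimal standalone sketch of the same idea, assuming TensorFlow 2.x eager mode; `safe_log` is a hypothetical stand-in for `f`:

import tensorflow as tf

def safe_log(x, default_value=0.0):
    in_support = x > 0.0
    # Evaluate log only on substituted, in-support inputs; then patch the
    # out-of-support positions with the default value.
    y = tf.math.log(tf.where(in_support, x, tf.ones_like(x)))
    return tf.where(in_support, y, default_value * tf.ones_like(y))

print(safe_log(tf.constant([-1.0, 0.5, 2.0])))  # [0., log(0.5), log(2.)]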
Example #2
 def create_discriminator(self, _input, reuse=False):
     config = self.config
     gan = self.gan
     print("___", _input, self.g0, self.x0, self.c0)
     _fs = tf.concat([tf.zeros_like(self.c0),tf.zeros_like(self.c0)],axis=0)
     disc = self.create_component(config.discriminator, name='discriminator', input=_input, features=[_fs], reuse=reuse)
     return disc
Example #3
  def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    var_list = [ v for _,v in grads_and_vars]
    d_vars = []
    g_vars = []
    all_grads = [ g for g, _ in grads_and_vars ]
    for grad,var in grads_and_vars:
        if var in self.gan.d_vars():
            d_vars += [var]
        elif var in self.gan.g_vars():
            g_vars += [var]
        else:
            raise("Couldn't find var in g_vars or d_vars")

    with ops.init_scope():
        self.optimizer._create_slots([v for g,v in grads_and_vars])

    self._prepare()
    d_grads = all_grads[:len(d_vars)]
    if self.config.type == 'sga':
        Jgrads = tf.gradients(d_grads, d_vars, grad_ys=d_grads, stop_gradients=d_vars) + [tf.zeros_like(g) for g in g_vars]
    elif self.config.type == 'magnitude':
        consensus_reg = [tf.square(g) for g in d_grads if g is not None]
        Jgrads = tf.gradients(consensus_reg, d_vars) + [tf.zeros_like(g) for g in g_vars]
    else:
        consensus_reg = 0.5 * sum(
                tf.reduce_sum(tf.square(g)) for g in d_grads if g is not None
        )
        Jgrads = tf.gradients(consensus_reg, d_vars, stop_gradients=d_vars) + [tf.zeros_like(g) for g in g_vars]
    new_grads = [g+jg*self._beta if jg is not None else g for g,v,jg in zip(all_grads, var_list, Jgrads)]
    new_grads_and_vars = list(zip(new_grads, var_list)).copy()
    return self.optimizer.apply_gradients(new_grads_and_vars, global_step=global_step, name=name)
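The `consensus_reg` branch above implements consensus optimization: the regularizer is half the squared gradient norm, and its gradient (`Jgrads`) is added to the raw gradients scaled by `self._beta`. A minimal TF 2.x sketch of that second-order step using nested GradientTapes; `params` and `loss_fn` are hypothetical:

import tensorflow as tf

params = [tf.Variable([1.0, -2.0]), tf.Variable([0.5])]
loss_fn = lambda: tf.reduce_sum(params[0] ** 2) * params[1][0]

with tf.GradientTape() as outer:
    with tf.GradientTape() as inner:
        loss = loss_fn()
    grads = inner.gradient(loss, params)
    # Consensus term: 0.5 * sum of squared gradient norms.
    consensus = 0.5 * tf.add_n([tf.reduce_sum(tf.square(g)) for g in grads])
j_grads = outer.gradient(consensus, params)

beta = 0.1
new_grads = [g + beta * (jg if jg is not None else tf.zeros_like(g))
             for g, jg in zip(grads, j_grads)]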
Example #4
    def __init__(self,
                 sess,
                 dataset_name='facades',
                 checkpoint_dir=None):
        self.sess = sess
        self.dataset_name = dataset_name
        self.checkpoint_dir = checkpoint_dir

        self.real_data = tf.placeholder(tf.float32,
                                        [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3 + 3],
                                        name='input_images')
        self.real_A = self.real_data[:, :, :, :3]
        self.real_B = self.real_data[:, :, :, 3:6]

        self.fake_B = generator(self.real_A, name="generatorA2B")
        self.fake_A = generator(self.real_B, name="generatorB2A")
        self.fake_B_fake_A = generator(self.fake_B, reuse=True, name="generatorB2A")
        self.fake_A_fake_B = generator(self.fake_A, reuse=True, name="generatorA2B")

        self.DA_real = discriminator(self.real_A, reuse=False, name="descriminatorA")
        self.DB_real = discriminator(self.real_B, reuse=False, name="descriminatorB")
        self.DA_fake = discriminator(self.fake_A, reuse=True, name="descriminatorA")
        self.DB_fake = discriminator(self.fake_B, reuse=True, name="descriminatorB")

        self.g_loss_a2b = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.DB_fake, labels=tf.ones_like(self.DB_fake))) + 100 * tf.reduce_mean(
            tf.abs(self.real_A - self.fake_B_fake_A)) + 100 * tf.reduce_mean(
            tf.abs(self.real_B - self.fake_B))
        self.g_loss_b2a = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.DA_fake, labels=tf.ones_like(self.DA_fake))) + 100 * tf.reduce_mean(
            tf.abs(self.real_B - self.fake_A_fake_B)) + 100 * tf.reduce_mean(
            tf.abs(self.real_A - self.fake_A))
        self.g_loss = self.g_loss_a2b + self.g_loss_b2a

        self.d_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.DB_fake, labels=tf.zeros_like(self.DB_fake))) + tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.DB_real, labels=tf.ones_like(self.DB_real))) + tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.DA_fake, labels=tf.zeros_like(self.DA_fake))) + tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.DA_real, labels=tf.ones_like(self.DA_real)))

        self.d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)
        self.g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
        self.g_loss_a2b_sum = tf.summary.scalar("g_loss_a2b", self.g_loss_a2b)
        self.g_loss_b2a_sum = tf.summary.scalar("g_loss_b2a", self.g_loss_b2a)
        self.real_A_sum = tf.summary.image("real_A", self.real_A)
        self.real_B_sum = tf.summary.image("real_B", self.real_B)
        self.fake_A_sum = tf.summary.image("fake_A", self.fake_A)
        self.fake_B_sum = tf.summary.image("fake_B", self.fake_B)
        self.fake_AB_sum = tf.summary.image("fake_AB", self.fake_A_fake_B)
        self.fake_BA_sum = tf.summary.image("fake_BA", self.fake_B_fake_A)

        self.d_sum = tf.summary.merge([self.d_loss_sum])
        self.g_sum = tf.summary.merge([self.g_loss_sum, self.g_loss_a2b_sum, self.g_loss_b2a_sum,
                                       self.real_A_sum, self.real_B_sum, self.fake_A_sum,
                                       self.fake_B_sum, self.fake_AB_sum, self.fake_BA_sum])

        training_vars = tf.trainable_variables()
        self.d_vars = [var for var in training_vars if 'd_' in var.name]
        self.g_vars = [var for var in training_vars if 'g_' in var.name]
        self.saver = tf.train.Saver(max_to_keep=5)
Example #5
  def UpdateProbs(self, inp):
    """Update probabilities of each particle based on 2D matrix inp which is a 2D perspectiuve projection of the scene"""

    projection, onscreen = self.project()
    filtered_projection = tf.to_int64(tf.select(onscreen, projection, tf.zeros_like(projection)))
    per_state_probabilities = tf.gather_nd(inp, filtered_projection)
    
    filtered_probabilities = tf.select(onscreen, per_state_probabilities, tf.zeros_like(per_state_probabilities))
    
    new_state_indices = tf.squeeze(tf.multinomial(tf.expand_dims(tf.log(filtered_probabilities), 0), self.particles // 10 * 9))

    new_state = tf.gather(self.state, new_state_indices)
    
    # Add momentum
    new_state = tf.concat(1, [new_state[:, 0:3] + new_state[:, 3:6], new_state[:, 3:10]])
    
    # Add in particles for the "just come onscreen" case.
    new_state = tf.concat(0, [new_state, tf.random_normal([self.particles // 10, 10]) * self.initial_std + self.initial_bias])

    
    new_state = new_state + tf.random_normal([self.particles, 10]) * self.update_std
    # Todo:  permute state by adding noise.

    
    return self.state.assign(new_state)
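`tf.select` and `tf.multinomial` above are pre-1.0 APIs; a minimal TF 2.x sketch of the same resampling step (drawing particle indices in proportion to their probabilities), where `probs` stands in for `filtered_probabilities`:

import tensorflow as tf

state = tf.random.normal([100, 10])    # 100 particles, 10 state dims
probs = tf.random.uniform([100])       # hypothetical per-particle weights
idx = tf.squeeze(tf.random.categorical(tf.math.log(probs)[None, :], 90))
new_state = tf.gather(state, idx)      # resample 90 particles by weight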
Example #6
File: models.py Project: 812864539/models
  def __call__(self, next_state, observation, t):
    # next state = z_{t+1}
    # Compute the q distribution over z, q(z_{t}|z_n, z_{t+1}).
    q_zt = self.q_zt(observation, next_state, t)
    # sample from q
    zt = q_zt.sample()
    # Compute the p distribution over z, p(z_{t+1}|z_{t}).
    p_zt = self.p_zt(zt, t)
    # Compute log p(z_{t+1} | z_t)
    if t == 0:
      log_p_zt = p_zt.log_prob(observation)
    else:
      log_p_zt = p_zt.log_prob(next_state)

    # Compute r prior over zt
    r_zt = self.r(zt, t)
    log_r_zt = r_zt.log_prob(zt)
    # Compute proposal density at zt
    log_q_zt = q_zt.log_prob(zt)
    # If we're at the last timestep, also calc the logprob of the observation.

    if t == self.num_timesteps - 1:
      p_z0_dist = tf.contrib.distributions.Normal(
          loc=tf.zeros_like(zt), scale=tf.ones_like(zt))
      z0_log_prob = p_z0_dist.log_prob(zt)
    else:
      z0_log_prob = tf.zeros_like(log_q_zt)
    return (zt, log_q_zt, log_p_zt, z0_log_prob, log_r_zt)
Example #7
 def Loop(cell, w, i):
   x = tf.unpack(i, self.NUM_UNROLL)
   m = tf.zeros_like(x[0])
   c = tf.zeros_like(x[0])
   for i in range(self.NUM_UNROLL):
     m, c = cell(x[i], m, c, w)
   return m
Example #8
    def loss(self, x, y):
        '''
        Args:
            x: shape=[s, b, c]
            y: shape=[s, b]
        Returns:
            a `dict` of losses
        '''
        z_mu, z_lv = self._encode(x, is_training=self.is_training)
        z = GaussianSampleLayer(z_mu, z_lv)
        xh = self._decode(z, y, is_training=self.is_training)

        with tf.name_scope('loss'):
            with tf.name_scope('E_log_p_x_zy'):
                L_x = -1.0 * tf.reduce_mean(
                    GaussianLogDensity(x, xh, tf.zeros_like(x)),
                )
            with tf.name_scope('D_KL_z'):
                L_z = tf.reduce_mean(
                    GaussianKLD(
                        z_mu, z_lv,
                        tf.zeros_like(z_mu), tf.zeros_like(z_lv)
                    )
                )
            loss = {
                'L_x': L_x,
                'L_z': L_z,
            }

        tf.summary.scalar('L_x', L_x)
        tf.summary.scalar('L_z', L_z)
        return loss
Example #9
def evaluate_precision_recall(
    input_layer, labels, threshold=0.5, per_example_weights=None, name=PROVIDED, phase=Phase.train
):
    """Computes the precision and recall of the prediction vs the labels.

  Args:
    input_layer: A Pretty Tensor object.
    labels: The target labels to learn as a float tensor.
    threshold: The threshold to use to decide if the prediction is true.
    per_example_weights: A Tensor with a weight per example.
    name: An optional name.
    phase: The phase of this model; non-training phases compute a total across
      all examples.
  Returns:
    Precision and Recall.
  """
    _ = name  # Eliminate warning, name used for namescoping by PT.
    selected, sum_retrieved, sum_relevant = _compute_precision_recall(
        input_layer, labels, threshold, per_example_weights
    )

    if phase != Phase.train:
        dtype = tf.float32
        # Create the variables in all cases so that the load logic is easier.
        relevant_count = tf.get_variable(
            "relevant_count",
            [],
            dtype,
            tf.zeros_initializer,
            collections=[bookkeeper.GraphKeys.TEST_VARIABLES],
            trainable=False,
        )
        retrieved_count = tf.get_variable(
            "retrieved_count",
            [],
            dtype,
            tf.zeros_initializer,
            collections=[bookkeeper.GraphKeys.TEST_VARIABLES],
            trainable=False,
        )
        selected_count = tf.get_variable(
            "selected_count",
            [],
            dtype,
            tf.zeros_initializer,
            collections=[bookkeeper.GraphKeys.TEST_VARIABLES],
            trainable=False,
        )

        with input_layer.g.device(selected_count.device):
            selected = tf.assign_add(selected_count, selected)
        with input_layer.g.device(retrieved_count.device):
            sum_retrieved = tf.assign_add(retrieved_count, sum_retrieved)
        with input_layer.g.device(relevant_count.device):
            sum_relevant = tf.assign_add(relevant_count, sum_relevant)

    return (
        tf.select(tf.equal(sum_retrieved, 0), tf.zeros_like(selected), selected / sum_retrieved),
        tf.select(tf.equal(sum_relevant, 0), tf.zeros_like(selected), selected / sum_relevant),
    )
Example #10
    def compute_losses(self, images, wrong_images, fake_images, embeddings):
        real_logit = self.model.get_discriminator(images, embeddings)
        wrong_logit = self.model.get_discriminator(wrong_images, embeddings)
        fake_logit = self.model.get_discriminator(fake_images, embeddings)

        real_d_loss =\
            tf.nn.sigmoid_cross_entropy_with_logits(real_logit,
                                                    tf.ones_like(real_logit))
        real_d_loss = tf.reduce_mean(real_d_loss)
        wrong_d_loss =\
            tf.nn.sigmoid_cross_entropy_with_logits(wrong_logit,
                                                    tf.zeros_like(wrong_logit))
        wrong_d_loss = tf.reduce_mean(wrong_d_loss)
        fake_d_loss =\
            tf.nn.sigmoid_cross_entropy_with_logits(fake_logit,
                                                    tf.zeros_like(fake_logit))
        fake_d_loss = tf.reduce_mean(fake_d_loss)
        if cfg.TRAIN.B_WRONG:
            discriminator_loss =\
                real_d_loss + (wrong_d_loss + fake_d_loss) / 2.
            self.log_vars.append(("d_loss_wrong", wrong_d_loss))
        else:
            discriminator_loss = real_d_loss + fake_d_loss
        self.log_vars.append(("d_loss_real", real_d_loss))
        self.log_vars.append(("d_loss_fake", fake_d_loss))

        generator_loss = \
            tf.nn.sigmoid_cross_entropy_with_logits(fake_logit,
                                                    tf.ones_like(fake_logit))
        generator_loss = tf.reduce_mean(generator_loss)

        return discriminator_loss, generator_loss
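The positional calls above predate TF 1.0; later TensorFlow versions require named arguments for `sigmoid_cross_entropy_with_logits`. A minimal equivalent sketch of the discriminator/generator losses, assuming TF 2.x and dummy logits:

import tensorflow as tf

fake_logit = tf.constant([[0.3], [-1.2]])
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    labels=tf.zeros_like(fake_logit), logits=fake_logit))  # D pushes fakes to 0
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    labels=tf.ones_like(fake_logit), logits=fake_logit))   # G pushes fakes to 1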
Example #11
  def default_exchange_proposed_fn_(num_replica, seed=None):
    """Default function for `exchange_proposed_fn` of `kernel`."""
    num_replica = tf.to_int32(num_replica)

    seed = distributions_util.gen_new_seed(seed, 'default_exchange_proposed_fn')
    random_uniform = tf.random_uniform([], seed=seed)
    accept_proposed_exchange = random_uniform < probs

    seed = distributions_util.gen_new_seed(seed, 'default_exchange_proposed_fn')
    zero_start = tf.random_uniform([], seed=seed) > 0.5
    if num_replica % 2 == 0:
      exchange_proposed = tf.where(
          zero_start, tf.range(num_replica),
          tf.sparse_to_dense(tf.range(num_replica - 2), (num_replica,),
                             tf.range(1, num_replica - 1)))
      exchange_proposed_n = tf.where(zero_start, num_replica // 2,
                                     num_replica // 2 - 1)
    else:
      exchange_proposed = tf.where(
          zero_start, tf.range(num_replica - 1), tf.range(1, num_replica))
      exchange_proposed_n = num_replica // 2

    exchange_proposed = tf.reshape(exchange_proposed, (num_replica // 2, 2))
    exchange_proposed = tf.where(accept_proposed_exchange, exchange_proposed,
                                 tf.zeros_like(exchange_proposed))
    exchange_proposed_n = tf.where(accept_proposed_exchange,
                                   exchange_proposed_n,
                                   tf.zeros_like(exchange_proposed_n))
    return exchange_proposed, exchange_proposed_n
Example #12
File: dae.py Project: hussius/StackedDAE
    def _loss_x_entropy(self, x, z, noise=None):
        with tf.name_scope("xentropy_loss"):
            z_clipped = tf.clip_by_value(z, FLAGS.zero_bound, FLAGS.one_bound)
            z_minus_1_clipped = tf.clip_by_value((1.0 - z), FLAGS.zero_bound, FLAGS.one_bound)
            x_clipped = tf.clip_by_value(x, FLAGS.zero_bound, FLAGS.one_bound)
            x_minus_1_clipped = tf.clip_by_value((1.0 - x), FLAGS.zero_bound, FLAGS.one_bound)
            
            # cross_entropy = x * log(z) + (1 - x) * log(1 - z)
            
            cross_entropy = tf.add(tf.mul(tf.log(z_clipped), x_clipped),
                                   tf.mul(tf.log(z_minus_1_clipped), x_minus_1_clipped), name='X-Entr')

            if noise:
                with tf.name_scope("Given_Emphasis"):
                    a, b = self._get_emph_params
                    corrupted = tf.select(noise, cross_entropy, tf.zeros_like(cross_entropy), name='Corrupted_Emphasis')
                    
                    # OR -- tf.select(tf.logical_not(noisy_points), cross_entropy, tf.zeros_like(cross_entropy), name='Uncorrupted_Emphasis')
                    uncorrupted = tf.select(noise, tf.zeros_like(cross_entropy), cross_entropy, name='Uncorrupted_Emphasis')
                    
                    loss = a * (-1 * tf.reduce_sum(corrupted, 1)) + b * (-1 * tf.reduce_sum(uncorrupted, 1))
            else:
                # Sum the cost for each example
                loss = -1 * tf.reduce_sum(cross_entropy, 1)
        
            # Reduce mean to find the overall cost of the loss
            cross_entropy_mean = tf.reduce_mean(loss, name='xentropy_mean')
    
            return cross_entropy_mean
Example #13
    def cut(self, hits, start, end):
        """
        Cuts the [start:end] range from the input data
        :param hits: hits timeseries
        :param start: start index
        :param end: end index
        :return: tuple (train_hits, test_hits, dow, lagged_hits)
        """
        # Pad hits to ensure we have enough array length for prediction
        hits = tf.concat([hits, tf.fill([self.predict_window], np.NaN)], axis=0)
        cropped_hit = hits[start:end]

        # cut day of week
        cropped_dow = self.inp.dow[start:end]

        # Cut lagged hits
        # gather() accepts only int32 indexes
        cropped_lags = tf.cast(self.inp.lagged_ix[start:end], tf.int32)
        # Mask for -1 (no data) lag indexes
        lag_mask = cropped_lags < 0
        # Convert -1 to 0 for gather(); it doesn't accept anything exotic
        cropped_lags = tf.maximum(cropped_lags, 0)
        # Translate lag indexes to hit values
        lagged_hit = tf.gather(hits, cropped_lags)
        # Convert masked (see above) or NaN lagged hits to zeros
        lag_zeros = tf.zeros_like(lagged_hit)
        lagged_hit = tf.where(lag_mask | tf.is_nan(lagged_hit), lag_zeros, lagged_hit)

        # Split for train and test
        x_hits, y_hits = tf.split(cropped_hit, [self.train_window, self.predict_window], axis=0)

        # Convert NaN to zero in the train data
        x_hits = tf.where(tf.is_nan(x_hits), tf.zeros_like(x_hits), x_hits)
        return x_hits, y_hits, cropped_dow, lagged_hit
Example #14
  def __init__(self, gan=None, config=None, trainer=None, name="ProgressCompressTrainHook"):
    super().__init__(config=config, gan=gan, trainer=trainer, name=name)
    d_loss = []

    self.x = tf.Variable(tf.zeros_like(gan.inputs.x))
    self.g = tf.Variable(tf.zeros_like(gan.generator.sample))

    stacked = tf.concat([self.gan.inputs.x, self.gan.generator.sample], axis=0)
    self.assign_x = tf.assign(self.x, gan.inputs.x)
    self.assign_g = tf.assign(self.g, gan.generator.sample)
    self.re_init_d = [d.initializer for d in gan.discriminator.variables()]
    gan.hack = self.g

    self.assign_knowledge_base = []

    bs = gan.batch_size()
    real = gan.discriminator.named_layers['knowledge_base_target']#tf.reshape(gan.loss.sample[:2], [2,-1])
    _inputs = hc.Config({'x':real})
    inner_gan = KBGAN(config=self.config.knowledge_base, inputs=_inputs, x=real, latent=stacked)
    self.kb_loss = inner_gan.loss
    self.kb = inner_gan.generator
    self.trainer = inner_gan.trainer
    variables = inner_gan.variables()
    #variables += self.kb.variables()

    for c in gan.components:
        if hasattr(c, 'knowledge_base'):
            for name, net in c.knowledge_base:
                assign = self.kb.named_layers[name]
                if self.ops.shape(assign)[0] > self.ops.shape(net)[0]:
                    assign = tf.slice(assign,[0 for i in self.ops.shape(net)] , [self.ops.shape(net)[0]]+self.ops.shape(assign)[1:])
                self.assign_knowledge_base.append(tf.assign(net, assign))

    self.gan.add_metric('d_kb', self.kb_loss.sample[0])
    self.gan.add_metric('g_kb', self.kb_loss.sample[1])
Example #15
  def _survival_function(self, y):
    low = self._low
    high = self._high

    # Recall the promise:
    # survival_function(y) := P[Y > y]
    #                       = 0, if y >= high,
    #                       = 1, if y < low,
    #                       = P[X > y], otherwise.

    # P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
    # between.
    j = tf.ceil(y)

    # P[X > j], used when low < X < high.
    result_so_far = self.distribution.survival_function(j)

    # Broadcast, because it's possible that this is a single distribution being
    # evaluated on a number of samples, or something like that.
    j += tf.zeros_like(result_so_far)

    # Re-define values at the cutoffs.
    if low is not None:
      result_so_far = tf.where(j < low, tf.ones_like(result_so_far),
                               result_so_far)
    if high is not None:
      result_so_far = tf.where(j >= high, tf.zeros_like(result_so_far),
                               result_so_far)

    return result_so_far
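The `j += tf.zeros_like(result_so_far)` line is a broadcast idiom: it expands `j` to the shape of `result_so_far` without changing its values, so the later `tf.where` comparisons see matching shapes. A minimal TF 2.x sketch:

import tensorflow as tf

j = tf.constant(2.0)                    # scalar input
result = tf.constant([0.1, 0.5, 0.9])   # per-sample results
j = j + tf.zeros_like(result)           # j now has shape [3]: [2., 2., 2.]
print(tf.where(j < 1.0, tf.ones_like(result), result))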
Example #16
File: models.py Project: 812864539/models
  def __call__(self, prev_state, observations, t):
    """Computes the importance weight for the model system.

    Args:
      prev_state: [batch_size, state_size] Tensor
      observations: [batch_size, num_observations, state_size] Tensor
    """
    # Compute the q distribution over z, q(z_t|z_n, z_{t-1}).
    q_zt = self.q.q_zt(observations, prev_state, t)
    # Compute the p distribution over z, p(z_t|z_{t-1}).
    p_zt = self.p.p_zt(prev_state, t)
    # sample from q and evaluate the logprobs, summing over the state size
    zt = q_zt.sample()
    log_q_zt = tf.reduce_sum(q_zt.log_prob(zt), axis=1)
    log_p_zt = tf.reduce_sum(p_zt.log_prob(zt), axis=1)
    if not self.disable_r and t < self.num_timesteps-1:
      # score the remaining observations using r
      r_xn = self.r.r_xn(zt, t)
      log_r_xn = r_xn.log_prob(observations[:, self.next_obs_ind(t+1):, :])
      # sum over state size and observation, leaving the batch index
      log_r_xn = tf.reduce_sum(log_r_xn, axis=[1,2])
    else:
      log_r_xn = tf.zeros_like(log_p_zt)
    if t != 0 and t % self.steps_per_obs == 0:
      generative_dist = self.p.generative(zt, t)
      log_p_x_given_z = generative_dist.log_prob(observations[:,self.next_obs_ind(t),:])
      log_p_x_given_z = tf.reduce_sum(log_p_x_given_z, axis=1)
    else:
      log_p_x_given_z = tf.zeros_like(log_q_zt)
    return (zt, log_q_zt, log_p_zt, log_p_x_given_z, log_r_xn)
Example #17
  def _extend_support(self, x, f, alt):
    """Returns `f(x)` if x is in the support, and `alt` otherwise.

    Given `f` which is defined on the support of this distribution
    (e.g. x > scale), extend the function definition to the real line
    by defining `f(x) = alt` for `x < scale`.

    Args:
      x: Floating-point Tensor to evaluate `f` at.
      f: Lambda that takes in a tensor and returns a tensor. This represents
        the function whose domain of definition we want to extend.
      alt: Python or numpy literal representing the value to use for extending
        the domain.
    Returns:
      Tensor representing an extension of `f(x)`.
    """
    # We need to do a series of broadcasts for the tf.where.
    scale = self.scale + tf.zeros_like(self.concentration)
    is_invalid = x < scale
    scale = scale + tf.zeros_like(x)
    x = x + tf.zeros_like(scale)
    # We need to do this to ensure gradients are sound.
    y = f(tf.where(is_invalid, scale, x))
    if alt == 0.:
      alt = tf.zeros_like(y)
    elif alt == 1.:
      alt = tf.ones_like(y)
    else:
      alt = tf.fill(
          dims=tf.shape(y),
          value=np.array(alt, dtype=self.dtype.as_numpy_dtype))
    return tf.where(is_invalid, alt, y)
Example #18
File: model.py Project: Hukongtao/models
 def compute_first_or_last(self, select, first=True):
   # perform first or last operation on row select with probabilistic row selection
   answer = tf.zeros_like(select)
   running_sum = tf.zeros([self.batch_size, 1], self.data_type)
   for i in range(self.max_elements):
     if (first):
       current = tf.slice(select, [0, i], [self.batch_size, 1])
     else:
       current = tf.slice(select, [0, self.max_elements - 1 - i],
                          [self.batch_size, 1])
     curr_prob = current * (1 - running_sum)
     curr_prob = curr_prob * tf.cast(curr_prob >= 0.0, self.data_type)
     running_sum += curr_prob
     temp_ans = []
     curr_prob = tf.expand_dims(tf.reshape(curr_prob, [self.batch_size]), 0)
     for i_ans in range(self.max_elements):
       if (not (first) and i_ans == self.max_elements - 1 - i):
         temp_ans.append(curr_prob)
       elif (first and i_ans == i):
         temp_ans.append(curr_prob)
       else:
         temp_ans.append(tf.zeros_like(curr_prob))
     temp_ans = tf.transpose(tf.concat(axis=0, values=temp_ans))
     answer += temp_ans
   return answer
Example #19
  def _log_cdf(self, y):
    low = self._low
    high = self._high

    # Recall the promise:
    # cdf(y) := P[Y <= y]
    #         = 1, if y >= high,
    #         = 0, if y < low,
    #         = P[X <= y], otherwise.

    # P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
    # between.
    j = tf.floor(y)

    result_so_far = self.distribution.log_cdf(j)

    # Broadcast, because it's possible that this is a single distribution being
    # evaluated on a number of samples, or something like that.
    j += tf.zeros_like(result_so_far)

    # Re-define values at the cutoffs.
    if low is not None:
      neg_inf = -np.inf * tf.ones_like(result_so_far)
      result_so_far = tf.where(j < low, neg_inf, result_so_far)
    if high is not None:
      result_so_far = tf.where(j >= high, tf.zeros_like(result_so_far),
                               result_so_far)

    return result_so_far
Example #20
    def loss(self, x, y):
        with tf.name_scope('loss'):
            z_mu, z_lv = self._encode(x)
            z = GaussianSampleLayer(z_mu, z_lv)
            xh = self._generate(z, y)

            D_KL = tf.reduce_mean(
                GaussianKLD(
                    slim.flatten(z_mu),
                    slim.flatten(z_lv),
                    slim.flatten(tf.zeros_like(z_mu)),
                    slim.flatten(tf.zeros_like(z_lv)),
                )
            )
            logPx = tf.reduce_mean(
                GaussianLogDensity(
                    slim.flatten(x),
                    slim.flatten(xh),
                    tf.zeros_like(slim.flatten(xh))),
            )

        loss = dict()
        loss['G'] = - logPx + D_KL
        loss['D_KL'] = D_KL
        loss['logP'] = logPx

        tf.summary.scalar('KL-div', D_KL)
        tf.summary.scalar('logPx', logPx)

        tf.summary.histogram('xh', xh)
        tf.summary.histogram('x', x)
        return loss
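Passing `tf.zeros_like(z_mu)` and `tf.zeros_like(z_lv)` makes `GaussianKLD` measure divergence from a standard normal prior. A minimal sketch of that closed form (TF 2.x), where `lv` denotes log-variance and `kld_to_standard_normal` is a hypothetical helper:

import tensorflow as tf

def kld_to_standard_normal(mu, lv):
    # KL(N(mu, exp(lv)) || N(0, 1)) = 0.5 * sum(exp(lv) + mu^2 - 1 - lv)
    return 0.5 * tf.reduce_sum(tf.exp(lv) + tf.square(mu) - 1.0 - lv, axis=-1)

print(kld_to_standard_normal(tf.zeros([1, 2]), tf.zeros([1, 2])))  # [0.]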
Example #21
def classification_costs(logits, labels, name=None):
    """Compute classification cost mean and classification cost per sample

    Assume unlabeled examples have label == -1. For unlabeled examples, cost == 0.
    Compute the mean over all examples.
    Note that unlabeled examples are treated differently in error calculation.
    """
    with tf.name_scope(name, "classification_costs") as scope:
        applicable = tf.not_equal(labels, -1)

        # Change -1s to zeros to make cross-entropy computable
        labels = tf.where(applicable, labels, tf.zeros_like(labels))

        # This will now have incorrect values for unlabeled examples
        per_sample = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)

        # Retain costs only for labeled
        per_sample = tf.where(applicable, per_sample, tf.zeros_like(per_sample))

        # Take mean over all examples, not just labeled examples.
        labeled_sum = tf.reduce_sum(per_sample)
        total_count = tf.to_float(tf.shape(per_sample)[0])
        mean = tf.div(labeled_sum, total_count, name=scope)

        return mean, per_sample
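A short usage sketch of the masking idea above, assuming TF 2.x: unlabeled examples carry label -1 and contribute zero cost, but the mean is still taken over all examples:

import tensorflow as tf

logits = tf.constant([[2.0, 0.1], [0.2, 1.5], [0.0, 0.0]])
labels = tf.constant([0, -1, 1])               # middle example is unlabeled
applicable = tf.not_equal(labels, -1)
safe_labels = tf.where(applicable, labels, tf.zeros_like(labels))
per_sample = tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=safe_labels, logits=logits)
per_sample = tf.where(applicable, per_sample, tf.zeros_like(per_sample))
mean = tf.reduce_sum(per_sample) / tf.cast(tf.shape(per_sample)[0], tf.float32)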
Example #22
File: gradient.py Project: MLDL/tensorfuse
def grad(cost, wrt, known_grads=None, disconnected_inputs=None):
    ret = tf.gradients(cost, wrt)
    if isinstance(wrt, list):
        return [x if x is not None else tf.zeros_like(wrt[i]) for i, x in enumerate(ret)]
    elif ret[0] is not None:
        return ret[0]
    else:
        return tf.zeros_like(wrt)
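The wrapper above normalizes `tf.gradients` output by replacing None (disconnected) gradients with zeros. A minimal TF 2.x sketch of the same convention with GradientTape:

import tensorflow as tf

a = tf.Variable(3.0)
b = tf.Variable(4.0)            # unused by the cost, so its gradient is None
with tf.GradientTape() as tape:
    cost = a * a
grads = tape.gradient(cost, [a, b])
grads = [g if g is not None else tf.zeros_like(v)
         for g, v in zip(grads, [a, b])]   # [6.0, 0.0]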
Example #23
 def LSTMLoop10(weights, inp):
   x = tf.unpack(inp, self.NUM_UNROLL)
   m = tf.zeros_like(x[0])
   c = tf.zeros_like(x[0])
   assert self.NUM_UNROLL % 10 == 0
   for i in range(0, self.NUM_UNROLL, 10):
     m, c = Loop10(weights, m, c, *x[i:i + 10])
   return m
Example #24
def print_mask_parameter_counts():
    print("# Mask Parameter Counts")
    print("  - Mask1: {0}".format(
        sess.run(tf.reduce_sum(tf.to_float(tf.not_equal(indicator_matrix1, tf.zeros_like(indicator_matrix1)))))))
    print("  - Mask2: {0}".format(
        sess.run(tf.reduce_sum(tf.to_float(tf.not_equal(indicator_matrix2, tf.zeros_like(indicator_matrix2)))))))
    print("  - Mask3: {0}".format(
        sess.run(tf.reduce_sum(tf.to_float(tf.not_equal(indicator_matrix3, tf.zeros_like(indicator_matrix3)))))))
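Each mask count above is `reduce_sum(to_float(not_equal(m, zeros_like(m))))`, i.e. the number of nonzero entries. A minimal TF 2.x sketch showing the equivalence with the built-in `tf.math.count_nonzero`:

import tensorflow as tf

m = tf.constant([[0.0, 1.5], [2.0, 0.0]])
count_a = tf.reduce_sum(tf.cast(tf.not_equal(m, tf.zeros_like(m)), tf.float32))
count_b = tf.cast(tf.math.count_nonzero(m), tf.float32)  # same result
print(count_a.numpy(), count_b.numpy())  # 2.0 2.0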
Example #25
    def bilinear_pool(self, x1, x2):

        p1 = tf.matmul(x1, self.C[0])
        p2 = tf.matmul(x2, self.C[1])
        pc1 = tf.complex(p1, tf.zeros_like(p1))
        pc2 = tf.complex(p2, tf.zeros_like(p2))

        conved = tf.batch_ifft(tf.batch_fft(pc1) * tf.batch_fft(pc2))
        return tf.real(conved)
Example #26
 def _build_pos_and_neg_contribs(self):
     if (self.verbose):
         print("Heads-up: current implementation assumes maxpool layer "
               "is followed by a linear transformation (conv/dense layer)")
     #placeholder; not used for linear layer, hence assumption above
     return tf.zeros_like(tensor=self.get_activation_vars(),
                   name="dummy_pos_cont_"+str(self.get_name())),\
            tf.zeros_like(tensor=self.get_activation_vars(),
                   name="dummy_neg_cont_"+str(self.get_name()))
Example #27
  def construct_model(self, images, actions, rewards):
    images = tf.unstack(images, axis=0)
    actions = tf.unstack(actions, axis=0)
    rewards = tf.unstack(rewards, axis=0)

    batch_size = common_layers.shape_list(images[0])[0]
    context_frames = self.hparams.video_num_input_frames

    # Predicted images and rewards.
    gen_rewards, gen_images, latent_means, latent_stds = [], [], [], []

    # LSTM states.
    lstm_state = [None] * 7

    # Create scheduled sampling function
    ss_func = self.get_scheduled_sample_func(batch_size)

    pred_image = tf.zeros_like(images[0])
    pred_reward = tf.zeros_like(rewards[0])
    latent = None
    for timestep, image, action, reward in zip(
        range(len(images)-1), images[:-1], actions[:-1], rewards[:-1]):
      # Scheduled Sampling
      done_warm_start = timestep > context_frames - 1
      groundtruth_items = [image, reward]
      generated_items = [pred_image, pred_reward]
      input_image, input_reward = self.get_scheduled_sample_inputs(
          done_warm_start, groundtruth_items, generated_items, ss_func)

      # Latent
      # TODO(mbz): should we use input_image instead of image?
      latent_images = tf.stack([image, images[timestep+1]], axis=0)
      latent_mean, latent_std = self.construct_latent_tower(
          latent_images, time_axis=0)
      latent = common_video.get_gaussian_tensor(latent_mean, latent_std)
      latent_means.append(latent_mean)
      latent_stds.append(latent_std)

      # Prediction
      pred_image, lstm_state, _ = self.construct_predictive_tower(
          input_image, input_reward, action, lstm_state, latent)

      if self.hparams.reward_prediction:
        pred_reward = self.reward_prediction(
            pred_image, input_reward, action, latent)
        pred_reward = common_video.decode_to_shape(
            pred_reward, common_layers.shape_list(input_reward), "reward_dec")
      else:
        pred_reward = input_reward

      gen_images.append(pred_image)
      gen_rewards.append(pred_reward)

    gen_images = tf.stack(gen_images, axis=0)
    gen_rewards = tf.stack(gen_rewards, axis=0)

    return gen_images, gen_rewards, latent_means, latent_stds
Example #28
def tf_F1_score(actuals, predictions):
    actuals = tf.reshape(actuals, [-1, 1])
    predictions = tf.reshape(predictions, [-1, 1])

    ones_like_actuals = tf.ones_like(actuals)
    zeros_like_actuals = tf.zeros_like(actuals)
    ones_like_predictions = tf.ones_like(predictions)
    zeros_like_predictions = tf.zeros_like(predictions)

    #true-positive
    tp_op = tf.reduce_sum(
        tf.cast(
            tf.logical_and(
                tf.equal(actuals, ones_like_actuals),
                tf.equal(predictions, ones_like_predictions)
            ),
            dtype=tf.float32
        )
    )
    #true-Negative
    tn_op = tf.reduce_sum(
        tf.cast(
            tf.logical_and(
                tf.equal(actuals, zeros_like_actuals),
                tf.equal(predictions, zeros_like_predictions)
            ),
            dtype=tf.float32
        )
    )
    #false-positive
    fp_op = tf.reduce_sum(
        tf.cast(
            tf.logical_and(
                tf.equal(actuals, zeros_like_actuals),
                tf.equal(predictions, ones_like_predictions)
            ),
            dtype=tf.float32
        )
    )
    #false_Neg
    fn_op = tf.reduce_sum(
        tf.cast(
            tf.logical_and(
                tf.equal(actuals, ones_like_actuals),
                tf.equal(predictions, zeros_like_predictions)
            ),
            dtype=tf.float32
        )
    )

    accuracy = (tp_op + tn_op) / (tp_op + tn_op + fp_op + fn_op)
    precision = tp_op / (tp_op + fp_op)
    recall = tp_op / (tp_op + fn_op)
    f1_score = (2 * (precision * recall)) / (precision + recall)

    return accuracy, [tp_op, tn_op, fp_op, fn_op, f1_score]
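A minimal TF 2.x sketch of the confusion-matrix counting pattern above, with toy labels:

import tensorflow as tf

actuals = tf.constant([1.0, 0.0, 1.0, 0.0])
preds = tf.constant([1.0, 1.0, 0.0, 0.0])
tp = tf.reduce_sum(tf.cast(tf.logical_and(tf.equal(actuals, 1.0),
                                          tf.equal(preds, 1.0)), tf.float32))
fp = tf.reduce_sum(tf.cast(tf.logical_and(tf.equal(actuals, 0.0),
                                          tf.equal(preds, 1.0)), tf.float32))
fn = tf.reduce_sum(tf.cast(tf.logical_and(tf.equal(actuals, 1.0),
                                          tf.equal(preds, 0.0)), tf.float32))
precision = tp / (tp + fp)                          # 0.5
recall = tp / (tp + fn)                             # 0.5
f1 = 2 * precision * recall / (precision + recall)  # 0.5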
Example #29
def sensitivity(logits, labels):
    predictions = tf.argmax(logits, axis=-1)
    actuals = tf.argmax(labels, axis=-1)


    nodule_actuals = tf.ones_like(actuals)
    non_nodule_actuals = tf.zeros_like(actuals)
    nodule_predictions = tf.ones_like(predictions)
    non_nodule_predictions = tf.zeros_like(predictions)

    tp_op = tf.reduce_sum(
        tf.cast(
            tf.logical_and(
                tf.equal(actuals, nodule_actuals),
                tf.equal(predictions, nodule_predictions)
            ),
            tf.float32
        )
    )

    tn_op = tf.reduce_sum(
        tf.cast(
            tf.logical_and(
                tf.equal(actuals, non_nodule_actuals),
                tf.equal(predictions, non_nodule_predictions)
            ),
            tf.float32
        )
    )

    fp_op = tf.reduce_sum(
        tf.cast(
            tf.logical_and(
                tf.equal(actuals, non_nodule_actuals),
                tf.equal(predictions, nodule_predictions)
            ),
            tf.float32
        )
    )

    fn_op = tf.reduce_sum(
        tf.cast(
            tf.logical_and(
                tf.equal(actuals, nodule_actuals),
                tf.equal(predictions, non_nodule_predictions)
            ),
            tf.float32
        )
    )

    false_positive_rate = fp_op / (fp_op + tn_op)

    recall = tp_op / (tp_op + fn_op)

    return recall, false_positive_rate
Example #30
  def __init__(self, batch_env, step, is_training, should_log, config):
    """Create an instance of the PPO algorithm.

    Args:
      batch_env: In-graph batch environment.
      step: Integer tensor holding the current training step.
      is_training: Boolean tensor for whether the algorithm should train.
      should_log: Boolean tensor for whether summaries should be returned.
      config: Object containing the agent configuration as attributes.
    """
    self._batch_env = batch_env
    self._step = step
    self._is_training = is_training
    self._should_log = should_log
    self._config = config
    self._observ_filter = normalize.StreamingNormalize(self._batch_env.observ[0],
                                                       center=True,
                                                       scale=True,
                                                       clip=5,
                                                       name='normalize_observ')
    self._reward_filter = normalize.StreamingNormalize(self._batch_env.reward[0],
                                                       center=False,
                                                       scale=True,
                                                       clip=10,
                                                       name='normalize_reward')
    # Memory stores tuple of observ, action, mean, logstd, reward.
    template = (self._batch_env.observ[0], self._batch_env.action[0], self._batch_env.action[0],
                self._batch_env.action[0], self._batch_env.reward[0])
    self._memory = memory.EpisodeMemory(template, config.update_every, config.max_length, 'memory')
    self._memory_index = tf.Variable(0, False)
    use_gpu = self._config.use_gpu and utility.available_gpus()
    with tf.device('/gpu:0' if use_gpu else '/cpu:0'):
      # Create network variables for later calls to reuse.
      self._network(tf.zeros_like(self._batch_env.observ)[:, None],
                    tf.ones(len(self._batch_env)),
                    reuse=None)
      cell = self._config.network(self._batch_env.action.shape[1].value)
      with tf.variable_scope('ppo_temporary'):
        self._episodes = memory.EpisodeMemory(template, len(batch_env), config.max_length,
                                              'episodes')
        self._last_state = utility.create_nested_vars(cell.zero_state(len(batch_env), tf.float32))
        self._last_action = tf.Variable(tf.zeros_like(self._batch_env.action),
                                        False,
                                        name='last_action')
        self._last_mean = tf.Variable(tf.zeros_like(self._batch_env.action),
                                      False,
                                      name='last_mean')
        self._last_logstd = tf.Variable(tf.zeros_like(self._batch_env.action),
                                        False,
                                        name='last_logstd')
    self._penalty = tf.Variable(self._config.kl_init_penalty, False, dtype=tf.float32)
    self._policy_optimizer = self._config.policy_optimizer(self._config.policy_lr,
                                                           name='policy_optimizer')
    self._value_optimizer = self._config.value_optimizer(self._config.value_lr,
                                                         name='value_optimizer')
Example #31
def naive_conv(full_data=False):
    train_data = np.load(fp.train)
    test_data_y = np.load(fp.test_no_fix)[:, 60:90]
    test_data_x = np.load(fp.test)[:, 0:60]
    source_data = None

    if (full_data):
        whole = np.load(fp.fix_data)
        whole = whole[:, :, 6:9, :]
        train_data = whole.reshape(132 * 92, 90)
        june = np.load(fp.fix_June)
        source_data = june.reshape(132 * 30, 60)

    fp.clear(fp.logdir)

    x = tf.placeholder(tf.float32, [None, n_input])
    y_ = tf.placeholder(tf.float32, [None, n_output])
    is_training = tf.placeholder(tf.bool)
    keep_prob = tf.placeholder(tf.float32)  #dropout (keep probability)

    # Create some wrappers for simplicity
    def conv1d(x, W, b):
        # 1-D convolution built on the Conv2D op, with bias and relu activation
        x = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
        x = tf.nn.bias_add(x, b)
        return tf.nn.relu(x)

    def maxpool2d(x, k=kernel_size_pool):
        # MaxPool2D wrapper
        return tf.nn.max_pool(x,
                              ksize=[1, k, 1, 1],
                              strides=[1, 2, 1, 1],
                              padding='SAME')

    # Create model
    def conv_net(x, weights, biases, dropout):
        # Reshape input picture
        x = tf.reshape(x, shape=[-1, 60, 1, 1])

        # Convolution Layer
        conv1 = conv1d(x, weights['wc1'], biases['bc1'])
        # Max Pooling (down-sampling)
        conv1 = maxpool2d(conv1)

        # Convolution Layer
        conv2 = conv1d(conv1, weights['wc2'], biases['bc2'])
        # Max Pooling (down-sampling)
        conv2 = maxpool2d(conv2)

        # Fully connected layer
        # Reshape conv2 output to fit fully connected layer input

        fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])

        fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])

        bn = tf.contrib.layers.batch_norm(fc1,
                                          decay=0.9,
                                          center=True,
                                          scale=True,
                                          updates_collections=None,
                                          is_training=is_training,
                                          reuse=None,
                                          trainable=True,
                                          scope="bn")
        fc1 = tf.nn.relu(bn)
        # Apply Dropout
        fc1 = tf.nn.dropout(fc1, dropout)

        # Output, class prediction
        out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
        return out

    # Store layers weight & bias
    weights = {
        # kernel_size x 1 conv, 1 input channel, 10 output channels
        'wc1':
        tf.Variable(tf.truncated_normal([kernel_size, 1, 1, 10], stddev=dev)),
        # kernel_size x 1 conv, 10 input channels, 15 output channels
        'wc2':
        tf.Variable(tf.truncated_normal([kernel_size, 1, 10, 15], stddev=dev)),
        # fully connected, 15 * 15 inputs, 200 outputs
        'wd1':
        tf.Variable(tf.truncated_normal([15 * 15, 200], stddev=dev)),
        # 200 inputs, n_output outputs (the prediction)
        'out':
        tf.Variable(tf.truncated_normal([200, n_output], stddev=dev))
    }

    biases = {
        'bc1': tf.Variable(tf.constant(bias_init, shape=[10])),
        'bc2': tf.Variable(tf.constant(bias_init, shape=[15])),
        'bd1': tf.Variable(tf.constant(bias_init, shape=[200])),
        'out': tf.Variable(tf.constant(bias_init, shape=[n_output]))
    }

    # Construct model
    y = conv_net(x, weights, biases, keep_prob)

    # filter the zeros
    zeros = tf.cast(tf.zeros_like(y_), dtype=tf.bool)
    ones = tf.cast(tf.ones_like(y_), dtype=tf.bool)

    loc = tf.where(tf.equal(y_, 0), zeros, ones)
    rel_y = tf.boolean_mask(y_, loc)
    pred_y = tf.boolean_mask(y, loc)

    non_zeros = tf.cast(tf.count_nonzero(y_), tf.float32)  # for later usage
    diff = tf.abs(tf.subtract(rel_y, pred_y))
    ratio = tf.divide(diff, rel_y)
    cost = tf.reduce_sum(ratio)
    res = tf.divide(cost, non_zeros)

    tf.summary.scalar("count", cost)
    tf.summary.scalar("MAPE", res)

    optimizer = tf.train.AdamOptimizer(
        learning_rate=learning_rate).minimize(cost)

    # Initializing the variables
    init = tf.global_variables_initializer()

    # Launch the graph
    with tf.Session() as sess:
        mounts = 1
        for i in train_data.shape:
            mounts = mounts * i
        mounts = mounts // 90
        print("训练的总条数 9768 还是 12114: ", mounts)

        merged = tf.summary.merge_all()
        sess.run(init)
        train_writer = tf.summary.FileWriter(fp.logdir, sess.graph)

        for epoch in range(training_iters):
            train_sample = train_data[0:batch_size]
            train_x = train_sample[:, 0:60]
            train_y = train_sample[:, 60:90]
            sess.run(optimizer,
                     feed_dict={
                         x: train_x,
                         y_: train_y,
                         keep_prob: 0.6,
                         is_training: True
                     })
            np.random.shuffle(train_data)
            if (epoch % 50 == 0):
                summary, acc = sess.run(
                    [merged, res],
                    feed_dict={
                        x: test_data_x,
                        y_: test_data_y,
                        keep_prob: 1,
                        is_training: False
                    })
                train_writer.add_summary(summary, epoch)
                print("{0} : {1} {2}".format(epoch, acc, " "))

        if (full_data):
            res_june = sess.run(y,
                                feed_dict={
                                    x: source_data,
                                    keep_prob: 1.0,
                                    is_training: False
                                })
            res_june = res_june.reshape(132, 30, 30)
            full.write_to_file(res_june)
        train_writer.close()
Example #32
try:
    print(tf.convert_to_tensor([[1, 2], [2]]))  # element counts are inconsistent, so this cannot be converted
except Exception as e:
    print(e)

print(tf.convert_to_tensor(np.ones([3, 4, 2])))

print("----------")

# tf.zeros() / tf.ones() / tf.fill()

print(tf.zeros([]))  # note that a shape is passed in here
print(tf.zeros([3]))
print(tf.zeros([2, 3]))
print(tf.zeros([2, 3, 3]))

print(tf.zeros_like([1, 2, 3]))  # takes any Tensor and returns a Tensor of the same shape, filled with 0s.

print(tf.ones([3]))  # the following work just like tf.zeros()
print(tf.ones_like([1, 2]))

print(tf.fill([2, 2], 0))  # equivalent to tf.zeros([2, 2])
print(tf.fill([3, 3], 9))

print("----------")

# tf.random.normal(): normal distribution
print(tf.random.normal([3, 3], mean=1, stddev=1))  # mean and stddev (standard deviation)
print(tf.random.normal([3, 3]))  # defaults to mean = 0, stddev = 1

# Use a truncated normal distribution to help prevent vanishing gradients
print(tf.random.truncated_normal([3, 3]))  # samples more than two standard deviations from the mean are discarded
Example #33
def nan_to_zero(input_tensor):
    return tf.where(tf.math.is_nan(input_tensor), tf.zeros_like(input_tensor), input_tensor)
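A short usage sketch for `nan_to_zero`, assuming TF 2.x eager mode:

t = tf.constant([1.0, float("nan"), 3.0])
print(nan_to_zero(t))  # [1., 0., 3.]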
Example #34
    def decode_infer_2(self):
        # stage 2, word-level inference using the decoded sequence
        # l_t = decode sequence length
        # during inference, the following graph is constructed using beam search
        hidden_size = self.bert_config.hidden_size
        with self.graph.as_default():
            target_sequence = tf.squeeze(self.decode_seq, axis=1)
            target_sequence = self.trunct(target_sequence)
            target_length = self.decode_length
            target_seg_ids = tf.zeros_like(target_sequence,
                                           dtype=tf.int32,
                                           name='target_seg_ids_infer_2')
            tgt_mask = tf.sequence_mask(target_length,
                                        maxlen=tf.shape(target_sequence)[1],
                                        dtype=tf.float32)  # [b, q']

            is_training = self.is_training
            dec_model = modeling.BertModel(
                config=self.bert_config,
                is_training=is_training,
                input_ids=target_sequence,
                input_mask=tgt_mask,
                token_type_ids=target_seg_ids,
                scope='bert',
                reuse=tf.AUTO_REUSE,
                use_one_hot_embeddings=self.hps.use_tpu
            )  # use_one_hot_embeddings=Flags.tpu ?

            dec_output = dec_model.get_sequence_output()  # [b, l_t, h]
            tgt_embed = dec_output

            # with tf.variable_scope('bert', reuse=True):
            #     with tf.variable_scope('embeddings'), tf.device('/cpu:0'):
            #         # Perform embedding lookup on the target word ids.
            #         (tgt_embed, _) = embedding_lookup(
            #             input_ids=target_sequence,
            #             vocab_size=config.vocab_size,
            #             embedding_size=config.hidden_size,
            #             initializer_range=config.initializer_range,
            #             word_embedding_name='word_embeddings',
            #             use_one_hot_embeddings=False)
            #
            #         # Add positional embeddings and token type embeddings, then layer
            #         # normalize and perform dropout.
            #         tgt_embed = embedding_postprocessor(
            #             input_tensor=tgt_embed,
            #             use_token_type=True,
            #             token_type_ids=target_seg_ids,
            #             token_type_vocab_size=config.type_vocab_size,
            #             token_type_embedding_name='token_type_embeddings',
            #             use_position_embeddings=True,
            #             position_embedding_name='position_embeddings',
            #             initializer_range=config.initializer_range,
            #             max_position_embeddings=config.max_position_embeddings,
            #             dropout_prob=config.hidden_dropout_prob)

            with tf.variable_scope('decoder_2', reuse=True):
                masked_tgt_embed = tgt_embed * tf.expand_dims(tgt_mask, -1)
                second_dec_attn_bias = attention_bias(
                    tf.shape(masked_tgt_embed)[1], 'cloze_bias')
                infer_decoder_input = tf.pad(
                    masked_tgt_embed,
                    [[0, 0], [1, 0], [0, 0]])[:, :-1, :]  # Shift left
                all_att_weights, decoder_output = transformer_decoder(
                    infer_decoder_input,
                    self.enc_output,
                    second_dec_attn_bias,
                    self.enc_attn_bias,
                    self.hps,
                    scope='decoder_2')
                # [b, l_t, e] => [b*l_t, v]
                decoder_output = tf.reshape(decoder_output, [-1, hidden_size])
                second_logits = tf.matmul(decoder_output, self.decoder_weights,
                                          False, True)  # (b*l_t, v)
                vocab_probs = tf.nn.softmax(second_logits)  # [b * l_t, v]
                vocab_size = len(self.hps.vocab)
                with tf.variable_scope('copy', reuse=tf.AUTO_REUSE):
                    logits = calculate_final_logits(
                        decoder_output, all_att_weights, vocab_probs,
                        self.input_ids_oo, self.max_out_oovs, self.input_mask,
                        vocab_size, self.infer_tiled_len)  # [b * l_t, v + v']
                second_log_prob = tf.log(logits)
                # (b, l_t, v)
                extend_vocab_size = tf.add(tf.constant(vocab_size),
                                           self.max_out_oovs)
                second_log_prob = tf.reshape(
                    second_log_prob,
                    [-1, tf.shape(target_sequence)[1], extend_vocab_size])
                second_log_id = tf.argmax(second_log_prob, axis=-1)  # (b, l_t)
        return second_log_id
Example #35
    def decode_infer(self, inputs, state):
        # state['enc']: [b * beam, l_s, e], state['dec']: [b * beam, q', e]
        # q' = previous decode output length
        # during inference, the following graph is constructed using beam search
        with self.graph.as_default():
            config = self.bert_config

            target_sequence = inputs['target']
            target_sequence = self.trunct(target_sequence)
            target_length = inputs['target_length']
            target_seg_ids = tf.zeros_like(target_sequence,
                                           dtype=tf.int32,
                                           name='target_seg_ids_infer')
            tgt_mask = tf.sequence_mask(target_length,
                                        maxlen=tf.shape(target_sequence)[1],
                                        dtype=tf.float32)  # [b, q']

            with tf.variable_scope('bert', reuse=True):
                with tf.variable_scope('embeddings'), tf.device('/cpu:0'):
                    # Perform embedding lookup on the target word ids.
                    (tgt_embed, _) = embedding_lookup(
                        input_ids=target_sequence,
                        vocab_size=config.vocab_size,
                        embedding_size=config.hidden_size,
                        initializer_range=config.initializer_range,
                        word_embedding_name='word_embeddings',
                        use_one_hot_embeddings=False)

                    # Add positional embeddings and token type embeddings, then layer
                    # normalize and perform dropout.
                    tgt_embed = embedding_postprocessor(
                        input_tensor=tgt_embed,
                        use_token_type=True,
                        token_type_ids=target_seg_ids,
                        token_type_vocab_size=config.type_vocab_size,
                        token_type_embedding_name='token_type_embeddings',
                        use_position_embeddings=True,
                        position_embedding_name='position_embeddings',
                        initializer_range=config.initializer_range,
                        max_position_embeddings=config.max_position_embeddings,
                        dropout_prob=config.hidden_dropout_prob)

            with tf.variable_scope('decoder_1', reuse=True):
                # [b, l_t, e]
                masked_tgt_embed = tgt_embed * tf.expand_dims(tgt_mask, -1)
                dec_attn_bias = attention_bias(
                    tf.shape(masked_tgt_embed)[1], "causal")
                decoder_input = tf.pad(
                    masked_tgt_embed,
                    [[0, 0], [1, 0], [0, 0]])[:, :-1, :]  # Shift left

                infer_decoder_input = decoder_input[:, -1:, :]
                infer_dec_attn_bias = dec_attn_bias[:, :, -1:, :]

                all_att_weights, decoder_output, decoder_state = transformer_decoder(
                    infer_decoder_input,
                    self.enc_output,
                    infer_dec_attn_bias,
                    self.enc_attn_bias,
                    self.hps,
                    state=state['decoder'],
                    scope='decoder_1')
                decoder_output = decoder_output[:, -1, :]  # [b * beam, e]
                logits = tf.matmul(decoder_output, self.decoder_weights, False,
                                   True)  # [b * beam, v]
                vocab_probs = tf.nn.softmax(logits)  # [b * l_t, v]
                vocab_size = len(self.hps.vocab)
                with tf.variable_scope('copy', reuse=tf.AUTO_REUSE):
                    logits = calculate_final_logits(
                        decoder_output, all_att_weights, vocab_probs,
                        self.input_ids_oo, self.max_out_oovs, self.input_mask,
                        vocab_size, 1)  # [b * l_t, v + v']
                log_prob = tf.log(logits)
        return log_prob, {
            'encoder': state['encoder'],
            'decoder': decoder_state
        }
Example #36
    def build_model(self):
        self.x_source = tf.placeholder(tf.float32,
                                       shape=[None, self.img_h, self.img_w, 1],
                                       name='x_source')
        self.x_target = tf.placeholder(tf.float32,
                                       shape=[None, self.img_h, self.img_w, 1],
                                       name='x_target')

        self.y_source = tf.placeholder(tf.int32,
                                       shape=[None, self.num_class],
                                       name='y_source')
        self.y_target = tf.placeholder(tf.int32,
                                       shape=[None, self.num_class],
                                       name='y_target')

        self.is_training = tf.placeholder(tf.bool, name='is_training')
        self.keep_rate = tf.placeholder(tf.float32, name='keep_rate')

        tf.summary.image('source_input', self.x_source)
        tf.summary.image('target_input', self.x_target)

        self.pred_source, self.pred_softmax_source, self.feature_source = self.resnet_model(
            input_x=self.x_source,
            model_name='classification_model',
            ksize=3,
            unit_num1=3,
            unit_num2=3,
            unit_num3=3,
            out_channel1=16,
            out_channel2=32,
            out_channel3=64,
            reuse=False)

        self.pred_target, self.pred_softmax_target, self.feature_target = self.resnet_model(
            input_x=self.x_target,
            model_name='classification_model',
            ksize=3,
            unit_num1=3,
            unit_num2=3,
            unit_num3=3,
            out_channel1=16,
            out_channel2=32,
            out_channel3=64,
            reuse=True)

        # feature consistency in scale 3
        self.tar_fea_cons_sc3 = self.ContentPreservedBlock(
            [self.feature_target[2], self.feature_target[3]],
            ksize=3,
            scope_name='cpb3',
            is_training=self.is_training,
            adjustScale=True,
            reuse=False)

        self.D_src_fea_sc3 = self.Discriminator(self.feature_source[3],
                                                ksize=3,
                                                scope_name='d3',
                                                is_training=self.is_training,
                                                keep_rate=self.keep_rate,
                                                reuse=False)

        self.D_tar_fea_sc3 = self.Discriminator(self.feature_target[3],
                                                ksize=3,
                                                scope_name='d3',
                                                is_training=self.is_training,
                                                keep_rate=self.keep_rate,
                                                reuse=True)

        with tf.variable_scope('loss'):
            # supervised loss
            self.supervised_loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(
                    logits=self.pred_source, labels=self.y_source))

            # feature consistency loss
            self.cpb_input = self.tar_fea_cons_sc3
            self.cpb_output = self.feature_target[2]
            self.cpb_loss = tf.losses.absolute_difference(
                labels=self.cpb_output, predictions=self.cpb_input)

            # g losses
            self.target_gloss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=self.D_tar_fea_sc3,
                    labels=tf.ones_like(self.D_tar_fea_sc3)))

            self.g_loss = self.target_gloss + self.cpb_loss + self.lbd * self.supervised_loss

            # d losses
            self.source_dloss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=self.D_src_fea_sc3,
                    labels=tf.ones_like(self.D_src_fea_sc3)))
            self.target_dloss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=self.D_tar_fea_sc3,
                    labels=tf.zeros_like(self.D_tar_fea_sc3)))

            self.d_loss = self.source_dloss + self.target_dloss

            tf.summary.scalar('Loss/supervised_loss', self.supervised_loss)
            tf.summary.scalar('Loss/target_gloss', self.target_gloss)
            tf.summary.scalar('Loss/total_gloss', self.g_loss)
            tf.summary.scalar('Loss/source_dloss', self.source_dloss)
            tf.summary.scalar('Loss/target_dloss', self.target_dloss)
            tf.summary.scalar('Loss/total_dloss', self.d_loss)
            tf.summary.scalar('Loss/cpb_loss', self.cpb_loss)

        with tf.variable_scope('optimization_variables'):
            self.t_var = [var for var in tf.trainable_variables()]

            self.cpb_var = [var for var in self.t_var if 'cpb3' in var.name]
            self.d_var = [var for var in self.t_var if 'd3' in var.name]

            self.encoder_var = [
                var for var in self.t_var if 'classification_model' in var.name
            ]

        with tf.variable_scope('reload_variables'):
            self.g_var = [var for var in tf.global_variables()]
            self.g_var_reload = [
                var for var in self.g_var if 'classification_model' in var.name
            ]

        with tf.variable_scope('optimize'):
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                self.cpb_trainOp = tf.train.AdamOptimizer(self.lr).minimize(
                    self.cpb_loss, var_list=self.cpb_var)
                self.d_trainOp = tf.train.AdamOptimizer(
                    self.lr, beta1=0.5).minimize(self.d_loss,
                                                 var_list=self.d_var)
                self.g_trainOp = tf.train.AdamOptimizer(
                    self.lr, beta1=0.5).minimize(self.g_loss,
                                                 var_list=self.encoder_var)
        with tf.variable_scope('tfSummary'):
            self.merged = tf.summary.merge_all()
            self.writer = tf.summary.FileWriter(self.ckptDir, self.sess.graph)

        with tf.variable_scope('saver'):
            var_list = tf.trainable_variables()
            g_list = tf.global_variables()
            bn_moving_vars = [g for g in g_list if 'moving_mean' in g.name]
            bn_moving_vars += [
                g for g in g_list if 'moving_variance' in g.name
            ]
            var_list += bn_moving_vars
            self.saver = tf.train.Saver(var_list=var_list,
                                        max_to_keep=self.eps)

        with tf.variable_scope('accuracy'):
            self.distribution_source = [
                tf.argmax(self.y_source, 1),
                tf.argmax(self.pred_softmax_source, 1)
            ]
            self.distribution_target = [
                tf.argmax(self.y_target, 1),
                tf.argmax(self.pred_softmax_target, 1)
            ]

            self.correct_prediction_source = tf.equal(
                self.distribution_source[0], self.distribution_source[1])
            self.correct_prediction_target = tf.equal(
                self.distribution_target[0], self.distribution_target[1])

            self.accuracy_source = tf.reduce_mean(
                tf.cast(self.correct_prediction_source, 'float'))
            self.accuracy_target = tf.reduce_mean(
                tf.cast(self.correct_prediction_target, 'float'))
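    # For reference: a minimal sketch of how the three train ops defined above
    # might be alternated each step; the feed values here (batch arguments,
    # keep_rate=0.5) are assumptions, not part of the original example.
    def train_step(self, src_x, src_y, tar_x):
        feed = {
            self.x_source: src_x,
            self.y_source: src_y,
            self.x_target: tar_x,
            self.is_training: True,
            self.keep_rate: 0.5,
        }
        # Update the discriminator, the content-preserving block, then the
        # encoder/generator, each on the same batch.
        self.sess.run(self.d_trainOp, feed_dict=feed)
        self.sess.run(self.cpb_trainOp, feed_dict=feed)
        self.sess.run(self.g_trainOp, feed_dict=feed)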
    def __init__(self,
                 n_in,
                 n_rec,
                 tau=20.,
                 thr=0.03,
                 dt=1.,
                 dtype=tf.float32,
                 dampening_factor=0.3,
                 tau_adaptation=200.,
                 beta=1.6,
                 stop_gradients=False,
                 w_in_init=None,
                 w_rec_init=None,
                 n_refractory=5,
                 w_scale=1.):
        self.n_refractory = n_refractory
        self.tau_adaptation = tau_adaptation
        self.beta = beta
        self.decay_b = np.exp(-dt / tau_adaptation)

        if np.isscalar(tau):
            tau = tf.ones(n_rec, dtype=dtype) * np.mean(tau)
        if np.isscalar(thr):
            thr = tf.ones(n_rec, dtype=dtype) * np.mean(thr)

        tau = tf.cast(tau, dtype=dtype)
        dt = tf.cast(dt, dtype=dtype)

        self.dampening_factor = dampening_factor
        self.stop_gradients = stop_gradients
        self.dt = dt
        self.n_in = n_in
        self.n_rec = n_rec
        self.data_type = dtype

        self._num_units = self.n_rec

        self.tau = tau
        self._decay = tf.exp(-dt / tau)
        self.thr = thr

        with tf.variable_scope('InputWeights'):
            # Input weights
            np.random.seed(5)
            init_w_in_var = w_in_init if w_in_init is not None else \
                (np.random.randn(n_in, n_rec) / np.sqrt(n_in)).astype(np.float32)
            init_w_in_var *= w_scale
            self.w_in_var = tf.Variable(init_w_in_var, dtype=dtype)
            self.w_in_val = self.w_in_var

        with tf.variable_scope('RecWeights'):
            np.random.seed(5)
            init_w_rec_var = w_rec_init if w_rec_init is not None else \
                (np.random.randn(n_rec, n_rec) / np.sqrt(n_rec)).astype(np.float32)
            init_w_rec_var *= w_scale
            self.w_rec_var = tf.Variable(init_w_rec_var, dtype=dtype)
            self.w_rec_val = self.w_rec_var

            self.recurrent_disconnect_mask = np.diag(np.ones(n_rec,
                                                             dtype=bool))

            self.w_rec_val = tf.where(self.recurrent_disconnect_mask,
                                      tf.zeros_like(self.w_rec_val),
                                      self.w_rec_val)  # Disconnect autapses (no self-connections)

        dw_val_dw_var_in = np.ones((n_in, self._num_units))
        dw_val_dw_var_rec = np.ones(
            (self._num_units, self._num_units)) - np.diag(
                np.ones(self._num_units))
        self.dw_val_dw_var = [dw_val_dw_var_in, dw_val_dw_var_rec]

        self.variable_list = [self.w_in_var, self.w_rec_var]
        self.built = True
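# For reference: a NumPy sketch of the leaky dynamics that the decay factors
# above (decay = exp(-dt / tau), decay_b = exp(-dt / tau_adaptation)) implement
# for an adaptive-threshold LIF neuron; the full cell's update order may differ.
import numpy as np

def alif_step(v, b, input_current, decay, decay_b, thr, beta):
    v = decay * v + input_current                 # leaky membrane integration
    b = decay_b * b                               # adaptation decays toward zero
    z = (v > thr + beta * b).astype(np.float32)   # spike on the adaptive threshold
    v = v - z * thr                               # soft reset after a spike
    b = b + z                                     # each spike raises the adaptation
    return v, b, z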
def convAllRotationsCartesianOut(inpt, weightsShape, k_h, s_h, s_w, padding, first, num_bins=32, weightMask=None, steerable=False, cartesian=False, cartesianIn=False, useType="train"):
	if cartesianIn and not cartesian:
		exit("Not supported cartesian ouput with vectorised input.")
	outAngles = [a*2*numpy.pi/num_bins for a in range(num_bins)]
	# outAngles = [tf.constant([a*2*numpy.pi/num_bins]) for a in range(num_bins)]
	convPerAngle = []
	mask = []
	c_i = inpt.get_shape()[-1].value
	if steerable:
		if c_i != weightsShape[-2]:
			basis = misc.getScalarBasis(weightsShape[0], num_bins, weightsShape[-2])
		else:
			num_bins=1
			basis = None
	else:
		if c_i == weightsShape[-2]:
			num_bins=1
	for abin, angle in enumerate(outAngles):
		firstDec = ((abin==0) and (first))
		if steerable:
			with tf.variable_scope("weights", reuse=(not firstDec)) as scope:
				exit("Steerable cartesian not implemented yet.")
				rotatedWeights = misc.getScalarSteerableKernel(weightsShape[0], angle, abin, weightsShape[-2], weightsShape[-1], num_bins=num_bins, first=firstDec, basis=basis, returnAngles=True)
		else:
			with tf.variable_scope("weights", reuse=(not firstDec)) as scope:
				if cartesianIn:
					weights, biases = misc.getWeightsNBiases(first, weightsShape[0], weightsShape[1], weightsShape[-1], 2*num_bins*weightsShape[-2], s_h, s_w, wd=misc.default_nl, lossType="nl")
					rWeights = tf.transpose(transformations.rotateVectorField(tf.transpose(weights, [3,0,1,2]), angle, interpolation="BILINEAR"), [1,2,3,0])
				else:
					weights, biases = misc.getWeightsNBiases(first, weightsShape[0], weightsShape[1], weightsShape[-1], num_bins*weightsShape[-2], s_h, s_w, wd=misc.default_nl, lossType="nl")
					rWeights = tf.transpose(tf.contrib.image.rotate(tf.transpose(weights, [3,0,1,2]), angle, interpolation="BILINEAR"), [1,2,3,0])
			if num_bins == 1:
				rotatedWeights = rWeights
			else:
				rWeights = tf.reshape(rWeights, [weightsShape[0], weightsShape[1], num_bins, weightsShape[-2], weightsShape[-1]])
				inds = [i for i in range(num_bins-abin,num_bins)]+[i for i in range(num_bins-abin)]
				# inds = [i for i in range(abin,-1,-1)]+[i for i in range(num_bins-1,abin,-1)]
				inds = tf.constant(inds)
				angles = tf.cast(inds, tf.float32)*2*numpy.pi/num_bins
				angles = tf.reshape(angles, [1,1,1,num_bins, 1])
				rWeights = tf.gather(rWeights, inds, axis=-3)
				rotatedWeights = tf.unstack(rWeights, axis=-3)
	if weightMask is None:
			exit("Cartesian output without weight mask not implemented yet.")
		inptL = tf.split(inpt, num_bins, axis=-1)
		angleConv = tf.stack([tf.nn.conv2d(inptL[inA], rotatedWeights[inA]*weightMask, [1,s_h,s_w,1], padding) for inA in range(num_bins)], axis=-2)
		# angleConv = tf.stack([tf.nn.bias_add(tf.nn.conv2d(inptL[inA], rotatedWeights[inA]*weightMask, [1,s_h,s_w,1], padding), biases/num_bins) for inA in range(num_bins)], axis=-2)
		inShape = inpt.get_shape().as_list()
		outShape = angleConv.get_shape().as_list()[:-2]
		angles = tf.tile(angles, outShape+[1,weightsShape[-1]])
		# angles=tf.reshape(angles, inShape[:-1]+[weightsShape[-1]*num_bins])
		magnSum = tf.expand_dims(tf.reduce_sum(angleConv, axis=-2), axis=-1)
		magnSum = tf.reshape(tf.tile(magnSum, [1, 1, 1, 1, 2]), outShape+[2*weightsShape[-1]])
		# cart = transformations.changeToCartesian(tf.nn.relu(angleConv), angles)
		cart = transformations.changeToCartesian(angleConv, angles)
		cart = tf.reduce_sum(cart, axis=-2)
		cart = tf.where(magnSum>0, cart, tf.zeros_like(cart))
		if useType == "train":
			with tf.device("/cpu:0"):
				tf.summary.scalar("relued", tf.reduce_sum(tf.cast(tf.equal(cart,0), tf.float32))/tf.size(cart, out_type=tf.float32))
		cart = transformations.rotateVectors(tf.transpose(cart, [1,2,3,0]), angle)
		cart = tf.transpose(cart, [3,0,1,2])
		convPerAngle.append(cart)
		# magnSum = tf.reduce_sum(angleConv, axis=-2)
		# mask.append(tf.where(magnSum>0, tf.ones_like(magnSum), tf.ones_like(magnSum)*(-1)))
	# convPerAngle = tf.concat(convPerAngle, axis=-1)
	convPerAngle = tf.stack(convPerAngle, axis=-2)
	# mask = tf.stack(mask, axis=-2)
	mask = None
	return convPerAngle, mask
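# For reference: the index list built above for each abin is a cyclic shift of
# the orientation channels, i.e. a roll; a small NumPy check:
import numpy as np

num_bins, abin = 8, 3
inds = [i for i in range(num_bins - abin, num_bins)] + [i for i in range(num_bins - abin)]
assert inds == list(np.roll(np.arange(num_bins), abin))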
def cliffordConvAllRotations(inpt, weightsShape, k_h, s_h, s_w, padding, first, num_bins=32, num_angles=4, weightMask=None, steerable=False, cartesian=False, cartesianIn=False):
	outAngles = [[a*2*numpy.pi/num_bins] for a in range(num_bins+1)]
	rotatedWeightsL = []
	for abin, angle in enumerate(outAngles[:-1]):
		firstDec = ((abin==0) and (first))
		with tf.variable_scope("weights", reuse=(not firstDec)) as scope:
			weights, biases = misc.getWeightsNBiases(firstDec, weightsShape[0], weightsShape[1], weightsShape[-1], 2*num_bins*weightsShape[-2], s_h, s_w, wd=misc.default_nl, lossType="nl")
			if abin == 0:
				fcb = biases
			rWeights = transformations.rotateVectorField(weights, angle)
			rWeights = tf.reshape(rWeights, [weightsShape[0], weightsShape[1], num_bins, weightsShape[-2]*2, weightsShape[-1]])
			# inds = [i for i in range(abin,-1,-1)]+[i for i in range(num_bins-1,abin,-1)]
			inds = [i for i in range(num_bins-abin,num_bins)]+[i for i in range(num_bins-abin)]
			inds = tf.constant(inds)
			rWeights = tf.gather(rWeights, inds, axis=-3)
			rotatedWeightsL.append(tf.reshape(rWeights, [weightsShape[0], weightsShape[1], num_bins*weightsShape[-2]*2, weightsShape[-1]]))
	rotatedWeightsL.append(rotatedWeightsL[0])

	thetas = []
	conv0L = []
	for angle in range(0, num_bins, num_bins//num_angles):
		weightSet = rotatedWeightsL[angle]
		fcConv2 = conv_2tf(inpt, weightSet, num_bins*weightsShape[-2], weightsShape[-1], 1, 1, "VALID")
		fcConv0 = tf.nn.conv2d(inpt, weightSet, [1,1,1,1], "VALID")
		angleMask = (tf.square(fcConv0) + tf.square(fcConv2)) < 1.5e-4
		fcConv0 = tf.where(angleMask, tf.ones_like(fcConv0)*(-1), fcConv0)
		fcConv2 = tf.where(angleMask, tf.zeros_like(fcConv2), fcConv2)
		thetas.append(tf.atan2(fcConv2, fcConv0))
		conv0L.append(fcConv0)

	thetas = tf.stack(thetas, axis=-1)
	conv0 = tf.stack(conv0L, axis=-1)
	# winner = tf.argmin(tf.abs(thetas), axis=-1, output_type=tf.int32)
	winner = tf.argmax(conv0, axis=-1, output_type=tf.int32)
	thetas2 = ops.reduceIndex(thetas, winner)
	thetas2, convMask = ops.offsetCorrect(thetas2, [2*numpy.pi/num_angles])
	winnerInd = tf.cast(winner * (num_bins//num_angles), tf.int32)
	quantized = tf.cast(tf.round(thetas2*num_bins/(2*numpy.pi)), tf.int32) + winnerInd
	quantized = tf.where(quantized>num_bins , quantized-num_bins , quantized)
	quantized = tf.where(quantized<0, quantized+num_bins , quantized)
	# wrong = tf.reduce_any(tf.greater(quantized, 0))
	# wrong2 = tf.reduce_any(tf.less(quantized, num_bins))
	# asserts = [tf.Assert(wrong, [-1]), tf.Assert(wrong2, [+1])]
	floatMask = tf.cast(convMask, tf.float32)
	# woaPlus = [rotatedWeightsL[i+1] for i in range(num_bins)] + [rotatedWeightsL[1]]
	# woaMinus = [rotatedWeightsL[num_bins-1]] + [rotatedWeightsL[i-1] for i in range(1,num_bins+1)]
	# woaL = []
	rMask = []
	for rotation in range(num_bins+1):
		rMask.append(tf.cast(tf.equal(quantized, rotation), tf.float32))
		# woaL.append((woaPlus[rotation] - woaMinus[rotation])*num_bins/(4*numpy.pi))
		# flatConvCudaResL.append(tf.nn.conv2d(inpt, rotatedWeightsL[rotation], [1,1,1,1], "VALID")*rMask)
	rMask = tf.concat(rMask, axis=-1)
	rotatedWeights = tf.concat(rotatedWeightsL, axis=-1)
	# woa = tf.concat(woaL, axis=-1)
	# with tf.control_dependencies(asserts):
	flatConvCudaRes = tf.nn.conv2d(inpt, rotatedWeights, [1,1,1,1], "VALID")*rMask
	# flatCorrection = tf.nn.conv2d(inpt, woa, [1,1,1,1], "VALID")*rMask
	inShape = inpt.get_shape().as_list()
	flatConvCudaRes = tf.reshape(flatConvCudaRes, [inShape[0], 1, 1, num_bins+1, weightsShape[-1]])
	# flatCorrection = tf.reshape(flatCorrection, [inShape[0], 1, 1, num_bins+1, weightsShape[-1]])
	convRes = tf.reduce_sum(flatConvCudaRes, axis=-2)*floatMask
	# correction = tf.reduce_sum(flatCorrection, axis=-2)*floatMask
	rotatedWeights = tf.stack(rotatedWeightsL, axis=0)
	# convRes = op_gradients.weightToThetaGrads(inpt, convRes, floatMask, rotatedWeights, quantized, thetas2)
	outAngles = tf.concat(outAngles, axis=0)
	resAngleQuantized = tf.gather(outAngles, quantized)
	resAngle = thetas2 + tf.gather(outAngles, winnerInd)
	# resAngle = resAngleQuantized
	# diff = tf.abs(resAngle - resAngleQuantized)
	diff = resAngleQuantized - resAngle
	# diff = resAngle - resAngleQuantized

	convRes = convRes/tf.cos(diff)
	# convRes = convRes + correction*diff
	# convRes = convRes/tf.stop_gradient(tf.cos(diff))
	# fc = convRes
	fc = tf.nn.bias_add(convRes, fcb)
	regulatedAngles = resAngle
	if first:
		tf.add_to_collection("activationMagnitudes", fc)
		tf.add_to_collection("activationAngles", resAngle)
	return fc, regulatedAngles
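# For reference: the pair of tf.where calls above wraps the quantized bin index
# back into the valid range; a NumPy equivalent (assuming at most one wrap is
# ever needed):
import numpy as np

def wrap_bin(quantized, num_bins):
    quantized = np.where(quantized > num_bins, quantized - num_bins, quantized)
    quantized = np.where(quantized < 0, quantized + num_bins, quantized)
    return quantized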
def fcCliffordLayer(inpt, c_i, c_o, s_h, s_w, first, useType, wd=misc.default_wd, num_bins=16, num_angles=4, normalize=True, reuse_batch_norm=False):
	fcW, fcb = misc.getWeightsNBiases(first, 1, 1, c_o, 2*c_i, s_h, s_w, wd=wd, lossType="nl")
	weightsShape = fcW.get_shape().as_list()
	offset = num_bins//(2*num_angles)
	angles = [tf.constant([(a-offset)*2*numpy.pi/num_bins]) for a in range(num_bins+1)]
	rotatedWeightsL = []
	thetas = []
	conv0L = []
	for angle in angles:
		rotatedWeightsL.append(transformations.rotateVectors(fcW, angle))

	weightsForAngleMeasurement = tf.stack([rotatedWeightsL[i+offset] for i in range(0, num_bins, num_bins//num_angles)], axis=-1)
	weightsForAngleMeasurement = tf.reshape(weightsForAngleMeasurement, weightsShape[:-1]+[num_angles*c_o])
	rotatedWeights = tf.stack(rotatedWeightsL, axis=0)
	quantized, thetas2, convMask, winnerInd = getRotation(inpt, weightsForAngleMeasurement, c_i, c_o, s_h, s_w, offset, "VALID", num_angles, num_bins)
	# for angle in range(0, num_bins, num_bins//num_angles):
	# 	weightSet = rotatedWeightsL[angle + offset]
	# 	if useType == "train" and first:
	# 		with tf.variable_scope(str(angle)) as scope:
	# 			fcConv2 = conv_2tf(inpt, weightSet, c_i, c_o, 1, 1, "VALID", monitor=False)
	# 	else:
	# 		fcConv2 = conv_2tf(inpt, weightSet, c_i, c_o, 1, 1, "VALID")
	# 	fcConv0 = tf.nn.conv2d(inpt, weightSet, [1,1,1,1], "VALID")
	# 	angleMask = (tf.square(fcConv0) + tf.square(fcConv2)) < 1.5e-4
	# 	fcConv0 = tf.where(angleMask, tf.ones_like(fcConv0)*(-1), fcConv0)
	# 	fcConv2 = tf.where(angleMask, tf.zeros_like(fcConv2), fcConv2)
	# 	thetas.append(tf.atan2(fcConv2, fcConv0))
	# 	conv0L.append(fcConv0)

	# thetas = tf.stack(thetas, axis=-1)
	# conv0 = tf.stack(conv0L, axis=-1)
	# # winner = tf.argmin(tf.abs(thetas), axis=-1, output_type=tf.int32)
	# winner = tf.argmax(conv0, axis=-1, output_type=tf.int32)
	# thetas2 = ops.reduceIndex(thetas, winner)
	# thetas2, convMask = ops.offsetCorrect(thetas2, [2*numpy.pi/num_angles])
	# tf.add_to_collection("conv_mask_by_angle", convMask)
	# winnerInd = tf.cast(winner * (num_bins//num_angles), tf.int32) + offset
	# quantized = tf.cast(tf.round(thetas2*num_bins/(2*numpy.pi)), tf.int32) + winnerInd
	# quantized = tf.where(quantized>num_bins , quantized-num_bins , quantized)
	# quantized = tf.where(quantized<0, quantized+num_bins , quantized)
	floatMask = tf.cast(convMask, tf.float32)

	flatConvCudaResL = []
	for rotation in range(num_bins+1):
		rMask = tf.cast(tf.equal(quantized, rotation), tf.float32)
		flatConvCudaResL.append(tf.nn.conv2d(inpt, rotatedWeightsL[rotation], [1,1,1,1], "VALID")*rMask)
	rotatedWeights = tf.stack(rotatedWeightsL, axis=0)
	convRes = tf.reduce_sum(tf.stack(flatConvCudaResL, axis=0), axis=0)*floatMask
	# convRes = op_gradients.weightToThetaGrads(inpt, convRes, floatMask, rotatedWeights, quantized, thetas2)
	angles = tf.concat(angles, axis=0)
	resAngleQuantized = tf.gather(angles, quantized)
	resAngle = thetas2 + tf.gather(angles, winnerInd)
	# diff = tf.abs(resAngle - resAngleQuantized)
	# diff = resAngle - resAngleQuantized
	diff = resAngleQuantized - resAngle
	convRes = convRes/tf.cos(diff)
	# convRes = convRes*resMultiplier/tf.cos(diff)
	# convRes = convRes/tf.stop_gradient(tf.cos(diff))
	if normalize:
		# fc = batch_norm(convRes, useType, reuse_batch_norm) * floatMask
		fc = batch_norm_only_rescale_learn_scale(convRes, useType, reuse_batch_norm)*floatMask
		fc = tf.nn.relu(fc)
		reluMask = tf.where(fc>0, tf.ones_like(fc), tf.zeros_like(fc))
		regulatedAngles = maskAngleGradients(resAngle, reluMask)
	else:
		fc = tf.nn.bias_add(convRes, fcb)
		regulatedAngles = resAngle
	tf.add_to_collection("activationMagnitudes", fc)
	tf.add_to_collection("activationAngles", resAngle)
	return fc, regulatedAngles
Example #41
def overshooting(cell,
                 target,
                 embedded,
                 prev_action,
                 length,
                 amount,
                 ignore_input=False):
    """Perform open loop rollouts from the posteriors at every step.

  First, we apply the encoder to embed raw inputs and apply the model to obtain
  posterior states for every time step. Then, we perform `amount` long open
  loop rollouts from these posteriors.

  Note that the actions should be those leading to the current time step, so
  under the common convention `prev_action` holds the previous actions while
  the observations are the current ones.

  Input:

    target, embedded:
      [A B C D E F] [A B C D E  ]

    prev_action:
      [0 A B C D E] [0 A B C D  ]

    length:
      [6 5]

    amount:
      3

  Output:

    prior, posterior, target:
      [A B C D E F] [A B C D E  ]   o---- chunk_length-->
      [B C D E F  ] [B C D E    ]   |
      [C D E F    ] [C D E      ]   |
      [D E F      ] [D E        ]   amount
                                    |
    mask:
      [1 1 1 1 1 1] [1 1 1 1 1 0]
      [1 1 1 1 1 0] [1 1 1 1 0 0]
      [1 1 1 1 0 0] [1 1 1 0 0 0]
      [1 1 1 0 0 0] [1 1 0 0 0 0]

  """
    # Closed loop unroll to get posterior states, which are the starting points
    # for open loop unrolls. We don't need the last time step, since we have no
    # targets for unrolls from it.
    use_obs = tf.ones(
        tf.shape(nested.flatten(embedded)[0][:, :, :1])[:3],
        tf.bool)  # shape(40,50,1024) --> shape(40,50,1)
    use_obs = tf.cond(tf.convert_to_tensor(ignore_input),
                      lambda: tf.zeros_like(use_obs, tf.bool), lambda: use_obs)
    (prior, posterior), _ = tf.nn.dynamic_rnn(
        cell,
        (embedded, prev_action, use_obs),
        length,
        dtype=tf.float32,  # inputs: shape(batchsize, max_time, ?) = (40, 50, ?); sequence_length: shape(batchsize,) = (40,)
        swap_memory=True)  # calculate the posterior: q(s_{t-d} | o_{<=t-d}, a_{<t-d})

    # Arrange inputs for every iteration in the open loop unroll. Every loop
    # iteration below corresponds to one row in the docstring illustration.
    max_length = shape.shape(nested.flatten(embedded)[0])[1]  # max_length = 50
    first_output = {
        'observ': embedded,  # shape(40,50,1024)
        'prev_action': prev_action,  # shape(40,50,2)
        'posterior': posterior,  # {'mean':shape(40,50,30), ...}
        'target': target,  # {'reward': shape(40,50), ...}
        'mask': tf.sequence_mask(length, max_length, tf.int32),  # shape(40,50)
    }
    progress_fn = lambda tensor: tf.concat([
        tensor[:, 1:], 0 * tensor[:, :1]
    ], 1)  # shift one step along time (episode_length): (a[1], ..., a[T-1], 0)
    other_outputs = tf.scan(  # other_outputs: { 'observ': shape(50(amount),40(batchsize),50(episode_length),1024),...}
        lambda past_output, _: nested.map(
            progress_fn, past_output
        ),  # past_output = progress_fn(past_output), initial past_output is first_output.
        tf.range(amount),
        first_output)  # first_output: { 'observ': shape(40,50,1024),...};
    sequences = nested.map(
        lambda lhs, rhs: tf.concat([lhs[None], rhs], 0),  # first_output[None]
        first_output,
        other_outputs)  # sequences: { 'observ': shape(51,40,50,1024),...};

    # Merge batch and time dimensions of steps to compute unrolls from every
    # time step as one batch. The time dimension becomes the number of
    # overshooting distances.
    sequences = nested.map(
        lambda tensor: _merge_dims(
            tensor, [1, 2]),  # sequences: { 'observ': shape(51,2000,1024),...}
        sequences)
    sequences = nested.map(
        lambda tensor: tf.transpose(tensor, [1, 0] + list(
            range(2, tensor.shape.ndims))),  # [1,0]+[2]
        sequences)  # sequences: { 'observ': shape(2000,51,1024),...}
    merged_length = tf.reduce_sum(sequences['mask'],
                                  1)  # shape(2000,51) --> shape(2000,)

    # Mask out padding frames; unnecessary if the input is already masked.
    sequences = nested.map(
        lambda tensor: tensor * tf.cast(
            _pad_dims(
                sequences['mask'], tensor.shape.ndims
            ),  # sequences['mask']: shape(2000,51) --> shape(2000,51,1); sequences['observ']: shape(2000,51,1024)
            tensor.dtype),  # shape(2000,51,1024)*shape(2000,51,1)
        sequences)

    # Compute open loop rollouts.
    use_obs = tf.zeros(tf.shape(sequences['mask']), tf.bool)[..., None]
    prev_state = nested.map(
        lambda tensor: tf.concat(
            [0 * tensor[:, :1], tensor[:, :-1]], 1
        ),  # {'mean': shape(40,50,30), ...}; on the 1st dimension(episode_length): (s1, s2, ..., s50) --> (0, s1, s2, ..., s49)
        posterior)
    prev_state = nested.map(lambda tensor: _merge_dims(tensor, [0, 1]),
                            prev_state)  # {'mean': shape(2000,30), ...}
    (priors, _), _ = tf.nn.dynamic_rnn(
        cell, (sequences['observ'], sequences['prev_action'], use_obs),
        merged_length, prev_state
    )  # initial_state = prev_state; calculate the prior: p(s_{t-1} | s_{t-d}, a_{t-d-1:t-2})

    # Restore batch dimension.
    target, prior, posterior, mask = nested.map(
        functools.partial(_restore_batch_dim,
                          batch_size=shape.shape(length)[0]),
        (sequences['target'], priors, sequences['posterior'],
         sequences['mask']))

    mask = tf.cast(mask, tf.bool)
    return target, prior, posterior, mask
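# For reference: progress_fn above shifts a tensor one step along time and
# zero-pads the end; applied repeatedly by tf.scan it yields the staircase of
# rollout targets drawn in the docstring. A NumPy equivalent of one application:
import numpy as np

def progress(x):
    return np.concatenate([x[:, 1:], 0 * x[:, :1]], axis=1)

row = np.array([[1, 2, 3, 4]])
print(progress(row))            # [[2 3 4 0]]
print(progress(progress(row)))  # [[3 4 0 0]]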
Example #42
    def __init__(self,
                 graph_path,
                 target_size=(320, 240),
                 tf_config=None,
                 trt_bool=False):
        self.target_size = target_size

        # load graph
        logger.info('loading graph from %s(default size=%dx%d)' %
                    (graph_path, target_size[0], target_size[1]))
        with tf.gfile.GFile(graph_path, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())

        if trt_bool is True:
            output_nodes = ["Openpose/concat_stage7"]
            graph_def = trt.create_inference_graph(
                graph_def,
                output_nodes,
                max_batch_size=1,
                max_workspace_size_bytes=1 << 20,
                precision_mode="FP16",
                # precision_mode="INT8",
                minimum_segment_size=3,
                is_dynamic_op=True,
                maximum_cached_engines=int(1e3),
                # use_calibration=True,
            )

        self.graph = tf.get_default_graph()
        tf.import_graph_def(graph_def, name='TfPoseEstimator')
        self.persistent_sess = tf.Session(graph=self.graph, config=tf_config)

        for ts in [n.name for n in tf.get_default_graph().as_graph_def().node]:
            print(ts)

        self.tensor_image = self.graph.get_tensor_by_name(
            'TfPoseEstimator/image:0')
        self.tensor_output = self.graph.get_tensor_by_name(
            'TfPoseEstimator/Openpose/concat_stage7:0')
        self.tensor_heatMat = self.tensor_output[:, :, :, :19]
        self.tensor_pafMat = self.tensor_output[:, :, :, 19:]
        self.upsample_size = tf.placeholder(dtype=tf.int32,
                                            shape=(2, ),
                                            name='upsample_size')
        self.tensor_heatMat_up = tf.image.resize_area(
            self.tensor_output[:, :, :, :19],
            self.upsample_size,
            align_corners=False,
            name='upsample_heatmat')
        self.tensor_pafMat_up = tf.image.resize_area(
            self.tensor_output[:, :, :, 19:],
            self.upsample_size,
            align_corners=False,
            name='upsample_pafmat')
        if trt_bool is True:
            smoother = Smoother({'data': self.tensor_heatMat_up}, 25, 3.0, 19)
        else:
            smoother = Smoother({'data': self.tensor_heatMat_up}, 25, 3.0)
        gaussian_heatMat = smoother.get_output()

        max_pooled_in_tensor = tf.nn.pool(gaussian_heatMat,
                                          window_shape=(3, 3),
                                          pooling_type='MAX',
                                          padding='SAME')
        self.tensor_peaks = tf.where(
            tf.equal(gaussian_heatMat, max_pooled_in_tensor), gaussian_heatMat,
            tf.zeros_like(gaussian_heatMat))

        self.heatMat = self.pafMat = None

        # warm-up
        self.persistent_sess.run(
            tf.variables_initializer([
                v for v in tf.global_variables() if v.name.split(':')[0] in [
                    x.decode('utf-8') for x in self.persistent_sess.run(
                        tf.report_uninitialized_variables())
                ]
            ]))
        self.persistent_sess.run(
            [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
            feed_dict={
                self.tensor_image: [
                    np.ndarray(shape=(target_size[1], target_size[0], 3),
                               dtype=np.float32)
                ],
                self.upsample_size: [target_size[1], target_size[0]]
            })
        self.persistent_sess.run(
            [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
            feed_dict={
                self.tensor_image: [
                    np.ndarray(shape=(target_size[1], target_size[0], 3),
                               dtype=np.float32)
                ],
                self.upsample_size: [target_size[1] // 2, target_size[0] // 2]
            })
        self.persistent_sess.run(
            [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
            feed_dict={
                self.tensor_image: [
                    np.ndarray(shape=(target_size[1], target_size[0], 3),
                               dtype=np.float32)
                ],
                self.upsample_size: [target_size[1] // 4, target_size[0] // 4]
            })

        # logs
        if self.tensor_image.dtype == tf.quint8:
            logger.info('quantization mode enabled.')
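# For reference: tensor_peaks above keeps only the pixels that equal their
# max-pooled neighborhood (a pooling-based non-maximum suppression); the same
# idea in NumPy/SciPy:
import numpy as np
from scipy.ndimage import maximum_filter

def local_peaks(heat, k=3):
    # A pixel is a peak iff it equals the maximum of its k x k neighborhood.
    return np.where(heat == maximum_filter(heat, size=k), heat, 0.0)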
Example #43
def main(sess):
    dloader = DataLoader(config['DATA_PATH'])

    #tf.reset_default_graph()
    batch_size = config['BATCH_SIZE']
    n_noise = config['N_NOISE']

    X_in = tf.placeholder(dtype=tf.float32,
                          shape=[None] + config['IMAGE_DIM'],
                          name='X')
    noise = tf.placeholder(dtype=tf.float32, shape=[None, n_noise])

    rate = tf.placeholder(dtype=tf.float32, name='rate')
    is_training = tf.placeholder(dtype=tf.bool, name='is_training')

    g = generator(noise, rate=rate, is_training=is_training)
    d_real = discriminator(X_in, rate=rate)
    d_fake = discriminator(g, rate, reuse=True)

    vars_g = [
        var for var in tf.trainable_variables()
        if var.name.startswith("generator")
    ]
    vars_d = [
        var for var in tf.trainable_variables()
        if var.name.startswith("discriminator")
    ]

    d_reg = tf.contrib.layers.apply_regularization(
        tf.contrib.layers.l2_regularizer(1e-6), vars_d)
    g_reg = tf.contrib.layers.apply_regularization(
        tf.contrib.layers.l2_regularizer(1e-6), vars_g)

    loss_d_real = binary_cross_entropy(tf.ones_like(d_real), d_real)
    loss_d_fake = binary_cross_entropy(tf.zeros_like(d_fake), d_fake)

    loss_g = tf.reduce_mean(binary_cross_entropy(tf.ones_like(d_fake), d_fake))
    loss_d = tf.reduce_mean(0.5 * (loss_d_real + loss_d_fake))

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

    with tf.control_dependencies(update_ops):
        optimizer_d = tf.train.RMSPropOptimizer(
            learning_rate=0.00015).minimize(loss_d + d_reg, var_list=vars_d)
        optimizer_g = tf.train.RMSPropOptimizer(
            learning_rate=0.00015).minimize(loss_g + g_reg, var_list=vars_g)

    sess.run(tf.global_variables_initializer())

    for i in range(200000):
        train_d = True
        train_g = True
        rate_train = 0.4

        n = np.random.uniform(0.0, 1.0,
                              [batch_size, n_noise]).astype(np.float32)
        batch = [
            np.reshape(b, config['IMAGE_DIM'])
            for b in dloader.next_batch(batch_size)
        ]

        d_real_ls, d_fake_ls, g_ls, d_ls = sess.run(
            [loss_d_real, loss_d_fake, loss_g, loss_d],
            feed_dict={
                X_in: batch,
                noise: n,
                rate: rate_train,
                is_training: True
            })

        d_real_ls = np.mean(d_real_ls)
        d_fake_ls = np.mean(d_fake_ls)

        # Heuristic loss balancing: skip updating whichever side is too far ahead.
        if g_ls * 1.5 < d_ls:
            train_g = False
        if d_ls * 2 < g_ls:
            train_d = False

        if train_d:
            sess.run(optimizer_d,
                     feed_dict={
                         noise: n,
                         X_in: batch,
                         rate: rate_train,
                         is_training: True
                     })

        if train_g:
            sess.run(optimizer_g,
                     feed_dict={
                         noise: n,
                         rate: rate_train,
                         is_training: True
                     })

        if i % 500 == 0:
            print("Generator loss: ", g_ls, "Discriminator loss: ", d_ls,
                  "Step: ", i)
Example #44
        def __graph__():

            # placeholders
            tf.reset_default_graph()
            #  encoder inputs : list of indices of length xseq_len
            self.enc_ip = [
                tf.placeholder(shape=[
                    None,
                ],
                               dtype=tf.int64,
                               name='ei_{}'.format(t)) for t in range(xseq_len)
            ]

            #  labels that represent the real outputs
            self.labels = [
                tf.placeholder(shape=[
                    None,
                ],
                               dtype=tf.int64,
                               name='ei_{}'.format(t)) for t in range(yseq_len)
            ]

            #  decoder inputs : 'GO' + [ y1, y2, ... y_t-1 ]
            self.dec_ip = [
                tf.zeros_like(self.enc_ip[0], dtype=tf.int64, name='GO')
            ] + self.labels[:-1]

            # Basic LSTM cell wrapped in Dropout Wrapper
            self.keep_prob = tf.placeholder(tf.float32)
            # define the basic cell
            basic_cell = tf.contrib.rnn.DropoutWrapper(
                tf.contrib.rnn.BasicLSTMCell(emb_dim, state_is_tuple=True),
                output_keep_prob=self.keep_prob)
            # stack cells together : n layered model
            stacked_lstm = tf.contrib.rnn.MultiRNNCell([basic_cell] *
                                                       num_layers,
                                                       state_is_tuple=True)

            # for parameter sharing between training model
            #  and testing model
            with tf.variable_scope('decoder') as scope:
                # build the seq2seq model
                #  inputs : encoder, decoder inputs, LSTM cell type, vocabulary sizes, embedding dimensions
                self.decode_outputs, self.decode_states = tf.contrib.legacy_seq2seq.embedding_rnn_seq2seq(
                    self.enc_ip, self.dec_ip, stacked_lstm, xvocab_size,
                    yvocab_size, emb_dim)
                # share parameters
                scope.reuse_variables()
                # testing model, where output of previous timestep is fed as input
                #  to the next timestep
                self.decode_outputs_test, self.decode_states_test = tf.contrib.legacy_seq2seq.embedding_rnn_seq2seq(
                    self.enc_ip,
                    self.dec_ip,
                    stacked_lstm,
                    xvocab_size,
                    yvocab_size,
                    emb_dim,
                    feed_previous=True)

            # now, for training,
            #  build loss function

            # weighted loss
            #  TODO : add parameter hint
            loss_weights = [
                tf.ones_like(label, dtype=tf.float32) for label in self.labels
            ]
            self.loss = tf.contrib.legacy_seq2seq.sequence_loss(
                self.decode_outputs, self.labels, loss_weights, yvocab_size)
            # train op to minimize the loss
            self.train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(
                self.loss)
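# For reference: the decoder inputs above implement teacher forcing; for labels
# [y1, y2, y3] the decoder is fed [GO, y1, y2]:
labels = ['y1', 'y2', 'y3']
dec_ip = ['GO'] + labels[:-1]  # ['GO', 'y1', 'y2']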
Example #45
# -*- coding: utf-8 -*-
import tensorflow as tf
"""tf各种变量,包括variable, constant数据类型的参数
在经过最后一步的初始化之前,调用*.eval()输出参数内容都会出ValueError错
"""
# tf操作类似numpy
# 常量矩阵, 推荐使用float32
tf.zeros([3, 4], tf.int32)

tensor = [[1, 2, 3], [4, 5, 6]]
# a new tf matrix of the same shape, filled with zeros
# for tensor = [[1, 2, 3], [4, 5, 6]]
# the output below is the same-size all-zero array [[0, 0, 0], [0, 0, 0]]
tf.zeros_like(tensor)

# ones works like the zeros function above
# [[1, 1, 1], [1, 1, 1]]
tf.ones([2, 3], tf.int32)

tf.ones_like(tensor)

# constant 1-D; printing its value directly would still raise an error
tensor = tf.constant([1, 2, 3, 4, 5, 6, 7])
# print tensor.eval()

# constant 2-D; outputs [[-1.0, -1.0, -1.0], [-1.0, -1.0, -1.0]]
tf.constant(-1.0, shape=[2, 3])

# linear spacing; 3 means split into 3 points: [10.0, 11.0, 12.0]
tensor = tf.linspace(10.0, 12.0, 3, name='linspace')
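# Constants need no variable initialization, but evaluating them still requires
# an active session:
with tf.Session() as sess:
    print(sess.run(tensor))                 # [10. 11. 12.]
    print(sess.run(tf.zeros_like(tensor)))  # [0. 0. 0.]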
Example #46
        def step(i, preds, scores, cache_decoder, cache_lm, logits,
                 len_decoded, finished):
            """
            The cache has no fixed shape, so it cannot be put in all_states.
            """
            preds_emb = self.embedding(preds)
            decoder_input = preds_emb

            decoder_output, cache_decoder = self.decoder_with_caching_impl(
                decoder_input, cache_decoder, encoded, encoder_attention_bias)

            cur_logit = tf.layers.dense(inputs=decoder_output[:, -1, :],
                                        units=self.dim_output,
                                        activation=None,
                                        use_bias=False,
                                        name='decoder_fc')

            logits = tf.concat([logits, cur_logit[:, None]], 1)
            z = tf.nn.log_softmax(cur_logit)  # [batch*beam, size_output]

            # the language model inference
            if self.args.model.shallow_fusion:
                assert self.lm
                preds_emb = self.lm.decoder.embedding(preds)

                with tf.variable_scope(self.args.top_scope, reuse=True):
                    with tf.variable_scope(self.args.lm_scope):
                        lm_output, cache_lm = self.lm.decoder.decoder_with_caching_impl(
                            preds_emb, cache_lm)
                        logit_lm = dense(inputs=lm_output[:, -1, :],
                                         units=self.dim_output,
                                         kernel=tf.transpose(
                                             self.lm.decoder.fully_connected),
                                         use_bias=False)
                z_lm = self.lambda_lm * tf.nn.log_softmax(
                    logit_lm)  # [batch*beam, size_output]
            else:
                z_lm = tf.zeros_like(z)

            # rank the combined scores
            next_scores, next_preds = tf.nn.top_k(z + z_lm,
                                                  k=beam_size,
                                                  sorted=True)
            next_preds = tf.to_int32(next_preds)

            # beamed scores & Pruning
            scores = scores[:, None] + next_scores  # [batch_size * beam_size, beam_size]
            scores = tf.reshape(scores,
                                shape=[batch_size, beam_size * beam_size])

            _, k_indices = tf.nn.top_k(scores, k=beam_size)
            k_indices = base_indices * beam_size * beam_size + tf.reshape(
                k_indices, shape=[-1])  # [batch_size * beam_size]
            # Update scores.
            scores = tf.reshape(scores, [-1])
            scores = tf.gather(scores, k_indices)
            # Update predictions.
            next_preds = tf.reshape(next_preds, shape=[-1])
            next_preds = tf.gather(next_preds, indices=k_indices)

            # k_indices: [0~batch*beam*beam], preds: [0~batch*beam]
            # preds, cache_lm, cache_decoder: these data are shared during the beam expand among vocab
            preds = tf.gather(preds, indices=k_indices // beam_size)
            cache_lm = tf.gather(cache_lm, indices=k_indices // beam_size)
            cache_decoder = tf.gather(cache_decoder,
                                      indices=k_indices // beam_size)
            preds = tf.concat([preds, next_preds[:, None]],
                              axis=1)  # [batch_size * beam_size, i]

            has_eos = tf.equal(next_preds, self.end_token)
            finished = tf.logical_or(finished, has_eos)
            len_decoded += 1 - tf.to_int32(finished)
            # i = tf.Print(i, [i], message='i: ', summarize=1000)

            return i + 1, preds, scores, cache_decoder, cache_lm, logits, len_decoded, finished
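# For reference: a NumPy sketch of the pruning above. Each of batch * beam
# hypotheses expands to beam candidates; scores are reshaped to
# [batch, beam * beam], the top beam survive per batch element, and
# k_indices // beam_size recovers each survivor's parent hypothesis.
import numpy as np

batch_size, beam_size = 2, 3
scores = np.random.randn(batch_size * beam_size, beam_size)
flat = scores.reshape(batch_size, beam_size * beam_size)
keep = np.argsort(-flat, axis=1)[:, :beam_size]      # top-k per batch row
base = np.arange(batch_size) * beam_size * beam_size
k_indices = (base[:, None] + keep).reshape(-1)       # global candidate ids
parents = k_indices // beam_size                     # each in [0, batch * beam)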
Example #47
  def _parse_train_data(self, data):
    """Parse data for ShapeMask training."""
    classes = data['groundtruth_classes']
    boxes = data['groundtruth_boxes']
    masks = data['groundtruth_instance_masks']
    is_crowds = data['groundtruth_is_crowd']
    # Skips annotations with `is_crowd` = True.
    if self._skip_crowd_during_training and self._is_training:
      num_groundtruths = tf.shape(classes)[0]
      with tf.control_dependencies([num_groundtruths, is_crowds]):
        indices = tf.cond(
            tf.greater(tf.size(is_crowds), 0),
            lambda: tf.where(tf.logical_not(is_crowds))[:, 0],
            lambda: tf.cast(tf.range(num_groundtruths), tf.int64))
      classes = tf.gather(classes, indices)
      boxes = tf.gather(boxes, indices)
      masks = tf.gather(masks, indices)

    # Gets original image and its size.
    image = data['image']
    image_shape = tf.shape(image)[0:2]

    # If not using category, makes all categories with id = 0.
    if not self._use_category:
      classes = tf.cast(tf.greater(classes, 0), dtype=tf.float32)

    # Normalizes image with mean and std pixel values.
    image = input_utils.normalize_image(image)

    # Flips image randomly during training.
    if self._aug_rand_hflip:
      image, boxes, masks = input_utils.random_horizontal_flip(
          image, boxes, masks)

    # Converts boxes from normalized coordinates to pixel coordinates.
    boxes = box_utils.denormalize_boxes(boxes, image_shape)

    # Resizes and crops image.
    image, image_info = input_utils.resize_and_crop_image(
        image,
        self._output_size,
        self._output_size,
        aug_scale_min=self._aug_scale_min,
        aug_scale_max=self._aug_scale_max)
    image_scale = image_info[2, :]
    offset = image_info[3, :]

    # Resizes and crops boxes and masks.
    boxes = input_utils.resize_and_crop_boxes(
        boxes, image_scale, self._output_size, offset)
    masks = input_utils.resize_and_crop_masks(
        tf.expand_dims(masks, axis=-1), image_scale, self._output_size, offset)
    masks = tf.squeeze(masks, axis=-1)

    # Filters out ground truth boxes that are all zeros.
    indices = input_utils.get_non_empty_box_indices(boxes)
    boxes = tf.gather(boxes, indices)
    classes = tf.gather(classes, indices)
    masks = tf.gather(masks, indices)

    # Assigns anchors.
    input_anchor = anchor.Anchor(
        self._min_level, self._max_level, self._num_scales,
        self._aspect_ratios, self._anchor_size, self._output_size)
    anchor_labeler = anchor.AnchorLabeler(
        input_anchor, self._match_threshold, self._unmatched_threshold)
    (cls_targets,
     box_targets,
     num_positives) = anchor_labeler.label_anchors(
         boxes,
         tf.cast(tf.expand_dims(classes, axis=1), tf.float32))

    # Sample groundtruth masks/boxes/classes for mask branch.
    num_masks = tf.shape(masks)[0]
    mask_shape = tf.shape(masks)[1:3]

    # Pad sampled boxes/masks/classes to a constant batch size.
    padded_boxes = input_utils.pad_to_fixed_size(boxes, self._num_sampled_masks)
    padded_classes = input_utils.pad_to_fixed_size(
        classes, self._num_sampled_masks)
    padded_masks = input_utils.pad_to_fixed_size(masks, self._num_sampled_masks)

    # Randomly sample groundtruth masks for mask branch training. For images
    # without groundtruth masks, the dummy padded tensors are sampled instead.
    rand_indices = tf.random.uniform(
        [self._num_sampled_masks],
        minval=0,
        maxval=tf.maximum(num_masks, 1),
        dtype=tf.dtypes.int32)
    sampled_boxes = tf.gather(padded_boxes, rand_indices)
    sampled_classes = tf.gather(padded_classes, rand_indices)
    sampled_masks = tf.gather(padded_masks, rand_indices)
    # Jitter the sampled boxes to mimic the noisy detections.
    sampled_boxes = box_utils.jitter_boxes(
        sampled_boxes, noise_scale=self._box_jitter_scale)

    # Compute mask targets in feature crop. A feature crop fully contains a
    # sampled box.
    mask_outer_boxes = box_utils.compute_outer_boxes(
        sampled_boxes, mask_shape, scale=self._outer_box_scale)
    norm_mask_outer_boxes = box_utils.normalize_boxes(
        mask_outer_boxes, mask_shape)

    # Set sampled_masks shape to [batch_size, height, width, 1].
    sampled_masks = tf.expand_dims(sampled_masks, axis=-1)
    mask_targets = tf.image.crop_and_resize(
        sampled_masks,
        norm_mask_outer_boxes,
        box_ind=tf.range(self._num_sampled_masks),
        crop_size=[self._mask_crop_size, self._mask_crop_size],
        method='bilinear',
        extrapolation_value=0,
        name='train_mask_targets')
    mask_targets = tf.where(tf.greater_equal(mask_targets, 0.5),
                            tf.ones_like(mask_targets),
                            tf.zeros_like(mask_targets))
    mask_targets = tf.squeeze(mask_targets, axis=-1)

    # If bfloat16 is used, casts input image to tf.bfloat16.
    if self._use_bfloat16:
      image = tf.cast(image, dtype=tf.bfloat16)

    # Packs labels for model_fn outputs.
    labels = {
        'cls_targets': cls_targets,
        'box_targets': box_targets,
        'anchor_boxes': input_anchor.multilevel_boxes,
        'num_positives': num_positives,
        'image_info': image_info,
        # For ShapeMask.
        'mask_boxes': sampled_boxes,
        'mask_outer_boxes': mask_outer_boxes,
        'mask_targets': mask_targets,
        'mask_classes': sampled_classes,
        'mask_is_valid': tf.cast(tf.not_equal(num_masks, 0), tf.int32)
    }
    return image, labels
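# For reference: the thresholding tf.where above is equivalent to casting the
# comparison itself:
#   mask_targets = tf.cast(tf.greater_equal(mask_targets, 0.5), mask_targets.dtype)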
Example #48
    def build_graph(self):
        # Extract DCN features (here ResNet v2, 50 layers)
        X = tf.reshape(self.X, [self.BT, 224, 224, 3])
        _ = self.net.resnet_v2(X)

        features = tf.reshape(self.net.spatial,
                              [self.BATCH, self.T, 7, 7, 2048])
        self.features = features

        # Encoder
        with tf.variable_scope(self.scope):
            with tf.variable_scope("LSTM2") as scope:
                lstm = tf.contrib.rnn.LSTMCell(
                    self.DIM_LSTM,
                    initializer=tf.contrib.layers.xavier_initializer())
                state = lstm.zero_state(self.BATCH, tf.float32)

                feat_T = tf.split(features, self.T, axis=1)

                outputs = []
                joint_maps = []
                for t in range(self.T):
                    # TODO: Each body part has its own variables
                    if t > 0:
                        scope.reuse_variables()

                    # Generate Attention Map for each Joint and normalize
                    h_rgb = tf.reshape(feat_T[t], [self.BATCH, 7, 7, 2048])
                    jm_list, jm_tensor = self.generate_attention_maps(
                        state, h_rgb)
                    joint_maps.append(tf.expand_dims(
                        jm_tensor, axis=2))  # B x 5 x T x 7 x 7 x J

                    # Assemble Parts
                    body_parts = self.assemble_parts(jm_list, h_rgb)  # F_t^P
                    body_pooled = tf.reduce_max(body_parts, axis=1)  # S_t

                    # body_pooled = tf.reshape( body_pooled, [self.BATCH, 7*7*2048] )
                    # Global pooling to save resources
                    body_pooled = tf.reduce_mean(body_pooled, axis=[1, 2])

                    feat_out, state = lstm(body_pooled, state)

                    outputs.append(tf.expand_dims(feat_out, axis=1))

            h_lstm = tf.concat(outputs, axis=1)
            h_lstm = tf.reshape(h_lstm, [self.BT, self.DIM_LSTM])

            h_pred = util.fc(h_lstm, self.C, "classifier_pose")
            h_pred = tf.reshape(h_pred, [self.BATCH, self.T, self.C])

        # Loss computation
        var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                     scope=self.scope)
        reg_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
                                     scope=self.scope)

        # Main losses: Softmax classification loss
        loss_pose_pre = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=h_pred, labels=self.Y)
        loss_pose_T = loss_pose_pre
        loss_pose_cls = tf.reduce_sum(loss_pose_pre, axis=1)

        # Main losses: Joint map L2 regression loss
        joint_maps = tf.concat(joint_maps, axis=2)
        loss_pose_l2 = 0

        # Note, we got 5 sets of attention maps. Each have an L2 loss.
        for i in range(5):
            diff = tf.reshape(joint_maps[:, i] - self.P,
                              [self.BATCH, self.T, 7 * 7 * self.J])
            loss_pose_l2 += 0.5 * tf.reduce_sum(diff**2, axis=2)

        # Total Loss
        loss = tf.reduce_mean(self.l_action * loss_pose_pre +
                              self.l_pose * loss_pose_l2)

        reg_loss = self.lambda_l2 * tf.reduce_sum(reg_loss)
        total = reg_loss + loss

        # Optimizer + Batch Gradient Accumulation
        #opt         = tf.train.RMSPropOptimizer( learning_rate = self.LR )
        opt = tf.train.AdamOptimizer(learning_rate=self.LR)

        accum_vars = [
            tf.Variable(tf.zeros_like(tv.initialized_value()), trainable=False)
            for tv in var_list
        ]
        zero_ops = [tv.assign(tf.zeros_like(tv)) for tv in accum_vars]

        gvs = opt.compute_gradients(total, var_list)

        accum_ops = [
            accum_vars[i].assign_add(gv[0]) for i, gv in enumerate(gvs)
        ]
        op = opt.apply_gradients([(accum_vars[i], gv[1])
                                  for i, gv in enumerate(gvs)])

        # Exposing variables
        self.joint_maps = joint_maps
        self.reg_loss = reg_loss
        self.loss_main_T = loss_pose_T
        self.loss_rpan = loss_pose_cls
        self.loss_pose = loss_pose_l2
        self.zero_ops = zero_ops
        self.accum_ops = accum_ops
        self.accum_vars = accum_vars

        self.result = tf.nn.softmax(h_pred)

        return op, total
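# For reference: a sketch of driving the gradient-accumulation ops defined above
# (zero the buffers, accumulate over micro-batches, then apply once); `model`,
# `sess`, and `feeds` are stand-ins for the surrounding training script.
train_op, total_loss = model.build_graph()
sess.run(model.zero_ops)
for feed in feeds:                # one micro-batch per feed dict
    sess.run(model.accum_ops, feed_dict=feed)
sess.run(train_op)                # apply the summed gradients once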
Example #49
    def decode_infer_2_bs(self):
        # beam search version
        # During second-stage decoding we already have the decoded sequence, so
        # we do not need to feed state (no incremental decoding).
        # At time i, we compute the i-th attn_bias and take the i-th decoder output.
        with self.graph.as_default():
            config = self.bert_config

            target_sequence = tf.reshape(
                self.decode_seq,
                [self.hps.eval_batch_size * self.hps.beam_size, -1])
            target_length = self.decode_length
            target_seg_ids = tf.zeros_like(target_sequence,
                                           dtype=tf.int32,
                                           name='target_seg_ids_infer')
            tgt_mask = tf.sequence_mask(target_length,
                                        maxlen=tf.shape(target_sequence)[1],
                                        dtype=tf.float32)  # [b, q']

            with tf.variable_scope('bert', reuse=True):
                with tf.variable_scope('embeddings'), tf.device('/cpu:0'):
                    # Perform embedding lookup on the target word ids.
                    (tgt_embed, _) = embedding_lookup(
                        input_ids=target_sequence,
                        vocab_size=config.vocab_size,
                        embedding_size=config.hidden_size,
                        initializer_range=config.initializer_range,
                        word_embedding_name='word_embeddings',
                        use_one_hot_embeddings=False)

                    # Add positional embeddings and token type embeddings, then layer
                    # normalize and perform dropout.
                    tgt_embed = embedding_postprocessor(
                        input_tensor=tgt_embed,
                        use_token_type=True,
                        token_type_ids=target_seg_ids,
                        token_type_vocab_size=config.type_vocab_size,
                        token_type_embedding_name='token_type_embeddings',
                        use_position_embeddings=True,
                        position_embedding_name='position_embeddings',
                        initializer_range=config.initializer_range,
                        max_position_embeddings=config.max_position_embeddings,
                        dropout_prob=config.hidden_dropout_prob)

            with tf.variable_scope('decoder', reuse=True):
                # [b, l_t, e]
                masked_tgt_embed = tgt_embed * tf.expand_dims(tgt_mask, -1)
                dec_attn_bias = attention_bias(
                    tf.shape(masked_tgt_embed)[1], "cloze_bias")
                # this pad-and-shift is necessary because the attention bias is shifted
                infer_decoder_input = tf.pad(
                    masked_tgt_embed,
                    [[0, 0], [1, 0], [0, 0]])[:, :-1, :]  # Shift left

                # This operation is wrong!!!
                # infer_dec_attn_bias = dec_attn_bias[:, :, self.time_step:self.time_step + 1, :]

                all_att_weights, decoder_output = transformer_decoder(
                    infer_decoder_input,
                    self.enc_output,
                    dec_attn_bias,
                    self.enc_attn_bias,
                    self.hps,
                    scope='t_decoder')
                decoder_output = decoder_output[:, self.time_step, :]  # [b * beam, e]
                logits = tf.matmul(decoder_output, self.decoder_weights, False,
                                   True)  # [b * beam, v]
                log_prob = tf.nn.log_softmax(logits)
        return log_prob
Example #50
def compact_bilinear_pooling_layer(bottom1,
                                   bottom2,
                                   output_dim,
                                   sum_pool=True,
                                   rand_h_1=None,
                                   rand_s_1=None,
                                   rand_h_2=None,
                                   rand_s_2=None,
                                   seed_h_1=1,
                                   seed_s_1=3,
                                   seed_h_2=5,
                                   seed_s_2=7,
                                   sequential=True,
                                   compute_size=128):
    """
    Compute compact bilinear pooling over two bottom inputs. Reference:

    Yang Gao, et al. "Compact Bilinear Pooling." in Proceedings of IEEE
    Conference on Computer Vision and Pattern Recognition (2016).
    Akira Fukui, et al. "Multimodal Compact Bilinear Pooling for Visual Question
    Answering and Visual Grounding." arXiv preprint arXiv:1606.01847 (2016).

    Args:
        bottom1: 1st input, 4D Tensor of shape [batch_size, height, width, input_dim1].
        bottom2: 2nd input, 4D Tensor of shape [batch_size, height, width, input_dim2].

        output_dim: output dimension for compact bilinear pooling.

        sum_pool: (Optional) If True, sum the output along height and width
                  dimensions and return output shape [batch_size, output_dim].
                  Otherwise return [batch_size, height, width, output_dim].
                  Default: True.

        rand_h_1: (Optional) a 1D numpy array containing indices in the interval
                  `[0, output_dim)`. Automatically generated from `seed_h_1`
                  if it is None.
        rand_s_1: (Optional) a 1D numpy array of 1 and -1, having the same shape
                  as `rand_h_1`. Automatically generated from `seed_s_1` if it
                  is None.
        rand_h_2: (Optional) a 1D numpy array containing indices in the interval
                  `[0, output_dim)`. Automatically generated from `seed_h_2`
                  if it is None.
        rand_s_2: (Optional) a 1D numpy array of 1 and -1, having the same shape
                  as `rand_h_2`. Automatically generated from `seed_s_2` if it
                  is None.

        sequential: (Optional) if True, use the sequential FFT and IFFT
                    instead of tf.batch_fft or tf.batch_ifft to avoid
                    out-of-memory (OOM) errors.
                    Note: sequential FFT and IFFT are only available on GPU.
                    Default: True.
        compute_size: (Optional) the maximum size of sub-batches to be forwarded
                      through FFT or IFFT at a time. A large compute_size may
                      be faster but can cause OOM and FFT failure. This
                      parameter is only effective when sequential == True.
                      Default: 128.

    Returns:
        Compact bilinear pooled results of shape [batch_size, output_dim] or
        [batch_size, height, width, output_dim], depending on `sum_pool`.
    """

    # Static shapes are needed to construct the count sketch matrix
    input_dim1 = bottom1.get_shape().as_list()[-1]
    input_dim2 = bottom2.get_shape().as_list()[-1]

    # Step 0: Generate vectors and sketch matrix for tensor count sketch
    # This is only done once during graph construction, and fixed during each
    # operation
    if rand_h_1 is None:
        np.random.seed(seed_h_1)
        rand_h_1 = np.random.randint(output_dim, size=input_dim1)
    if rand_s_1 is None:
        np.random.seed(seed_s_1)
        rand_s_1 = 2 * np.random.randint(2, size=input_dim1) - 1
    sparse_sketch_matrix1 = _generate_sketch_matrix(rand_h_1, rand_s_1,
                                                    output_dim)
    if rand_h_2 is None:
        np.random.seed(seed_h_2)
        rand_h_2 = np.random.randint(output_dim, size=input_dim2)
    if rand_s_2 is None:
        np.random.seed(seed_s_2)
        rand_s_2 = 2 * np.random.randint(2, size=input_dim2) - 1
    sparse_sketch_matrix2 = _generate_sketch_matrix(rand_h_2, rand_s_2,
                                                    output_dim)

    # Step 1: Flatten the input tensors and count sketch
    bottom1_flat = tf.reshape(bottom1, [-1, input_dim1])
    bottom2_flat = tf.reshape(bottom2, [-1, input_dim2])
    # Essentially:
    #   sketch1 = bottom1 * sparse_sketch_matrix
    #   sketch2 = bottom2 * sparse_sketch_matrix
    # But tensorflow only supports left multiplying a sparse matrix, so:
    #   sketch1 = (sparse_sketch_matrix.T * bottom1.T).T
    #   sketch2 = (sparse_sketch_matrix.T * bottom2.T).T
    sketch1 = tf.transpose(
        tf.sparse_tensor_dense_matmul(sparse_sketch_matrix1,
                                      bottom1_flat,
                                      adjoint_a=True,
                                      adjoint_b=True))
    sketch2 = tf.transpose(
        tf.sparse_tensor_dense_matmul(sparse_sketch_matrix2,
                                      bottom2_flat,
                                      adjoint_a=True,
                                      adjoint_b=True))

    # Step 2: FFT
    fft1 = _fft(tf.complex(real=sketch1, imag=tf.zeros_like(sketch1)),
                sequential, compute_size)
    fft2 = _fft(tf.complex(real=sketch2, imag=tf.zeros_like(sketch2)),
                sequential, compute_size)

    # Step 3: Elementwise product
    fft_product = tf.multiply(fft1, fft2)

    # Step 4: Inverse FFT and reshape back
    # Compute output shape dynamically: [batch_size, height, width, output_dim]
    cbp_flat = tf.real(_ifft(fft_product, sequential, compute_size))
    output_shape = tf.add(tf.multiply(tf.shape(bottom1), [1, 1, 1, 0]),
                          [0, 0, 0, output_dim])
    cbp = tf.reshape(cbp_flat, output_shape)
    # Step 5: Sum pool over spatial dimensions, if specified
    if sum_pool:
        cbp = tf.reduce_sum(cbp, axis=[1, 2])

    return cbp
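
A minimal usage sketch for the layer above. The shapes and `output_dim` are illustrative, and `_generate_sketch_matrix`, `_fft`, and `_ifft` are assumed to be the helpers defined alongside this function:

import numpy as np
import tensorflow as tf

# Two conv feature maps over the same 14x14 grid (hypothetical shapes).
bottom1 = tf.placeholder(tf.float32, [None, 14, 14, 512])
bottom2 = tf.placeholder(tf.float32, [None, 14, 14, 512])
# Sum-pooled compact bilinear feature of dimension 16000 per example.
cbp = compact_bilinear_pooling_layer(bottom1, bottom2, output_dim=16000)
# cbp: [batch_size, 16000]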
Example #51
def discriminator_loss(real_output, fake_output):
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
    total_loss = real_loss + fake_loss
    return total_loss
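
The module-level `cross_entropy` is not shown above; a common definition consistent with this usage (an assumption, matching the usual TF2 DCGAN idiom) is:

# Assumed helper: binary cross-entropy computed on raw discriminator logits.
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

# Hypothetical call site: real_output / fake_output are discriminator logits.
# d_loss = discriminator_loss(discriminator(real_images),
#                             discriminator(generator(noise)))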
Example #52
def main(_):
    
    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)
    if not os.path.exists(FLAGS.summaries_dir):
        os.makedirs(FLAGS.summaries_dir)
        
    with tf.device("/gpu:0"):
    #with tf.device("/cpu:0"):
        z = tf.placeholder(tf.float32, [FLAGS.batch_size, FLAGS.z_dim], name="g_input_noise")
        x =  tf.placeholder(tf.float32, [FLAGS.batch_size, FLAGS.output_size, FLAGS.output_size, FLAGS.c_dim], name='d_input_images')
        
        Gz =  network.generator(z)
        Dx, Dfx =  network.discriminator(x)
        Dz, Dfz = network.discriminator(Gz, reuse=True)
        
        
        d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=Dx, labels=tf.ones_like(Dx)))
        d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=Dz, labels=tf.zeros_like(Dz)))
        d_loss =  d_loss_real + d_loss_fake
        
        g_loss_perceptual = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = Dz, labels = tf.ones_like(Dz)))
        g_loss_features = tf.reduce_mean(tf.nn.l2_loss(Dfx-Dfz))/(FLAGS.image_size*FLAGS.image_size)
        g_loss = g_loss_perceptual + g_loss_features
        
        
        tvars = tf.trainable_variables()
        d_vars =  [var for var in tvars if 'd_' in var.name]
        g_vars =  [var for var in tvars if 'g_' in var.name]

        print(d_vars)
        print("---------------")
        print(g_vars)
        
        with tf.variable_scope(tf.get_variable_scope(),reuse=False): 
            print("reuse or not: {}".format(tf.get_variable_scope().reuse))
            assert tf.get_variable_scope().reuse == False, "Houston tengo un problem"
            d_trainer = tf.train.AdamOptimizer(FLAGS.learning_rate, FLAGS.beta1).minimize(d_loss, var_list=d_vars)
            g_trainer = tf.train.AdamOptimizer(FLAGS.learning_rate, FLAGS.beta1).minimize(g_loss, var_list=g_vars)
        
        tf.summary.scalar("generator_loss_percptual", g_loss_perceptual)
        tf.summary.scalar("generator_loss_features", g_loss_features)
        tf.summary.scalar("generator_loss_total", g_loss)
        tf.summary.scalar("discriminator_loss", d_loss)
        tf.summary.scalar("discriminator_loss_real", d_loss_real)
        tf.summary.scalar("discriminator_loss_fake", d_loss_fake)
        
        images_for_tensorboard = network.generator(z, reuse=True)
        tf.summary.image('Generated_images', images_for_tensorboard, 2)
        
        merged = tf.summary.merge_all()
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.30)
        gpu_options.allow_growth = True
              
        saver = tf.train.Saver()
        
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)) as sess:
        
        print("starting session")
        summary_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train', sess.graph)
        sess.run(tf.global_variables_initializer())
        
        
        data_files = glob(os.path.join("./data", FLAGS.dataset, "*.jpg"))
        
        model_dir = "%s_%s_%s" % (FLAGS.dataset, 64, FLAGS.output_size)
        save_dir = os.path.join(FLAGS.checkpoint_dir, model_dir)
        
        
        if FLAGS.is_train:
            for epoch in range(FLAGS.epoch):
                
                d_total_cost = 0.
                g_total_cost = 0.
                shuffle(data_files)
                num_batches = min(len(data_files), FLAGS.train_size) // FLAGS.batch_size
                #num_batches = 2
                for batch_i in range(num_batches):
                    batch_files = data_files[batch_i*FLAGS.batch_size:(batch_i+1)*FLAGS.batch_size]
                    batch = [utilities.load_image(batch_file, FLAGS.image_size, is_crop=FLAGS.is_crop, resize_w=FLAGS.output_size) for batch_file in batch_files]
                    batch_x = np.array(batch).astype(np.float32)
                    batch_z = np.random.normal(-1, 1, size=[FLAGS.batch_size, FLAGS.z_dim]).astype(np.float32)
                    start_time = time.time()
                    
                    d_err, _ = sess.run([d_loss, d_trainer], feed_dict={z: batch_z, x: batch_x})
                    g_err, _ = sess.run([g_loss, g_trainer], feed_dict={z: batch_z, x: batch_x})
                    
                    d_total_cost += d_err
                    g_total_cost += g_err
                    
                    if batch_i % 10 == 0:
                        summary = sess.run(merged, feed_dict={x: batch_x, z: batch_z})
                        # Log against a monotonically increasing global step.
                        summary_writer.add_summary(summary, epoch * num_batches + batch_i)
                    
                    print("Epoch: [%2d/%2d] [%4d/%4d] time: %4.4f, d_loss: %.8f, g_loss: %.8f" \
                        % (epoch, FLAGS.epoch, batch_i, num_batches,
                            time.time() - start_time, d_err, g_err))
                

                print("Epoch:", '%04d' % (epoch+1), "d_cost=", \
                          "{:.9f}".format(d_total_cost/num_batches), "g_cost=", "{:.9f}".format(g_total_cost/num_batches))
    
                sys.stdout.flush()
        save_path = saver.save(sess, save_dir)
        print("Model saved in path: %s" % save_path)
        sys.stdout.flush()
Example #53
    def beam_decode_rerank(self, encoded, len_encoded):
        """
        beam search rerank at end with language model integration (self-attention model)
        the input to te score is <sos> + tokens !!!
        """
        beam_size = self.beam_size
        batch_size = tf.shape(len_encoded)[0]

        # beam search Initialize
        # repeat each sample in batch along the batch axis [1,2,3,4] -> [1,1,2,2,3,3,4,4]
        encoded = tf.tile(encoded[:, None, :, :],
                          multiples=[
                              1, beam_size, 1, 1
                          ])  # [batch_size, beam_size, *, hidden_units]
        encoded = tf.reshape(
            encoded,
            [batch_size * beam_size, -1,
             encoded.get_shape()[-1].value])
        len_encoded = tf.reshape(
            tf.tile(len_encoded[:, None], multiples=[1, beam_size]),
            [-1])  # [batch_size * beam_size]

        # [[<S>, <S>, ..., <S>]], shape: [batch_size * beam_size, 1]
        token_init = tf.fill([batch_size * beam_size, 1], self.args.sos_idx)
        logits_init = tf.zeros([batch_size * beam_size, 0, self.dim_output],
                               dtype=tf.float32)
        len_decoded_init = tf.ones_like(len_encoded, dtype=tf.int32)
        # The initial scores must be [0, -inf, -inf, ...]: every beam holds the
        # same prediction at initialization, so only one copy may survive.
        scores_init = tf.constant([0.0] + [-inf] * (beam_size - 1),
                                  dtype=tf.float32)  # [beam_size]
        scores_init = tf.tile(scores_init,
                              multiples=[batch_size
                                         ])  # [batch_size * beam_size]
        finished_init = tf.zeros_like(scores_init, dtype=tf.bool)

        cache_decoder_init = tf.zeros(
            [batch_size * beam_size, 0, self.num_blocks, self.num_cell_units])
        if self.lm:
            cache_lm_init = tf.zeros([
                batch_size * beam_size, 0,
                self.lm.args.model.decoder.num_blocks,
                self.lm.args.model.decoder.num_cell_units
            ])
        else:
            cache_lm_init = tf.zeros([0, 0, 0, 0])

        # collect the initial states of lstms used in decoder.
        base_indices = tf.reshape(tf.tile(tf.range(batch_size)[:, None],
                                          multiples=[1, beam_size]),
                                  shape=[-1])

        encoder_padding = tf.equal(
            tf.sequence_mask(len_encoded, maxlen=tf.shape(encoded)[1]),
            False)  # bool tensor
        encoder_attention_bias = common_attention.attention_bias_ignore_padding(
            encoder_padding)

        def step(i, preds, scores, cache_decoder, cache_lm, logits,
                 len_decoded, finished):
            """
            the cache has no specific shape, so no can be put in the all_states
            """
            preds_emb = self.embedding(preds)
            decoder_input = preds_emb

            decoder_output, cache_decoder = self.decoder_with_caching_impl(
                decoder_input, cache_decoder, encoded, encoder_attention_bias)

            cur_logit = tf.layers.dense(inputs=decoder_output[:, -1, :],
                                        units=self.dim_output,
                                        activation=None,
                                        use_bias=False,
                                        name='decoder_fc')

            logits = tf.concat([logits, cur_logit[:, None]], 1)
            z = tf.nn.log_softmax(cur_logit)  # [batch*beam, size_output]

            # language model inference (shallow fusion)
            if self.args.model.shallow_fusion:
                assert self.lm
                preds_emb = self.lm.decoder.embedding(preds)

                with tf.variable_scope(self.args.top_scope, reuse=True):
                    with tf.variable_scope(self.args.lm_scope):
                        lm_output, cache_lm = self.lm.decoder.decoder_with_caching_impl(
                            preds_emb, cache_lm)
                        logit_lm = dense(inputs=lm_output[:, -1, :],
                                         units=self.dim_output,
                                         kernel=tf.transpose(
                                             self.lm.decoder.fully_connected),
                                         use_bias=False)
                z_lm = self.lambda_lm * tf.nn.log_softmax(
                    logit_lm)  # [batch*beam, size_output]
            else:
                z_lm = tf.zeros_like(z)

            # rank the combined scores
            next_scores, next_preds = tf.nn.top_k(z + z_lm,
                                                  k=beam_size,
                                                  sorted=True)
            next_preds = tf.to_int32(next_preds)

            # beamed scores & Pruning
            scores = scores[:, None] + next_scores  # [batch_size * beam_size, beam_size]
            scores = tf.reshape(scores,
                                shape=[batch_size, beam_size * beam_size])

            _, k_indices = tf.nn.top_k(scores, k=beam_size)
            k_indices = base_indices * beam_size * beam_size + tf.reshape(
                k_indices, shape=[-1])  # [batch_size * beam_size]
            # Update scores.
            scores = tf.reshape(scores, [-1])
            scores = tf.gather(scores, k_indices)
            # Update predictions.
            next_preds = tf.reshape(next_preds, shape=[-1])
            next_preds = tf.gather(next_preds, indices=k_indices)

            # k_indices: [0~batch*beam*beam], preds: [0~batch*beam]
            # preds, cache_lm, cache_decoder: these data are shared during the beam expand among vocab
            preds = tf.gather(preds, indices=k_indices // beam_size)
            cache_lm = tf.gather(cache_lm, indices=k_indices // beam_size)
            cache_decoder = tf.gather(cache_decoder,
                                      indices=k_indices // beam_size)
            preds = tf.concat([preds, next_preds[:, None]],
                              axis=1)  # [batch_size * beam_size, i]

            has_eos = tf.equal(next_preds, self.end_token)
            finished = tf.logical_or(finished, has_eos)
            len_decoded += 1 - tf.to_int32(finished)

            return i + 1, preds, scores, cache_decoder, cache_lm, logits, len_decoded, finished

        def not_finished(i, preds, scores, cache_decoder, cache_lm, logit,
                         len_decoded, finished):
            return tf.logical_and(
                tf.reduce_any(tf.logical_not(finished)),
                tf.less(
                    i,
                    tf.reduce_min([tf.shape(encoded)[1],
                                   self.args.max_len])  # maxlen = 100
                ))

        _, preds, scores_am, _, _, logits, len_decoded, finished = tf.while_loop(
            cond=not_finished,
            body=step,
            loop_vars=[
                0, token_init, scores_init, cache_decoder_init, cache_lm_init,
                logits_init, len_decoded_init, finished_init
            ],
            shape_invariants=[
                tf.TensorShape([]),
                tf.TensorShape([None, None]),
                tf.TensorShape([None]),
                tf.TensorShape([None, None, None, None]),
                tf.TensorShape([None, None, None, None]),
                tf.TensorShape([None, None, self.dim_output]),
                tf.TensorShape([None]),
                tf.TensorShape([None])
            ])

        # [batch_size * beam_size, ...]
        # Un-count the last step for beams that never emitted <eos> (their
        # decoded length was cut by the maximum decoding length).
        len_decoded -= 1 - tf.to_int32(finished)
        preds = preds[:, 1:]
        not_padding = tf.sequence_mask(len_decoded, dtype=tf.int32)
        preds *= not_padding

        # [batch_size , beam_size, ...]
        if self.args.model.rerank:
            assert self.lm
            with tf.variable_scope(self.args.top_scope, reuse=True):
                with tf.variable_scope(self.args.lm_scope):
                    scores_lm, distribution = self.lm.decoder.score(
                        preds, len_decoded)

            scores_lm = self.args.lambda_rerank * scores_lm
        else:
            scores_lm = tf.zeros_like(scores_am)

        scores = scores_am + scores_lm

        # tf.nn.top_k is used to sort `scores`
        scores_sorted, sorted_indices = tf.nn.top_k(
            tf.reshape(scores, [batch_size, beam_size]), k=beam_size, sorted=True)

        sorted_indices = base_indices * beam_size + tf.reshape(
            sorted_indices, shape=[-1])  # [batch_size * beam_size]

        # [batch_size * beam_size, ...]
        logits_sorted = tf.gather(logits, sorted_indices)
        preds_sorted = tf.gather(preds, sorted_indices)
        len_decoded_sorted = tf.gather(len_decoded, sorted_indices)
        scores_lm_sorted = tf.gather(scores_lm, sorted_indices)
        scores_am_sorted = tf.gather(scores_am, sorted_indices)

        # [batch_size, beam_size, ...]
        scores_lm_sorted = tf.reshape(scores_lm_sorted,
                                      shape=[batch_size, beam_size])
        scores_am_sorted = tf.reshape(scores_am_sorted,
                                      shape=[batch_size, beam_size])
        preds_sorted = tf.reshape(
            preds_sorted, shape=[batch_size, beam_size,
                                 -1])  # [batch_size, beam_size, max_length]
        logits_sorted = tf.reshape(
            logits_sorted, [batch_size, beam_size, -1, self.dim_output])
        len_decoded_sorted = tf.reshape(len_decoded_sorted,
                                        [batch_size, beam_size])

        # Return all sorted beams, plus the best hypothesis and its length.
        return [
            logits_sorted, preds_sorted, len_decoded_sorted, scores_am_sorted,
            scores_lm_sorted
        ], preds_sorted[:, 0, :], len_decoded_sorted[:, 0]
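
A standalone sketch of the pruning step inside `step` above (sizes are illustrative): each of the `batch_size * beam_size` hypotheses expands to `beam_size` candidates, the scores are reshaped per utterance, and `base_indices` converts the per-utterance top-k indices back into flat indices over all `beam_size * beam_size` candidates.

import tensorflow as tf

batch_size, beam_size = 2, 3
# [batch * beam, beam]: per-hypothesis scores of the top beam_size expansions.
scores = tf.random.uniform([batch_size * beam_size, beam_size])
scores = tf.reshape(scores, [batch_size, beam_size * beam_size])
_, k_indices = tf.nn.top_k(scores, k=beam_size)
base_indices = tf.reshape(
    tf.tile(tf.range(batch_size)[:, None], [1, beam_size]), [-1])
# Flat indices into the beam_size * beam_size candidates of each utterance.
flat_indices = base_indices * beam_size * beam_size + tf.reshape(k_indices, [-1])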
Example #54
File: Haar.py  Project: koritsky/ssvd
def haar_psi_tensorflow(reference_image, distorted_image, preprocess_with_subsampling = True):
    """
    Calculates the HaarPSI perceptual similarity index between the two specified images. This implementation uses TensorFlow.

    Parameters:
    -----------
        reference_image: tensorflow.Tensor | tensorflow.Variable
            The reference image, which can be in RGB or grayscale. The values must be in the range [0, 255].
            The image must be a TensorFlow Tensor of the shape (width, height, 3) in the case of RGB or a
            TensorFlow tensor in the shape (width, height) for grayscale.
        distorted_image: tensorflow.Tensor | tensorflow.Variable
            The distorted image, which is to be compared to the reference image. The image can be in RGB or
            grayscale. The values must be in the range [0, 255]. The image must be a TensorFlow tensor of
            the shape (width, height, 3) in the case of RGB or a TensorFlow tensor in the shape
            (width, height) for grayscale.
        preprocess_with_subsampling: boolean
            An optional parameter, which determines whether a preprocessing step is to be performed, which
            accommodates for the viewing distance in psychophysical experiments.

    Returns:
    --------
        (float, tensorflow.Tensor, tensorflow.Tensor): Returns a three-tuple containing the similarity score,
        the similarity maps and the weight maps. The similarity score is the Haar wavelet-based perceptual
        similarity index, measured in the interval [0,1]. The similarity maps are maps of horizontal and
        vertical local similarities. For RGB images, this variable also includes a similarity map with
        respect to the two color channels in the YIQ space. The weight maps are maps that measure the
        importance of the local similarities in the similarity maps.
    """

    if not is_tensorflow_available:
        raise ValueError("TensorFlow is not installed. If you have TensorFlow installed, please check your installation.")

    # Checks if the images are both single precision floats
    if reference_image.dtype != tf.float32:
        raise ValueError("The reference image has to be single precision float.")
    if distorted_image.dtype != tf.float32:
        raise ValueError("The distorted image has to be single precision float.")

    # Checks if the image is a grayscale or an RGB image
    if reference_image.get_shape().as_list() != distorted_image.get_shape().as_list():
        raise ValueError("The shapes of the reference image and the distorted image do not match.")
    if len(reference_image.get_shape().as_list()) == 2:
        is_color_image = False
    elif reference_image.get_shape().as_list()[2] == 1:
        is_color_image = False
    else:
        is_color_image = True

    # The HaarPSI algorithm requires two constants, C and alpha, that have been experimentally determined
    # to be C = 30 and alpha = 4.2
    C = tf.constant(30.0, dtype = tf.float32)
    alpha = tf.constant(4.2, dtype = tf.float32)

    # If the images are in RGB, then they are transformed to the YIQ color space
    if is_color_image:
        reference_image_y = 0.299 * reference_image[:, :, 0] + 0.587 * reference_image[:, :, 1] + 0.114 * reference_image[:, :, 2]
        distorted_image_y = 0.299 * distorted_image[:, :, 0] + 0.587 * distorted_image[:, :, 1] + 0.114 * distorted_image[:, :, 2]
        reference_image_i = 0.596 * reference_image[:, :, 0] - 0.274 * reference_image[:, :, 1] - 0.322 * reference_image[:, :, 2]
        distorted_image_i = 0.596 * distorted_image[:, :, 0] - 0.274 * distorted_image[:, :, 1] - 0.322 * distorted_image[:, :, 2]
        reference_image_q = 0.211 * reference_image[:, :, 0] - 0.523 * reference_image[:, :, 1] + 0.312 * reference_image[:, :, 2]
        distorted_image_q = 0.211 * distorted_image[:, :, 0] - 0.523 * distorted_image[:, :, 1] + 0.312 * distorted_image[:, :, 2]
    else:
        reference_image_y = reference_image
        distorted_image_y = distorted_image

    # Subsamples the images, which simulates the typical distance between an image and its viewer
    if preprocess_with_subsampling:
        reference_image_y = subsample(reference_image_y)
        distorted_image_y = subsample(distorted_image_y)
        if is_color_image:
            reference_image_i = subsample(reference_image_i)
            distorted_image_i = subsample(distorted_image_i)
            reference_image_q = subsample(reference_image_q)
            distorted_image_q = subsample(distorted_image_q)

    # Performs the Haar wavelet decomposition
    number_of_scales = 3
    coefficients_reference_image_y = haar_wavelet_decompose(reference_image_y, number_of_scales)
    coefficients_distorted_image_y = haar_wavelet_decompose(distorted_image_y, number_of_scales)
    if is_color_image:
        coefficients_reference_image_i = tf.abs(convolve2d(reference_image_i, tf.ones((2, 2)) / 4.0, mode = "same"))
        coefficients_distorted_image_i = tf.abs(convolve2d(distorted_image_i, tf.ones((2, 2)) / 4.0, mode = "same"))
        coefficients_reference_image_q = tf.abs(convolve2d(reference_image_q, tf.ones((2, 2)) / 4.0, mode = "same"))
        coefficients_distorted_image_q = tf.abs(convolve2d(distorted_image_q, tf.ones((2, 2)) / 4.0, mode = "same"))

    # Pre-allocates the variables for the local similarities and the weights
    if is_color_image:
        local_similarities = [tf.zeros_like(reference_image_y)] * 3
        weights = [tf.zeros_like(reference_image_y)] * 3
    else:
        local_similarities = [tf.zeros_like(reference_image_y)] * 2
        weights = [tf.zeros_like(reference_image_y)] * 2

    # Computes the weights and similarities for each orientation
    for orientation in range(2):
        weights[orientation] = tf.maximum(
            tf.abs(coefficients_reference_image_y[:, :, 2 + orientation * number_of_scales]),
            tf.abs(coefficients_distorted_image_y[:, :, 2 + orientation * number_of_scales])
        )
        coefficients_reference_image_y_magnitude = tf.abs(coefficients_reference_image_y[:, :, orientation * number_of_scales:2 + orientation * number_of_scales])
        coefficients_distorted_image_y_magnitude = tf.abs(coefficients_distorted_image_y[:, :, orientation * number_of_scales:2 + orientation * number_of_scales])
        local_similarities[orientation] = tf.reduce_sum(
            (2 * coefficients_reference_image_y_magnitude * coefficients_distorted_image_y_magnitude + C) / (coefficients_reference_image_y_magnitude**2 + coefficients_distorted_image_y_magnitude**2 + C),
            axis = 2
        ) / 2
    weights = tf.stack(weights, axis = -1)
    local_similarities = tf.stack(local_similarities, axis = -1)

    # Computes the similarities for color channels
    if is_color_image:
        similarity_i = (2 * coefficients_reference_image_i * coefficients_distorted_image_i + C) / (coefficients_reference_image_i**2 + coefficients_distorted_image_i**2 + C)
        similarity_q = (2 * coefficients_reference_image_q * coefficients_distorted_image_q + C) / (coefficients_reference_image_q**2 + coefficients_distorted_image_q**2 + C)
        local_similarities = tf.concat([local_similarities[:, :, slice(0, 2)], tf.expand_dims((similarity_i + similarity_q) / 2, axis = 2)], axis = 2)
        weights = tf.concat([weights[:, :, slice(0, 2)], tf.expand_dims((weights[:, :, 0] + weights[:, :, 1]) / 2, axis = 2)], axis = 2)

    # Calculates the final score
    similarity = logit(tf.reduce_sum(sigmoid(local_similarities, alpha) * weights) / tf.reduce_sum(weights), alpha)**2

    # Returns the result
    return similarity, local_similarities, weights
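
A minimal call sketch (random stand-in images; `subsample`, `haar_wavelet_decompose`, `convolve2d`, `sigmoid`, and `logit` are assumed to be this module's helpers):

import tensorflow as tf

# Random RGB "images" with values in [0, 255], shape (width, height, 3).
reference = tf.random.uniform([64, 64, 3], 0.0, 255.0, dtype=tf.float32)
distorted = tf.clip_by_value(
    reference + tf.random.normal([64, 64, 3], stddev=10.0), 0.0, 255.0)
similarity, similarity_maps, weight_maps = haar_psi_tensorflow(
    reference, distorted)
with tf.Session() as session:
    print(session.run(similarity))  # scalar similarity in [0, 1]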
Example #55
def low_rank_seq2tens(sequences, kernel, num_levels, embedding_order=1,\
                      recursive_weights=False, bias=None, reverse=False, return_sequences=False, mask=None):
    """
    Tensorflow implementation of the Low-rank Seq2Tens (LS2T) map
    --------------------------------------------------
    Args
    ----
    :sequences: - a tensor of sequences of shape (num_examples, len_examples, num_features)
    :kernel: - a tensor of component vectors of rank-1 weight tensors of shape (num_components, num_features, num_functionals)
    :num_levels: - an int scalar denoting the cutoff degree in the features themselves (must be consistent with the 'num_components' dimension of 'kernel')
    :embedding_order: - an int scalar denoting the cutoff degree in the algebraic embedding
    :recursive_weights: - whether the rank-1 weight tensors are constructed recursively (must be consistent with the shape of 'kernel')
    :bias: - a tensor of biases of shape (num_components, num_functionals)
    :reverse: - only changes the results with 'return_sequences=True'; determines whether the output sequences are constructed by moving the starting point or the ending point of subsequences
    :return_sequences: - whether to return the features for all prefixes (or suffixes) of each sequence instead of a single feature vector per sequence
    :mask: - an optional boolean tensor of shape (num_examples, len_examples) marking the valid time steps
    """

    num_sequences, len_sequences, num_features = tf.unstack(
        tf.shape(sequences))

    num_components = int(num_levels * (num_levels + 1) /
                         2.) if not recursive_weights else num_levels

    num_functionals = tf.shape(kernel)[-1]

    M = tf.matmul(tf.reshape(sequences, [1, -1, num_features]), kernel)

    M = tf.reshape(
        M, [num_components, num_sequences, len_sequences, num_functionals])

    if bias is not None:
        M += bias[:, None, None, :]

    if mask is not None:
        M = tf.where(mask[None, :, :, None], M, tf.zeros_like(M))

    if embedding_order == 1:
        if recursive_weights:
            return _low_rank_seq2tens_first_order_embedding_recursive_weights(
                M,
                num_levels,
                reverse=reverse,
                return_sequences=return_sequences)
        else:
            return _low_rank_seq2tens_first_order_embedding_indep_weights(
                M,
                num_levels,
                reverse=reverse,
                return_sequences=return_sequences)
    else:
        if recursive_weights:
            return _low_rank_seq2tens_higher_order_embedding_recursive_weights(
                M,
                num_levels,
                embedding_order,
                reverse=reverse,
                return_sequences=return_sequences)
        else:
            return _low_rank_seq2tens_higher_order_embedding_indep_weights(
                M,
                num_levels,
                embedding_order,
                reverse=reverse,
                return_sequences=return_sequences)
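
The consistency constraint between `num_levels` and the kernel's `num_components` dimension can be made explicit; a small construction sketch (illustrative sizes, TF1-style variables):

import tensorflow as tf

num_levels, num_features, num_functionals = 3, 8, 16
# Independent (non-recursive) weights: num_components = 3 * 4 / 2 = 6.
num_components = num_levels * (num_levels + 1) // 2
kernel = tf.get_variable(
    'ls2t_kernel', [num_components, num_features, num_functionals],
    initializer=tf.glorot_uniform_initializer())
sequences = tf.placeholder(tf.float32, [None, None, num_features])
features = low_rank_seq2tens(sequences, kernel, num_levels)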
Example #56
def dynamic_decode(decoder,
                   impute_finished=False,
                   maximum_iterations=None,
                   parallel_iterations=32,
                   swap_memory=False,
                   scope=None):
    """Perform dynamic decoding with `decoder`.

  Calls initialize() once and step() repeatedly on the Decoder object.

  Args:
    decoder: A `Decoder` instance.
    impute_finished: Python boolean.  If `True`, then states for batch
      entries which are marked as finished get copied through and the
      corresponding outputs get zeroed out.  This causes some slowdown at
      each time step, but ensures that the final state and outputs have
      the correct values and that backprop ignores time steps that were
      marked as finished.
    maximum_iterations: `int32` scalar, maximum allowed number of decoding
       steps.  Default is `None` (decode until the decoder is fully done).
    parallel_iterations: Argument passed to `tf.while_loop`.
    swap_memory: Argument passed to `tf.while_loop`.
    scope: Optional variable scope to use.

  Returns:
    The predicted ids from the final decoder state (`final_state.pred_ids`).

  Raises:
    TypeError: if `decoder` is not an instance of `Decoder`.
    ValueError: if `maximum_iterations` is provided but is not a scalar.
  """
    if not isinstance(decoder, Decoder):
        raise TypeError("Expected decoder to be type Decoder, but saw: %s" %
                        type(decoder))

    with tf.variable_scope(scope, "decoder") as varscope:
        # Determine context types.
        ctxt = tf.get_default_graph()._get_control_flow_context()  # pylint: disable=protected-access
        is_xla = control_flow_util.GetContainingXLAContext(ctxt) is not None
        in_while_loop = (control_flow_util.GetContainingWhileContext(ctxt)
                         is not None)
        # Properly cache variable values inside the while_loop.
        # Don't set a caching device when running in a loop, since it is possible
        # that train steps could be wrapped in a tf.while_loop. In that scenario
        # caching prevents forward computations in loop iterations from re-reading
        # the updated weights.
        if not tf.executing_eagerly() and not in_while_loop:
            if varscope.caching_device is None:
                varscope.set_caching_device(lambda op: op.device)

        if maximum_iterations is not None:
            maximum_iterations = tf.convert_to_tensor(
                maximum_iterations, dtype=tf.int32, name="maximum_iterations")
            if maximum_iterations.get_shape().ndims != 0:
                raise ValueError("maximum_iterations must be a scalar")

        initial_finished, initial_inputs, initial_state = decoder.initialize()

        zero_outputs = _create_zero_outputs(decoder.output_size,
                                            decoder.output_dtype)

        if is_xla and maximum_iterations is None:
            raise ValueError(
                "maximum_iterations is required for XLA compilation.")
        if maximum_iterations is not None:
            initial_finished = tf.logical_or(initial_finished,
                                             0 >= maximum_iterations)
        initial_sequence_lengths = tf.zeros_like(initial_finished,
                                                 dtype=tf.int32)
        initial_time = tf.constant(0, dtype=tf.int32)

        dynamic_size = maximum_iterations is None or not is_xla

        def _create_ta(s, d):
            return tf.TensorArray(
                dtype=d,
                size=0 if dynamic_size else maximum_iterations,
                dynamic_size=dynamic_size,
                element_shape=s)

        initial_outputs_ta = tf.contrib.framework.nest.map_structure(
            _create_ta, decoder.output_size, decoder.output_dtype)

        def condition(unused_time, unused_outputs_ta, unused_state,
                      unused_inputs, finished, unused_sequence_lengths):
            return tf.logical_not(tf.reduce_all(finished))

        def body(time, outputs_ta, state, inputs, finished, sequence_lengths):
            """Internal while_loop body.

      Args:
        time: scalar int32 tensor.
        outputs_ta: structure of TensorArray.
        state: (structure of) state tensors and TensorArrays.
        inputs: (structure of) input tensors.
        finished: bool tensor (keeping track of what's finished).
        sequence_lengths: int32 tensor (keeping track of time of finish).

      Returns:
        `(time + 1, outputs_ta, next_state, next_inputs, next_finished,
          next_sequence_lengths)`.
      """
            (next_outputs, decoder_state, next_inputs,
             decoder_finished) = decoder.step(time, inputs, state)
            if decoder.tracks_own_finished:
                next_finished = decoder_finished
            else:
                next_finished = tf.logical_or(decoder_finished, finished)
            next_sequence_lengths = tf.where(
                tf.logical_not(finished),
                tf.fill(tf.shape(sequence_lengths), time + 1),
                sequence_lengths)

            tf.contrib.framework.nest.assert_same_structure(
                state, decoder_state)
            tf.contrib.framework.nest.assert_same_structure(
                outputs_ta, next_outputs)
            tf.contrib.framework.nest.assert_same_structure(
                inputs, next_inputs)

            # Zero out output values past finish
            if impute_finished:
                emit = tf.contrib.framework.nest.map_structure(
                    lambda out, zero: tf.where(finished, zero, out),
                    next_outputs, zero_outputs)
            else:
                emit = next_outputs

            # Copy through states past finish
            def _maybe_copy_state(new, cur):
                # TensorArrays and scalar states get passed through.
                if isinstance(cur, tf.TensorArray):
                    pass_through = True
                else:
                    new.set_shape(cur.shape)
                    pass_through = (new.shape.ndims == 0)
                return new if pass_through else tf.where(finished, cur, new)

            if impute_finished:
                next_state = tf.contrib.framework.nest.map_structure(
                    _maybe_copy_state, decoder_state, state)
            else:
                next_state = decoder_state

            outputs_ta = tf.contrib.framework.nest.map_structure(
                lambda ta, out: ta.write(time, out), outputs_ta, emit)
            return (time + 1, outputs_ta, next_state, next_inputs,
                    next_finished, next_sequence_lengths)

        res = tf.while_loop(condition,
                            body,
                            loop_vars=(
                                initial_time,
                                initial_outputs_ta,
                                initial_state,
                                initial_inputs,
                                initial_finished,
                                initial_sequence_lengths,
                            ),
                            parallel_iterations=parallel_iterations,
                            maximum_iterations=maximum_iterations,
                            swap_memory=swap_memory)

        final_outputs_ta = res[1]
        final_state = res[2]
        final_sequence_lengths = res[5]

        final_outputs = tf.contrib.framework.nest.map_structure(
            lambda ta: ta.stack(), final_outputs_ta)

        try:
            final_outputs, final_state = decoder.finalize(
                final_outputs, final_state, final_sequence_lengths)
        except NotImplementedError:
            pass

    # Unlike the stock seq2seq dynamic_decode, this variant returns only the
    # predicted ids carried in the final decoder state.
    return final_state.pred_ids
Example #57
    def __init__(
            self,
            dir_name,
            session,  # TensorFlow session
            size_image=128,  # size of the input images
            size_kernel=5,  # size of the kernels in convolution and deconvolution
            size_batch=100,  # mini-batch size for training and testing, must be square of an integer
            num_input_channels=3,  # number of channels of input images
            num_encoder_channels=64,  # number of channels of the first conv layer of encoder
            num_z_channels=50,  # number of channels of the layer z (noise or code)
            num_categories=10,  # number of categories (age segments) in the training dataset
            num_gen_channels=1024,  # number of channels of the first deconv layer of generator
            enable_tile_label=True,  # enable to tile the label
            tile_ratio=1.0,  # ratio of the length between tiled label and z
            is_training=True,  # flag for training or testing mode
            save_dir='./save',  # path to save checkpoints, samples, and summary
            dataset_name='UTKFace',  # name of the dataset in the folder ./data
            learning_rate=0.0002):
        self.dir_name = dir_name
        self.session = session
        self.image_value_range = (-1, 1)
        self.size_image = size_image
        self.size_kernel = size_kernel
        self.size_batch = size_batch
        self.num_input_channels = num_input_channels
        self.num_encoder_channels = num_encoder_channels
        self.num_z_channels = num_z_channels
        self.num_categories = num_categories
        self.num_gen_channels = num_gen_channels
        self.enable_tile_label = enable_tile_label
        self.tile_ratio = tile_ratio
        self.is_training = is_training
        self.save_dir = save_dir
        self.dataset_name = dataset_name
        self.learning_rate = learning_rate
        # ************************************* input to graph ********************************************************
        self.input_image = tf.placeholder(tf.float32, [
            self.size_batch, self.size_image, self.size_image,
            self.num_input_channels
        ],
                                          name='input_images')

        self.age = tf.placeholder(tf.float32,
                                  [self.size_batch, self.num_categories],
                                  name='age_labels')

        self.gender = tf.placeholder(tf.float32, [self.size_batch, 2],
                                     name='gender_labels')

        self.z_prior = tf.placeholder(tf.float32,
                                      [self.size_batch, self.num_z_channels],
                                      name='z_prior')

        # ************************************* build the graph *******************************************************
        print('\n\tBuilding graph ...')

        # encoder: input image --> z
        self.z = self.encoder(image=self.input_image)

        # generator: z + label --> generated image
        self.G = self.generator(z=self.z,
                                y=self.age,
                                gender=self.gender,
                                enable_tile_label=self.enable_tile_label,
                                tile_ratio=self.tile_ratio)

        # discriminator on z
        self.D_z, self.D_z_logits = self.discriminator_z(
            z=self.z, is_training=self.is_training)

        # discriminator on G
        self.D_G, self.D_G_logits = self.discriminator_img(
            image=self.G,
            y=self.age,
            gender=self.gender,
            is_training=self.is_training)

        # discriminator on z_prior
        self.D_z_prior, self.D_z_prior_logits = self.discriminator_z(
            z=self.z_prior, is_training=self.is_training, reuse_variables=True)

        # discriminator on input image
        self.D_input, self.D_input_logits = self.discriminator_img(
            image=self.input_image,
            y=self.age,
            gender=self.gender,
            is_training=self.is_training,
            reuse_variables=True)

        # ************************************* loss functions *******************************************************
        # loss function of encoder + generator
        #self.EG_loss = tf.nn.l2_loss(self.input_image - self.G) / self.size_batch  # L2 loss
        self.EG_loss = tf.reduce_mean(tf.abs(self.input_image -
                                             self.G))  # L1 loss

        # loss function of discriminator on z
        self.D_z_loss_prior = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=self.D_z_prior_logits,
                labels=tf.ones_like(self.D_z_prior_logits)))
        self.D_z_loss_z = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_z_logits,
                                                    labels=tf.zeros_like(
                                                        self.D_z_logits)))
        self.E_z_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_z_logits,
                                                    labels=tf.ones_like(
                                                        self.D_z_logits)))
        # loss function of discriminator on image
        self.D_img_loss_input = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_input_logits,
                                                    labels=tf.ones_like(
                                                        self.D_input_logits)))
        self.D_img_loss_G = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_G_logits,
                                                    labels=tf.zeros_like(
                                                        self.D_G_logits)))
        self.G_img_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_G_logits,
                                                    labels=tf.ones_like(
                                                        self.D_G_logits)))

        # total variation to smooth the generated image
        tv_y_size = self.size_image
        tv_x_size = self.size_image
        self.tv_loss = ((tf.nn.l2_loss(self.G[:, 1:, :, :] -
                                       self.G[:, :self.size_image - 1, :, :]) /
                         tv_y_size) +
                        (tf.nn.l2_loss(self.G[:, :, 1:, :] -
                                       self.G[:, :, :self.size_image - 1, :]) /
                         tv_x_size)) / self.size_batch

        # *********************************** trainable variables ****************************************************
        trainable_variables = tf.trainable_variables()
        # variables of encoder
        self.E_variables = [
            var for var in trainable_variables if 'E_' in var.name
        ]
        # variables of generator
        self.G_variables = [
            var for var in trainable_variables if 'G_' in var.name
        ]
        # variables of discriminator on z
        self.D_z_variables = [
            var for var in trainable_variables if 'D_z_' in var.name
        ]
        # variables of discriminator on image
        self.D_img_variables = [
            var for var in trainable_variables if 'D_img_' in var.name
        ]

        # ************************************* collect the summary ***************************************
        self.z_summary = tf.summary.histogram('z', self.z)
        self.z_prior_summary = tf.summary.histogram('z_prior', self.z_prior)
        self.EG_loss_summary = tf.summary.scalar('EG_loss', self.EG_loss)
        self.D_z_loss_z_summary = tf.summary.scalar('D_z_loss_z',
                                                    self.D_z_loss_z)
        self.D_z_loss_prior_summary = tf.summary.scalar(
            'D_z_loss_prior', self.D_z_loss_prior)
        self.E_z_loss_summary = tf.summary.scalar('E_z_loss', self.E_z_loss)
        self.D_z_logits_summary = tf.summary.histogram('D_z_logits',
                                                       self.D_z_logits)
        self.D_z_prior_logits_summary = tf.summary.histogram(
            'D_z_prior_logits', self.D_z_prior_logits)
        self.D_img_loss_input_summary = tf.summary.scalar(
            'D_img_loss_input', self.D_img_loss_input)
        self.D_img_loss_G_summary = tf.summary.scalar('D_img_loss_G',
                                                      self.D_img_loss_G)
        self.G_img_loss_summary = tf.summary.scalar('G_img_loss',
                                                    self.G_img_loss)
        self.D_G_logits_summary = tf.summary.histogram('D_G_logits',
                                                       self.D_G_logits)
        self.D_input_logits_summary = tf.summary.histogram(
            'D_input_logits', self.D_input_logits)
        # for saving the graph and variables
        self.saver = tf.train.Saver(max_to_keep=2)
Example #58
def count_non_blank(tensor: tf.Tensor, blank: int or tf.Tensor = 0, axis=None):
    return tf.reduce_sum(tf.where(tf.not_equal(tensor, blank), x=tf.ones_like(tensor), y=tf.zeros_like(tensor)), axis=axis)
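
With `blank=0` this reduces to counting nonzero entries, for example:

# Count non-blank tokens per sequence (axis=-1 gives one count per row).
labels = tf.constant([[3, 5, 0, 0],
                      [7, 0, 0, 0]])
lengths = count_non_blank(labels, blank=0, axis=-1)  # -> [2, 1]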
Example #59
    def build_model(self):

        # Placeholders for real training samples
        self.input_A_real = tf.placeholder(tf.float32,
                                           shape=self.input_shape,
                                           name='input_A_real')
        self.input_B_real = tf.placeholder(tf.float32,
                                           shape=self.input_shape,
                                           name='input_B_real')
        # Placeholders for fake generated samples
        self.input_A_fake = tf.placeholder(tf.float32,
                                           shape=self.input_shape,
                                           name='input_A_fake')
        self.input_B_fake = tf.placeholder(tf.float32,
                                           shape=self.input_shape,
                                           name='input_B_fake')
        # Placeholder for test samples
        self.input_A_test = tf.placeholder(tf.float32,
                                           shape=self.input_shape,
                                           name='input_A_test')
        self.input_B_test = tf.placeholder(tf.float32,
                                           shape=self.input_shape,
                                           name='input_B_test')

        self.generation_B = self.generator(inputs=self.input_A_real,
                                           dim=self.num_features,
                                           batch_size=self.batch_size,
                                           reuse=False,
                                           scope_name='generator_A2B')
        self.cycle_A = self.generator(inputs=self.generation_B,
                                      dim=self.num_features,
                                      batch_size=self.batch_size,
                                      reuse=False,
                                      scope_name='generator_B2A')

        self.generation_A = self.generator(inputs=self.input_B_real,
                                           dim=self.num_features,
                                           batch_size=self.batch_size,
                                           reuse=True,
                                           scope_name='generator_B2A')
        self.cycle_B = self.generator(inputs=self.generation_A,
                                      dim=self.num_features,
                                      batch_size=self.batch_size,
                                      reuse=True,
                                      scope_name='generator_A2B')

        self.generation_A_identity = self.generator(inputs=self.input_A_real,
                                                    dim=self.num_features,
                                                    batch_size=self.batch_size,
                                                    reuse=True,
                                                    scope_name='generator_B2A')
        self.generation_B_identity = self.generator(inputs=self.input_B_real,
                                                    dim=self.num_features,
                                                    batch_size=self.batch_size,
                                                    reuse=True,
                                                    scope_name='generator_A2B')

        self.discrimination_A_fake = self.discriminator(
            inputs=self.generation_A,
            reuse=False,
            scope_name='discriminator_A')
        self.discrimination_B_fake = self.discriminator(
            inputs=self.generation_B,
            reuse=False,
            scope_name='discriminator_B')

        # Cycle loss
        self.cycle_loss = l1_loss(y=self.input_A_real,
                                  y_hat=self.cycle_A) + l1_loss(
                                      y=self.input_B_real, y_hat=self.cycle_B)

        # Identity loss
        self.identity_loss = l1_loss(
            y=self.input_A_real, y_hat=self.generation_A_identity) + l1_loss(
                y=self.input_B_real, y_hat=self.generation_B_identity)

        # Placeholders for lambda_cycle and lambda_identity
        self.lambda_cycle = tf.placeholder(tf.float32,
                                           None,
                                           name='lambda_cycle')
        self.lambda_identity = tf.placeholder(tf.float32,
                                              None,
                                              name='lambda_identity')

        # Generator loss
        # Generator wants to fool discriminator
        self.generator_loss_A2B = l2_loss(y=tf.ones_like(
            self.discrimination_B_fake),
                                          y_hat=self.discrimination_B_fake)
        self.generator_loss_B2A = l2_loss(y=tf.ones_like(
            self.discrimination_A_fake),
                                          y_hat=self.discrimination_A_fake)

        # Merge the two generators and the cycle loss
        self.generator_loss = self.generator_loss_A2B + self.generator_loss_B2A + self.lambda_cycle * self.cycle_loss + self.lambda_identity * self.identity_loss

        # Discriminator loss
        self.discrimination_input_A_real = self.discriminator(
            inputs=self.input_A_real, reuse=True, scope_name='discriminator_A')
        self.discrimination_input_B_real = self.discriminator(
            inputs=self.input_B_real, reuse=True, scope_name='discriminator_B')
        self.discrimination_input_A_fake = self.discriminator(
            inputs=self.input_A_fake, reuse=True, scope_name='discriminator_A')
        self.discrimination_input_B_fake = self.discriminator(
            inputs=self.input_B_fake, reuse=True, scope_name='discriminator_B')

        # Discriminator wants to classify real and fake correctly
        self.discriminator_loss_input_A_real = l2_loss(
            y=tf.ones_like(self.discrimination_input_A_real),
            y_hat=self.discrimination_input_A_real)
        self.discriminator_loss_input_A_fake = l2_loss(
            y=tf.zeros_like(self.discrimination_input_A_fake),
            y_hat=self.discrimination_input_A_fake)
        self.discriminator_loss_A = (self.discriminator_loss_input_A_real +
                                     self.discriminator_loss_input_A_fake) / 2

        self.discriminator_loss_input_B_real = l2_loss(
            y=tf.ones_like(self.discrimination_input_B_real),
            y_hat=self.discrimination_input_B_real)
        self.discriminator_loss_input_B_fake = l2_loss(
            y=tf.zeros_like(self.discrimination_input_B_fake),
            y_hat=self.discrimination_input_B_fake)
        self.discriminator_loss_B = (self.discriminator_loss_input_B_real +
                                     self.discriminator_loss_input_B_fake) / 2

        # Merge the two discriminators into one
        self.discriminator_loss = self.discriminator_loss_A + self.discriminator_loss_B

        # Categorize variables because we have to optimize the two sets of the variables separately
        trainable_variables = tf.trainable_variables()
        self.discriminator_vars = [
            var for var in trainable_variables if 'discriminator' in var.name
        ]
        self.generator_vars = [
            var for var in trainable_variables if 'generator' in var.name
        ]
        # for var in t_vars: print(var.name)

        # Reserved for test
        self.generation_B_test = self.generator(inputs=self.input_A_test,
                                                dim=self.num_features,
                                                batch_size=self.batch_size,
                                                reuse=True,
                                                scope_name='generator_A2B')
        self.generation_A_test = self.generator(inputs=self.input_B_test,
                                                dim=self.num_features,
                                                batch_size=self.batch_size,
                                                reuse=True,
                                                scope_name='generator_B2A')
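
`l1_loss` and `l2_loss` are assumed helpers; plausible mean-reduced definitions consistent with how they are called above (an assumption, not taken from this file) are:

import tensorflow as tf

def l1_loss(y, y_hat):
    # Mean absolute error; used for the cycle and identity losses.
    return tf.reduce_mean(tf.abs(y - y_hat))

def l2_loss(y, y_hat):
    # Mean squared error; used for the LSGAN-style adversarial losses.
    return tf.reduce_mean(tf.square(y - y_hat))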
Example #60
File: zero.py  Project: 1036225283/python
# encoding: utf-8

import tensorflow as tf

session = tf.Session()

# Create a tensor with all of its elements set to 0
data = tf.zeros([2, 8], dtype=tf.float16)
print("Create a 2x8 tensor of zeros\n", session.run(data))

m2 = tf.constant([4, 4])
print("before zero copy = \n", session.run(m2))

# Copy a tensor, with its data set to 0
data = tf.zeros_like(m2)
print("Copy m2 with its data set to 0 = \n", session.run(data))

# Create a tensor with all of its elements set to 1
data = tf.ones([2, 8], dtype=tf.int32)
print("Create a 2x8 tensor of ones\n", session.run(data))

# Copy a tensor, with its data set to 1
data = tf.ones_like(m2)
print("Copy m2 with its data set to 1\n", session.run(data))

data = tf.fill([3, 3], 4)
print("Create a tensor filled with a given value\n", session.run(data))

# Release the session resources.
session.close()