Example #1
    def testArgRenames(self):
        with self.test_session():

            a = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
            b = [[True, False, False], [False, True, True]]
            dim0 = [1]
            dim1 = [1]

            self.assertAllEqual(tf.reduce_any(b, reduction_indices=dim0).eval(), [True, True])
            self.assertAllEqual(tf.reduce_all(b, reduction_indices=[0]).eval(), [False, False, False])
            self.assertAllEqual(tf.reduce_all(b, reduction_indices=dim1).eval(), [False, False])
            self.assertAllEqual(tf.reduce_sum(a, reduction_indices=[1]).eval(), [6.0, 15.0])
            self.assertAllEqual(tf.reduce_sum(a, reduction_indices=[0, 1]).eval(), 21.0)
            self.assertAllEqual(tf.reduce_sum(a, [0, 1]).eval(), 21.0)
            self.assertAllEqual(tf.reduce_prod(a, reduction_indices=[1]).eval(), [6.0, 120.0])
            self.assertAllEqual(tf.reduce_prod(a, reduction_indices=[0, 1]).eval(), 720.0)
            self.assertAllEqual(tf.reduce_prod(a, [0, 1]).eval(), 720.0)
            self.assertAllEqual(tf.reduce_mean(a, reduction_indices=[1]).eval(), [2.0, 5.0])
            self.assertAllEqual(tf.reduce_mean(a, reduction_indices=[0, 1]).eval(), 3.5)
            self.assertAllEqual(tf.reduce_mean(a, [0, 1]).eval(), 3.5)
            self.assertAllEqual(tf.reduce_min(a, reduction_indices=[1]).eval(), [1.0, 4.0])
            self.assertAllEqual(tf.reduce_min(a, reduction_indices=[0, 1]).eval(), 1.0)
            self.assertAllEqual(tf.reduce_min(a, [0, 1]).eval(), 1.0)
            self.assertAllEqual(tf.reduce_max(a, reduction_indices=[1]).eval(), [3.0, 6.0])
            self.assertAllEqual(tf.reduce_max(a, reduction_indices=[0, 1]).eval(), 6.0)
            self.assertAllEqual(tf.reduce_max(a, [0, 1]).eval(), 6.0)
            self.assertAllClose(tf.reduce_logsumexp(a, reduction_indices=[1]).eval(), [3.40760589, 6.40760612])
            self.assertAllClose(tf.reduce_logsumexp(a, reduction_indices=[0, 1]).eval(), 6.45619344711)
            self.assertAllClose(tf.reduce_logsumexp(a, [0, 1]).eval(), 6.45619344711)
            self.assertAllEqual(tf.expand_dims([[1, 2], [3, 4]], dim=1).eval(), [[[1, 2]], [[3, 4]]])
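The asserts above exercise the deprecated `reduction_indices` and `dim` keyword arguments. A minimal sketch of the equivalent calls after the TF 1.x renames (both became `axis`), assuming a graph-mode session:

import tensorflow as tf

a = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
with tf.Session() as sess:
    print(sess.run(tf.reduce_sum(a, axis=[1])))                 # [ 6. 15.]
    print(sess.run(tf.reduce_min(a, axis=[0, 1])))              # 1.0
    print(sess.run(tf.expand_dims([[1, 2], [3, 4]], axis=1)))   # [[[1 2]] [[3 4]]]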
Example #2
def gen_debug_td_error_summaries(
    target_q_values, q_values, td_targets, td_errors):
  """Generates debug summaries for critic given a set of batch samples.

  Args:
    target_q_values: set of predicted next stage values.
    q_values: current predicted value for the critic network.
    td_targets: discounted target_q_values with added next stage reward.
    td_errors: the difference between td_targets and q_values.
  """
  with tf.name_scope('td_errors'):
    tf.summary.histogram('td_targets', td_targets)
    tf.summary.histogram('q_values', q_values)
    tf.summary.histogram('target_q_values', target_q_values)
    tf.summary.histogram('td_errors', td_errors)
    with tf.name_scope('td_targets'):
      tf.summary.scalar('mean', tf.reduce_mean(td_targets))
      tf.summary.scalar('max', tf.reduce_max(td_targets))
      tf.summary.scalar('min', tf.reduce_min(td_targets))
    with tf.name_scope('q_values'):
      tf.summary.scalar('mean', tf.reduce_mean(q_values))
      tf.summary.scalar('max', tf.reduce_max(q_values))
      tf.summary.scalar('min', tf.reduce_min(q_values))
    with tf.name_scope('target_q_values'):
      tf.summary.scalar('mean', tf.reduce_mean(target_q_values))
      tf.summary.scalar('max', tf.reduce_max(target_q_values))
      tf.summary.scalar('min', tf.reduce_min(target_q_values))
    with tf.name_scope('td_errors'):
      tf.summary.scalar('mean', tf.reduce_mean(td_errors))
      tf.summary.scalar('max', tf.reduce_max(td_errors))
      tf.summary.scalar('min', tf.reduce_min(td_errors))
      tf.summary.scalar('mean_abs', tf.reduce_mean(tf.abs(td_errors)))
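A hypothetical call site for the helper above (the placeholder names are illustrative, not from the source): build the TD quantities as tensors, attach the debug summaries, and merge them for a summary writer.

q_values = tf.placeholder(tf.float32, [None])
target_q_values = tf.placeholder(tf.float32, [None])
td_targets = tf.placeholder(tf.float32, [None])
td_errors = td_targets - q_values  # the difference, per the docstring

gen_debug_td_error_summaries(target_q_values, q_values, td_targets, td_errors)
merged_summaries = tf.summary.merge_all()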
Example #3
def init_data(inputFile, K):
    global training_data, validation_data, centroids, training_num, data_dim, centroids_num 
    global tf_data_set, tf_centroids
    # initialize data and centroids
    data = np.float32( np.load(inputFile))
    data = (data - data.mean()) / data.std()
    # update data_num and centroids_num
    data_num, data_dim = data.shape
    centroids_num = K
    # training data and validation data
    training_num = int(2./3 * data_num)
    training_data = data[:training_num]
    validation_data = data[training_num:]
    centroids = tf.truncated_normal(shape=[centroids_num, data_dim])
    # update tf_data_set and tf_centroids
    tf_data_set = tf.placeholder(tf.float32, shape=[None, data_dim])
    tf_centroids = tf.Variable(tf.convert_to_tensor(centroids, dtype=tf.float32))
    ########### for the training cases #####################
    # get the euclidean distance
    tf_train_dist = euclidean_dist(tf_data_set, tf_centroids, training_num, centroids_num)
    # get the min index for data set
    tf_train_min_index = tf.argmin(tf_train_dist, dimension=1)
    # loss and optimizer
    tf_train_loss = tf.reduce_sum(tf.reduce_min(euclidean_dist(tf_data_set, tf_centroids, training_num, centroids_num), 
        1, keep_dims=True))
    tf_train_opt = tf.train.AdamOptimizer(learning_rate=0.1, beta1=0.9, beta2=0.99, epsilon=1e-5).minimize(tf_train_loss)
    ########### for the validation cases ####################
    tf_valid_dist = euclidean_dist(tf_data_set, tf_centroids, (data_num-training_num), centroids_num)
    tf_valid_min_index = tf.argmin(tf_valid_dist, dimension=1)
    tf_valid_loss = tf.reduce_sum(tf.reduce_min(euclidean_dist(tf_data_set, tf_centroids, (data_num-training_num), centroids_num), 
        1, keep_dims=True))
    return tf_train_min_index, tf_train_loss, tf_train_opt, tf_valid_loss
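init_data assumes a helper euclidean_dist(data, centroids, data_num, centroids_num) that returns an [N, K] distance matrix; a minimal sketch under that assumption (the two count arguments are accepted only for interface compatibility):

def euclidean_dist(data, centroids, data_num, centroids_num):
    # Broadcast [N, 1, D] against [1, K, D], then sum squared differences
    # over the feature axis to get [N, K] squared distances.
    expanded_data = tf.expand_dims(data, 1)
    expanded_centroids = tf.expand_dims(centroids, 0)
    return tf.reduce_sum(tf.square(expanded_data - expanded_centroids), 2)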
Example #4
 def coverage_box(bboxes):
   y_min, x_min, y_max, x_max = tf.split(
       value=bboxes, num_or_size_splits=4, axis=1)
   y_min_coverage = tf.reduce_min(y_min, axis=0)
   x_min_coverage = tf.reduce_min(x_min, axis=0)
   y_max_coverage = tf.reduce_max(y_max, axis=0)
   x_max_coverage = tf.reduce_max(x_max, axis=0)
   return tf.stack(
       [y_min_coverage, x_min_coverage, y_max_coverage, x_max_coverage],
       axis=1)
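For illustration, the smallest box covering two boxes in [y_min, x_min, y_max, x_max] form:

bboxes = tf.constant([[0.1, 0.2, 0.5, 0.6],
                      [0.0, 0.3, 0.4, 0.9]])
covering = coverage_box(bboxes)  # evaluates to [[0.0, 0.2, 0.5, 0.9]]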
Example #5
 def __init__(self, reuse=False, trainable=True):
    # Placeholders for our input
    # Our input is a stack of 4 frames of shape 84, 84 each
    self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.uint8, name="X")
    # The TD target value
    self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name="y")

    X = tf.to_float(self.states) / 255.0
    batch_size = tf.shape(self.states)[0]

    # Graph shared with Value Net
    with tf.variable_scope("shared", reuse=reuse):
      fc1 = build_shared_network(X, add_summaries=(not reuse))

    with tf.variable_scope("value_net"):
      self.logits = tf.contrib.layers.fully_connected(
        inputs=fc1,
        num_outputs=1,
        activation_fn=None)
      self.logits = tf.squeeze(self.logits, squeeze_dims=[1], name="logits")

      self.losses = tf.squared_difference(self.logits, self.targets)
      self.loss = tf.reduce_sum(self.losses, name="loss")

      self.predictions = {
        "logits": self.logits
      }

      # Summaries
      prefix = tf.get_variable_scope().name
      tf.scalar_summary(self.loss.name, self.loss)
      tf.scalar_summary("{}/max_value".format(prefix), tf.reduce_max(self.logits))
      tf.scalar_summary("{}/min_value".format(prefix), tf.reduce_min(self.logits))
      tf.scalar_summary("{}/mean_value".format(prefix), tf.reduce_mean(self.logits))
      tf.scalar_summary("{}/reward_max".format(prefix), tf.reduce_max(self.targets))
      tf.scalar_summary("{}/reward_min".format(prefix), tf.reduce_min(self.targets))
      tf.scalar_summary("{}/reward_mean".format(prefix), tf.reduce_mean(self.targets))
      tf.histogram_summary("{}/reward_targets".format(prefix), self.targets)
      tf.histogram_summary("{}/values".format(prefix), self.logits)

      if trainable:
        # self.optimizer = tf.train.AdamOptimizer(1e-4)
        self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6)
        self.grads_and_vars = self.optimizer.compute_gradients(self.loss)
        self.grads_and_vars = [[grad, var] for grad, var in self.grads_and_vars if grad is not None]
        self.train_op = self.optimizer.apply_gradients(self.grads_and_vars,
          global_step=tf.contrib.framework.get_global_step())

    var_scope_name = tf.get_variable_scope().name
    summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
    summaries = [s for s in summary_ops if var_scope_name in s.name]
    self.summaries = tf.merge_summary(summaries)
Example #6
    def conv(self, input, k_h, k_w, c_o, s_h, s_w, name, relu=True, padding=DEFAULT_PADDING, group=1, trainable=True):
        
        print(name)
        if isinstance(input, tuple):
            input = input[0]   

        self.validate_padding(padding)
        c_i = input.get_shape()[-1]
        print(c_i)
        print(input.get_shape().as_list())
        assert c_i % group == 0
        assert c_o % group == 0
        convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
        with tf.variable_scope(name) as scope:
            init_weights = tf.truncated_normal_initializer(0.0, stddev=0.01)
            init_biases = tf.constant_initializer(0.0)
            kernel = self.make_var('weights', [k_h, k_w, c_i // group, c_o], init_weights, trainable)
            biases = self.make_var('biases', [c_o], init_biases, trainable)

            
            with tf.name_scope('summaries'):
                with tf.name_scope('weights'):
                    mean = tf.reduce_mean(kernel)
                    tf.summary.scalar('mean', mean)
                    with tf.name_scope('stddev'):
                        stddev = tf.sqrt(tf.reduce_mean(tf.square(kernel - mean)))
                    tf.summary.scalar('stddev', stddev)
                    tf.summary.scalar('max', tf.reduce_max(kernel))
                    tf.summary.scalar('min', tf.reduce_min(kernel))
                    tf.summary.histogram('histogram', kernel)
                with tf.name_scope('biases'):
                    mean = tf.reduce_mean(biases)
                    tf.summary.scalar('mean', mean)
                    with tf.name_scope('stddev'):
                        stddev = tf.sqrt(tf.reduce_mean(tf.square(biases - mean)))
                    tf.summary.scalar('stddev', stddev)
                    tf.summary.scalar('max', tf.reduce_max(biases))
                    tf.summary.scalar('min', tf.reduce_min(biases))
                    tf.summary.histogram('histogram', biases)


            if group==1:
                conv = convolve(input, kernel)
            else:
                input_groups = tf.split(3, group, input)
                kernel_groups = tf.split(3, group, kernel)
                output_groups = [convolve(i, k) for i,k in zip(input_groups, kernel_groups)]
                conv = tf.concat(3, output_groups)
            if relu:
                bias = tf.nn.bias_add(conv, biases)
                return tf.nn.relu(bias, name=scope.name)
            return tf.nn.bias_add(conv, biases, name=scope.name)
Example #7
    def fc(self, input, num_out, name, relu=True, trainable=True):

        print(name)
        with tf.variable_scope(name) as scope:
            # only use the first input
            if isinstance(input, tuple):
                input = input[0]

            input_shape = input.get_shape()
            if input_shape.ndims == 4:
                dim = 1
                for d in input_shape[1:].as_list():
                    dim *= d
                feed_in = tf.reshape(tf.transpose(input,[0,3,1,2]), [-1, dim])
            else:
                feed_in, dim = (input, int(input_shape[-1]))

            if name == 'bbox_pred':
                init_weights = tf.truncated_normal_initializer(0.0, stddev=0.001)
                init_biases = tf.constant_initializer(0.0)
            else:
                init_weights = tf.truncated_normal_initializer(0.0, stddev=0.01)
                init_biases = tf.constant_initializer(0.0)

            weights = self.make_var('weights', [dim, num_out], init_weights, trainable)
            biases = self.make_var('biases', [num_out], init_biases, trainable)

            with tf.name_scope('summaries'):
                with tf.name_scope('weights'):
                    mean = tf.reduce_mean(weights)
                    tf.summary.scalar('mean', mean)
                    with tf.name_scope('stddev'):
                        stddev = tf.sqrt(tf.reduce_mean(tf.square(weights - mean)))
                    tf.summary.scalar('stddev', stddev)
                    tf.summary.scalar('max', tf.reduce_max(weights))
                    tf.summary.scalar('min', tf.reduce_min(weights))
                    tf.summary.histogram('histogram', weights)
                with tf.name_scope('biases'):
                    mean = tf.reduce_mean(biases)
                    tf.summary.scalar('mean', mean)
                    with tf.name_scope('stddev'):
                        stddev = tf.sqrt(tf.reduce_mean(tf.square(biases - mean)))
                    tf.summary.scalar('stddev', stddev)
                    tf.summary.scalar('max', tf.reduce_max(biases))
                    tf.summary.scalar('min', tf.reduce_min(biases))
                    tf.summary.histogram('histogram', biases)

            op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
            fc = op(feed_in, weights, biases, name=scope.name)
            return fc
Example #8
def print_act_stats(x, _str=""):
    if not do_print_act_stats:
        return x
    if hvd.rank() != 0:
        return x
    if len(x.get_shape()) == 1:
        x_mean, x_var = tf.nn.moments(x, [0], keep_dims=True)
    if len(x.get_shape()) == 2:
        x_mean, x_var = tf.nn.moments(x, [0], keep_dims=True)
    if len(x.get_shape()) == 4:
        x_mean, x_var = tf.nn.moments(x, [0, 1, 2], keep_dims=True)
    stats = [tf.reduce_min(x_mean), tf.reduce_mean(x_mean), tf.reduce_max(x_mean),
             tf.reduce_min(tf.sqrt(x_var)), tf.reduce_mean(tf.sqrt(x_var)), tf.reduce_max(tf.sqrt(x_var))]
    return tf.Print(x, stats, "["+_str+"] "+x.name)
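A hypothetical call site (the surrounding module is assumed to define do_print_act_stats and import horovod.tensorflow as hvd): the tensor passes through unchanged while rank 0 prints the min/mean/max of the per-channel means and standard deviations.

h = print_act_stats(conv_out, "conv1 activations")  # conv_out: any 1-D, 2-D, or 4-D tensor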
Example #9
 def compute_lookup_error(self, val):
   # Computes the lookup error.
   cond = tf.equal(self.batch_print_answer, val)
   inter = tf.where(
       cond, self.init_print_error,
       tf.tile(
           tf.reshape(tf.constant(1e10, self.data_type), [1, 1, 1]), [
               self.batch_size, self.utility.FLAGS.max_word_cols +
               self.utility.FLAGS.max_number_cols,
               self.utility.FLAGS.max_elements
           ]))
   return tf.reduce_min(tf.reduce_min(inter, 1), 1) * tf.cast(
       tf.greater(
           tf.reduce_sum(tf.reduce_sum(tf.cast(cond, self.data_type), 1), 1),
           0.0), self.data_type)
Example #10
def kmeans(data, lr, K, epochs=800):
    """
    example of kmeans algorithm
    """
    M, D = data.shape
    train_data = data[:2 * M // 3]
    valid_data = data[2 * M // 3:]

    g = tf.Graph() 
    with g.as_default():
        x = tf.placeholder(tf.float32, shape=(None, D))
        mu = tf.Variable(tf.truncated_normal([K, D], dtype=tf.float32))

        cost = tf.reduce_sum(tf.reduce_min(utils.L2_dist(x, mu), 1))
        optimizer = tf.train.AdamOptimizer(lr, beta1=0.9, beta2=0.99, epsilon=1e-5).minimize(cost)


    with tf.Session(graph=g) as session:
        tf.initialize_all_variables().run()

        l = []
        for epoch in range(epochs):
            x_batch = train_data
            feed_dict = {x: x_batch}
            _, c = session.run([optimizer, cost], feed_dict=feed_dict)
            l.append(c)
            if epoch % 100 == 0:
                print "Epoch %03d, training loss: %.1f" % (epoch, c)
        feed_dict = {x:valid_data}
        c, mu = session.run([cost, mu], feed_dict=feed_dict)
        print "Validation loss: %.1f" % c

    return  {'training_loss': l,
             'validation_loss': c,
             'mu': mu}
Example #11
def disjunction_of_literals(literals, label="no_label"):
    list_of_literal_tensors = [lit.tensor for lit in literals]
    literals_tensor = tf.concat(1, list_of_literal_tensors)
    if default_tnorm == "product":
        result = 1.0 - tf.reduce_prod(1.0 - literals_tensor, 1, keep_dims=True)
    if default_tnorm == "yager2":
        result = tf.minimum(1.0, tf.sqrt(tf.reduce_sum(tf.square(literals_tensor), 1, keep_dims=True)))
    if default_tnorm == "luk":
        print("data aggregator is lukas")
        result = tf.minimum(1.0, tf.reduce_sum(literals_tensor, 1, keep_dims=True))
        PR(result)
    if default_tnorm == "goedel":
        result = tf.reduce_max(literals_tensor, 1, keep_dims=True, name=label)
    if default_aggregator == "product":
        return tf.reduce_prod(result, keep_dims=True)
    if default_aggregator == "mean":
        print("data aggregator is mean")
        return tf.reduce_mean(result, keep_dims=True, name=label)
    if default_aggregator == "gmean":
        return tf.exp(tf.mul(tf.reduce_sum(tf.log(result), keep_dims=True),
                             tf.inv(tf.to_float(tf.size(result)))), name=label)
    if default_aggregator == "hmean":
        print("data aggregator is hmean")
        return tf.div(tf.to_float(tf.size(result)), tf.reduce_sum(tf.inv(result), keep_dims=True))
    if default_aggregator == "min":
        print("data aggregator is min")
        return tf.reduce_min(result, keep_dims=True, name=label)
    if default_aggregator == "qmean":
        print("data aggregator is qmean")
        return tf.sqrt(tf.reduce_mean(tf.square(result), keep_dims=True), name=label)
    if default_aggregator == "cmean":
        print("data aggregator is cmean")
        return tf.pow(tf.reduce_mean(tf.pow(result, 3), keep_dims=True), tf.inv(tf.to_float(3)), name=label)
Example #12
def flatten_maybe_padded_sequences(maybe_padded_sequences, lengths=None):
  """Flattens the batch of sequences, removing padding (if applicable).

  Args:
    maybe_padded_sequences: A tensor of possibly padded sequences to flatten,
        sized `[N, M, ...]` where M = max(lengths).
    lengths: Optional length of each sequence, sized `[N]`. If None, assumes no
        padding.

  Returns:
     flatten_maybe_padded_sequences: The flattened sequence tensor, sized
         `[sum(lengths), ...]`.
  """
  def flatten_unpadded_sequences():
    # The sequences are equal length, so we should just flatten over the first
    # two dimensions.
    return tf.reshape(maybe_padded_sequences,
                      [-1] + maybe_padded_sequences.shape.as_list()[2:])

  if lengths is None:
    return flatten_unpadded_sequences()

  def flatten_padded_sequences():
    indices = tf.where(tf.sequence_mask(lengths))
    return tf.gather_nd(maybe_padded_sequences, indices)

  return tf.cond(
      tf.equal(tf.reduce_min(lengths), tf.shape(maybe_padded_sequences)[1]),
      flatten_unpadded_sequences,
      flatten_padded_sequences)
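Example: with two sequences padded to length 3 and lengths [2, 3], only the five real steps survive the flattening.

seqs = tf.constant([[[1.], [2.], [0.]],
                    [[3.], [4.], [5.]]])
lengths = tf.constant([2, 3])
flat = flatten_maybe_padded_sequences(seqs, lengths)  # shape [5, 1]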
Example #13
    def cross_min_pool_layer(self, bottom, nc, k, scope="cross_pool"):
        """Min pooling within k channel.

        Calculating the min value cross the k channel, corresponding to choose
        the minimum \mu_k(i, u, j, v).

        Args:
            bottom: Input tensor with size [batch_size, height, width, channel]
            nc: Number of groups to split the channels of `bottom` into.
            k: Number of components in mixture.

        Kwargs:
            scope: variable_scope for this layer's weights

        Returns:
            pool: Tensor which is the minimum cross k channel.

        """
        with tf.name_scope(scope):
            mixtures = tf.split(3, nc, bottom)
            minimum = []
            for mixture in mixtures:
                t = tf.reduce_min(mixture, 3, True)
                minimum.append(t)
            minimum = tf.concat(3, minimum)
        return minimum
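An equivalent formulation without split/concat, assuming the channel count of `bottom` equals nc * k: move the k mixture components onto their own axis and reduce over it.

shape = tf.shape(bottom)
pooled = tf.reduce_min(
    tf.reshape(bottom, tf.stack([shape[0], shape[1], shape[2], nc, k])),
    axis=4)  # shape [batch_size, height, width, nc]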
Example #14
def _multichannel_image_summary(name, images, perm=[0, 3, 1, 2], max_summary_images=16):
    _min = tf.reduce_min(images)
    _max = tf.reduce_max(images)
    _ = tf.mul(tf.div(tf.sub(images, _min), tf.sub(_max, _min)), 255.0)
    _ = tf.transpose(_, perm=perm)
    shape = _.get_shape().as_list()
    tf.image_summary(name, tf.reshape(tf.transpose(_, perm=perm), [reduce(lambda x,y:x*y, shape)/(shape[3]*shape[2]), shape[2], shape[3], 1]), max_images=max_summary_images)
Example #15
def variable_summaries(var, name, collection_key):
    """Attach a lot of summaries to a Tensor (for TensorBoard visualization).

    Args:
        - var: Tensor for variable from which we want to log.
        - name: Variable name.
        - collection_key: Collection to save the summary to, can be any key of
          `VAR_LOG_LEVELS`.
    """
    if collection_key not in VAR_LOG_LEVELS.keys():
        raise ValueError('"{}" not in `VAR_LOG_LEVELS`'.format(collection_key))
    collections = VAR_LOG_LEVELS[collection_key]

    with tf.name_scope(name):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean, collections)
        num_params = tf.reduce_prod(tf.shape(var))
        tf.summary.scalar('num_params', num_params, collections)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev, collections)
        tf.summary.scalar('max', tf.reduce_max(var), collections)
        tf.summary.scalar('min', tf.reduce_min(var), collections)
        tf.summary.histogram('histogram', var, collections)
        tf.summary.scalar('sparsity', tf.nn.zero_fraction(var), collections)
Example #16
def summary(tensor, summary_type=['mean', 'stddev', 'max', 'min', 'sparsity', 'histogram']):
    """ Attach a lot of summaries to a Tensor. """

    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on tensorboard.
    tensor_name = re.sub('%s_[0-9]*/' % 'tower', '', tensor.name)
    tensor_name = re.sub(':', '-', tensor_name)

    with tf.name_scope('summary_' + tensor_name):
        summaries = []
        if tensor.get_shape().ndims == 0:
            summaries.append(tf.summary.scalar(tensor_name, tensor))
        else:
            if 'mean' in summary_type:
                mean = tf.reduce_mean(tensor)
                summaries.append(tf.summary.scalar(tensor_name + '/mean', mean))
            if 'stddev' in summary_type:
                mean = tf.reduce_mean(tensor)
                stddev = tf.sqrt(tf.reduce_mean(tf.square(tensor - mean)))
                summaries.append(tf.summary.scalar(tensor_name + '/stddev', stddev))
            if 'max' in summary_type:
                summaries.append(tf.summary.scalar(tensor_name + '/max', tf.reduce_max(tensor)))
            if 'min' in summary_type:
                summaries.append(tf.summary.scalar(tensor_name + '/min', tf.reduce_min(tensor)))
            if 'sparsity' in summary_type:
                summaries.append(tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(tensor)))
            if 'histogram' in summary_type:
                summaries.append(tf.summary.histogram(tensor_name, tensor))
        return tf.summary.merge(summaries)
Example #17
                def get_losses(obj_mask):
                  """Get motion constraint loss."""
                  # Find height of segment.
                  coords = tf.where(tf.greater(  # Shape (num_true, 2=yx)
                      obj_mask[:, :, 0], tf.constant(0.5, dtype=tf.float32)))
                  y_max = tf.reduce_max(coords[:, 0])
                  y_min = tf.reduce_min(coords[:, 0])
                  seg_height = y_max - y_min
                  f_y = self.intrinsic_mat[i, 0, 1, 1]
                  approx_depth = ((f_y * self.global_scale_var) /
                                  tf.to_float(seg_height))
                  reference_pred = tf.boolean_mask(
                      depth_pred, tf.greater(
                          tf.reshape(obj_mask[:, :, 0],
                                     (self.img_height, self.img_width, 1)),
                          tf.constant(0.5, dtype=tf.float32)))

                  # Establish loss on approx_depth, a scalar, and
                  # reference_pred, our dense prediction. Normalize both to
                  # prevent degenerative depth shrinking.
                  global_mean_depth_pred = tf.reduce_mean(depth_pred)
                  reference_pred /= global_mean_depth_pred
                  approx_depth /= global_mean_depth_pred
                  spatial_err = tf.abs(reference_pred - approx_depth)
                  mean_spatial_err = tf.reduce_mean(spatial_err)
                  return mean_spatial_err
Example #18
  def histogram(self, x, value_range=None, nbins=None, name=None):
    """Return histogram of values.

    Given the tensor `values`, this operation returns a rank 1 histogram
    counting the number of entries in `values` that fell into every bin. The
    bins are equal width and determined by the arguments `value_range` and
    `nbins`.

    Args:
      x: 1D numeric `Tensor` of items to count.
      value_range:  Shape [2] `Tensor`. `new_values <= value_range[0]` will be
        mapped to `hist[0]`, `values >= value_range[1]` will be mapped to
        `hist[-1]`. Must be same dtype as `x`.
      nbins:  Scalar `int32 Tensor`.  Number of histogram bins.
      name: Python `str` name prefixed to Ops created by this class.

    Returns:
      counts: 1D `Tensor` of counts, i.e.,
        `counts[i] = sum{ edges[i-1] <= values[j] < edges[i] : j }`.
      edges: 1D `Tensor` characterizing intervals used for counting.
    """
    with tf.name_scope(name, "histogram", [x]):
      x = tf.convert_to_tensor(x, name="x")
      if value_range is None:
        value_range = [tf.reduce_min(x), 1 + tf.reduce_max(x)]
      value_range = tf.convert_to_tensor(value_range, name="value_range")
      lo = value_range[0]
      hi = value_range[1]
      if nbins is None:
        nbins = tf.to_int32(hi - lo)
      delta = (hi - lo) / tf.cast(nbins, dtype=value_range.dtype.base_dtype)
      edges = tf.range(
          start=lo, limit=hi, delta=delta, dtype=x.dtype.base_dtype)
      counts = tf.histogram_fixed_width(x, value_range=value_range, nbins=nbins)
      return counts, edges
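Example: with the default value_range of [min(x), max(x) + 1] and unit-width bins, each integer value lands in its own bin (called on the owning instance).

x = tf.constant([0., 0., 1., 2., 2., 2.])
counts, edges = self.histogram(x)  # counts -> [2, 1, 3], edges -> [0., 1., 2.]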
Example #19
    def when_nonempty():
      min_ = tf.reduce_min(data)
      max_ = tf.reduce_max(data)
      range_ = max_ - min_
      is_singular = tf.equal(range_, 0)

      def when_nonsingular():
        bucket_width = range_ / tf.cast(bucket_count, tf.float64)
        offsets = data - min_
        bucket_indices = tf.cast(tf.floor(offsets / bucket_width),
                                 dtype=tf.int32)
        clamped_indices = tf.minimum(bucket_indices, bucket_count - 1)
        one_hots = tf.one_hot(clamped_indices, depth=bucket_count)
        bucket_counts = tf.cast(tf.reduce_sum(one_hots, axis=0),
                                dtype=tf.float64)
        edges = tf.lin_space(min_, max_, bucket_count + 1)
        left_edges = edges[:-1]
        right_edges = edges[1:]
        return tf.transpose(tf.stack(
            [left_edges, right_edges, bucket_counts]))

      def when_singular():
        center = min_
        bucket_starts = tf.stack([center - 0.5])
        bucket_ends = tf.stack([center + 0.5])
        bucket_counts = tf.stack([tf.cast(tf.size(data), tf.float64)])
        return tf.transpose(
            tf.stack([bucket_starts, bucket_ends, bucket_counts]))

      return tf.cond(is_singular, when_singular, when_nonsingular)
Example #20
def collect_variable_summaries(vars=None):
    """
    Collect the summaries for all variables.
    Returns list of summary operations for the variables.

    :param vars: If specified, will gather summaries for these variables.
                 Otherwise will gather summaries for all summarizable variables in current graph.
    """
    ret = []
    for v in (vars or current_graph().iter_variables(summary=True)):
        name = get_variable_name(v)
        ret.append(histogram_summary(name, v))
        # also generate the mean/min/max/stddev statistics for this variable.
        v_mean = tf.reduce_mean(v)
        v_min = tf.reduce_min(v)
        v_max = tf.reduce_max(v)
        with tf.name_scope('stddev'):
            v_stddev = tf.sqrt(tf.reduce_mean(tf.square(v - v_mean)))
        ret.extend([
            scalar_summary('%s/mean' % name, v_mean),
            scalar_summary('%s/min' % name, v_min),
            scalar_summary('%s/max' % name, v_max),
            scalar_summary('%s/stddev' % name, v_stddev),
        ])
    return ret
Example #21
def _summarize_vars_and_grads(grads_and_vars):
  tf.logging.info('Trainable variables:')
  tf.logging.info('-' * 60)
  for grad, var in grads_and_vars:
    tf.logging.info(var)

    def tag(name, v=var):
      return v.op.name + '_' + name

    # Variable summary
    mean = tf.reduce_mean(var)
    tf.summary.scalar(tag('mean'), mean)
    with tf.name_scope(tag('stddev')):
      stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
    tf.summary.scalar(tag('stddev'), stddev)
    tf.summary.scalar(tag('max'), tf.reduce_max(var))
    tf.summary.scalar(tag('min'), tf.reduce_min(var))
    tf.summary.histogram(tag('histogram'), var)

    # Gradient summary
    if grad is not None:
      if isinstance(grad, tf.IndexedSlices):
        grad_values = grad.values
      else:
        grad_values = grad

      tf.summary.histogram(tag('gradient'), grad_values)
      tf.summary.scalar(tag('gradient_norm'), tf.global_norm([grad_values]))
    else:
      tf.logging.info('Var %s has no gradient', var.op.name)
Example #22
def conv2d_1(name, inputs, shape, strides=1):

    with tf.name_scope(name+"_conv"):
        W = tf.Variable(tf.random_normal(shape))
        tf.add_to_collection('l2_losses', tf.contrib.layers.l2_regularizer(lambda1)(W)) 
        x1 = tf.nn.conv2d(inputs, W, strides=[1, strides, strides, 1], padding='SAME', name="conv1")
        if name == 'layerM21' and is_training:
            tf.summary.scalar('w_mean', tf.reduce_mean(W))
            tf.summary.scalar('w_max', tf.reduce_max(W))
            tf.summary.scalar('w_min', tf.reduce_min(W))


    with tf.name_scope(name+"_bias"):
        B = tf.Variable(tf.random_normal([shape[-1]]))
        tf.add_to_collection('l2_losses', tf.contrib.layers.l2_regularizer(lambda1)(B)) 
        x2 = tf.nn.bias_add(x1, B, name="bias1")
    
    with tf.name_scope(name+"_BN"):
        x3 = bn_layer(x2, is_training, name=name)

    with tf.name_scope(name+"_relu"):
        c1_out=leaky_relu(x3)
        #c1_out=tf.nn.leaky_relu(x3)

    return c1_out
Example #23
def clip_logits(logits, config):
  logits_clip = getattr(config, "logits_clip", 0.)
  if logits_clip > 0:
    min_logit = tf.reduce_min(logits)
    return tf.minimum(logits - min_logit, logits_clip)
  else:
    return logits
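Example: with a config exposing logits_clip = 5.0, logits are shifted so their minimum is zero and then capped at the clip value.

logits = tf.constant([-2.0, 0.0, 10.0])
clipped = clip_logits(logits, config)  # -> [0.0, 2.0, 5.0] when config.logits_clip == 5.0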
Example #24
def test_initialization():
  """ 
  Use this space to test your Xavier initialization code by running:
      python q1_initialization.py 
  This function will not be called by the autograder, nor will
  your tests be graded.
  """
  print "Running your tests..."
  ### YOUR CODE HERE
  shape = (3,3,3)
  
  dim_sum = np.sum(shape)
  if len(shape) == 1:
    dim_sum += 1
  eps = np.sqrt(6.0 / dim_sum)
  
  with tf.variable_scope("init_test", initializer=xavier_weight_init()):
    W = tf.get_variable("W", shape=shape)

  init = tf.initialize_all_variables()

  with tf.Session() as sess:
    sess.run(init)
    print "Weights: \n", W.eval()
    print "Mean of weights: \n", tf.reduce_mean(W).eval()
    assert tf.reduce_max(W).eval() <= eps
    assert tf.reduce_min(W).eval() >= -eps
    print "Your (non-exhaustive) Xavier initialization tests pass\n"
Example #25
    def put_kernels_on_grid(self, kernel, pad=1):

        '''Visualize conv. filters as an image (mostly for the 1st layer).
        Arranges filters into a grid, with some paddings between adjacent filters.
        Args:
          kernel:            tensor of shape [Y, X, NumChannels, NumKernels]
          pad:               number of black pixels around each filter (between them)
        Return:
          Tensor of shape [1, (Y+2*pad)*grid_Y, (X+2*pad)*grid_X, NumChannels].
        '''
        if kernel == 0:
            kernel = self.conv_weights_1
        if kernel == 1:
            kernel = self.conv_weights_2
        # get shape of the grid. NumKernels == grid_Y * grid_X
        def factorization(n):
            print("n>",n)
            print("fact>",int(sqrt(float(n))))
            for i in range(int(sqrt(float(n))), 0, -1):
                print("i>", i)
                if n % i == 0:
                    if i == 1: print('Who would enter a prime number of filters')
                    return (i, int(n / i))
        (grid_Y, grid_X) = factorization(kernel.get_shape()[3].value)
        print('grid: %d = (%d, %d)' % (kernel.get_shape()[3].value, grid_Y, grid_X))
        
        x_min = tf.reduce_min(kernel)
        x_max = tf.reduce_max(kernel)
        #print("x_min>",tf.reduce_min(kernel).eval(session=self.session))
        #print("x_max>",tf.reduce_max(kernel).eval(session=self.session))
        kernel = (kernel - x_min) / (x_max - x_min)
        print("kernelshape>", kernel.get_shape())
        # pad X and Y
        x = tf.pad(kernel, tf.constant([[pad, pad], [pad, pad], [0, 0], [0, 0]]), mode='CONSTANT')
        
        # X and Y dimensions, w.r.t. padding
        Y = kernel.get_shape()[0] + 2 * pad
        X = kernel.get_shape()[1] + 2 * pad
        
        channels = kernel.get_shape()[2]
        
        # put NumKernels to the 1st dimension
        x = tf.transpose(x, (3, 0, 1, 2))
        # organize grid on Y axis
        x = tf.reshape(x, tf.stack([grid_X, Y * grid_Y, X, channels]))
        
        # switch X and Y axes
        x = tf.transpose(x, (0, 2, 1, 3))
        # organize grid on X axis
        x = tf.reshape(x, tf.stack([1, X * grid_X, Y * grid_Y, channels]))
        
        # back to normal order (not combining with the next step for clarity)
        x = tf.transpose(x, (2, 1, 3, 0))
        
        # to tf.image_summary order [batch_size, height, width, channels],
        #   where in this case batch_size == 1
        x = tf.transpose(x, (3, 0, 1, 2))
        
        # scaling to [0, 255] is not necessary for tensorboard
        return x 
Example #26
def writeHistogramSummary(label, tensor):
  with tf.name_scope(label):
    print "histogram ", label, " shape:", tensor.get_shape()
    tf.scalar_summary("%s max: " % label, tf.reduce_max(tensor))
    tf.scalar_summary("%s min: " % label, tf.reduce_min(tensor))
    tf.scalar_summary("%s mean: " % label, tf.reduce_mean(tensor))
    tf.histogram_summary(label, tensor)
Example #27
def _psd_mask(x):
  """Computes whether each square matrix in the input is positive semi-definite.

  Args:
    x: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`.

  Returns:
    mask: A floating-point `Tensor` of shape `[B1, ... Bn]`.  Each
      scalar is 1 if the corresponding matrix was PSD, otherwise 0.
  """
  # Allegedly
  # https://scicomp.stackexchange.com/questions/12979/testing-if-a-matrix-is-positive-semi-definite
  # it is more efficient to test for positive semi-definiteness by
  # trying to compute the Cholesky decomposition -- the matrix is PSD
  # if you succeed and not PSD if you fail.  However, TensorFlow's
  # Cholesky raises an exception if _any_ of the input matrices are
  # not PSD, from which I don't know how to extract _which ones_, so I
  # proceed by explicitly computing all the eigenvalues and checking
  # whether they are all positive or not.
  #
  # Also, as was discussed in the answer, it is somewhat dangerous to
  # treat SPD-ness as binary in floating-point arithmetic. Cholesky
  # factorization can complete and 'look' like everything is fine
  # (e.g., O(1) entries and a diagonal of all ones) but the matrix can
  # have an exponential condition number.
  eigenvalues, _ = tf.self_adjoint_eig(x)
  return tf.cast(
      tf.reduce_min(eigenvalues, axis=-1) >= 0, dtype=x.dtype)
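Example: the 2x2 identity is PSD, while [[0, 1], [1, 0]] has eigenvalues ±1 and is not.

x = tf.constant([[[1., 0.], [0., 1.]],
                 [[0., 1.], [1., 0.]]])
mask = _psd_mask(x)  # -> [1., 0.]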
Example #28
    def compute_center_coords(self, y_true, y_pred):
        batch_size = tf.shape(y_pred)[0]
        h = tf.shape(y_pred)[1]
        w = tf.shape(y_pred)[2]
        n_chans = tf.shape(y_pred)[3]
        n_dims = 5

        # weighted center of mass
        x = tf.cast(tf.tile(tf.reshape(self.xs, [1, h, w]), [batch_size, 1, 1]), tf.float32)
        y = tf.cast(tf.tile(tf.reshape(self.ys, [1, h, w]), [batch_size, 1, 1]), tf.float32)

        eps = 1e-8
        # grayscale
        pred_gray = tf.reduce_mean(y_pred, axis=-1)  # should be batch_size x h x w
        # normalize
        pred_gray = pred_gray - tf.reduce_min(pred_gray, axis=[1, 2], keepdims=True)
        pred_gray = pred_gray / (eps + tf.reduce_max(pred_gray, axis=[1, 2], keepdims=True))
        pred_gray = tf.clip_by_value(pred_gray, 0., 1.)

        # make each of these (batch_size, 1)
        weighted_x = tf.round(tf.expand_dims(
            tf.reduce_sum(x * pred_gray, axis=[1, 2]) / (eps + tf.reduce_sum(pred_gray, axis=[1, 2])), axis=-1))
        weighted_y = tf.round(tf.expand_dims(
            tf.reduce_sum(y * pred_gray, axis=[1, 2]) / (eps + tf.reduce_sum(pred_gray, axis=[1, 2])), axis=-1))
        batch_indices = tf.reshape(tf.linspace(0., tf.cast(batch_size, tf.float32) - 1., batch_size), [batch_size, 1])
        indices = tf.cast(tf.concat([batch_indices, weighted_y, weighted_x], axis=-1), tf.int32)
        #center_rgb = transform_network_utils.interpolate([y_true,  weighted_x, weighted_y], constant_vals=1.)
        center_rgb = tf.gather_nd(y_true, indices)
        center_rgb = tf.reshape(center_rgb, [batch_size, n_chans])

        center_point_xyrgb = tf.concat([
                        weighted_x, weighted_y, center_rgb
                    ], axis=-1)

        return pred_gray, center_point_xyrgb
Example #29
 def __init__(self, label, clauses, save_path=""):
     print "defining the knowledge base", label
     self.label = label
     self.clauses = clauses
     self.parameters = [par for cl in self.clauses for par in cl.parameters]
     if not self.clauses:
         self.tensor = tf.constant(1.0)
     else:
         clauses_value_tensor = tf.concat(0, [cl.tensor for cl in clauses])
         if default_clauses_aggregator == "min":
             print "clauses aggregator is min"
             self.tensor = tf.reduce_min(clauses_value_tensor)
         if default_clauses_aggregator == "mean":
             print "clauses aggregator is mean"
             self.tensor = tf.reduce_mean(clauses_value_tensor)
         if default_clauses_aggregator == "hmean":
             print "clauses aggregator is hmean"
             self.tensor = tf.div(tf.to_float(tf.size(clauses_value_tensor)), tf.reduce_sum(tf.inv(clauses_value_tensor), keep_dims=True))
         if default_clauses_aggregator == "wmean":
             print "clauses aggregator is weighted mean"
             weights_tensor = tf.constant([cl.weight for cl in clauses])
             self.tensor = tf.div(tf.reduce_sum(tf.mul(weights_tensor, clauses_value_tensor)), tf.reduce_sum(weights_tensor))
     if default_positive_fact_penality != 0:
         self.loss = smooth(self.parameters) + \
                     tf.mul(default_positive_fact_penality, self.penalize_positive_facts()) - \
                     PR(self.tensor)
     else:
         self.loss = smooth(self.parameters) - PR(self.tensor)
     self.save_path = save_path
     self.train_op = train_op(self.loss, default_optimizer)
     self.saver = tf.train.Saver(max_to_keep=20)
     print "knowledge base", label, "is defined"
Example #30
def model_train(k):
    data = np.float32(np.load('data100D.npy'))
    sample_num = data.shape[0]
    dim = data.shape[1]
    cluster = k

    tf_data = tf.placeholder(tf.float32, shape=(sample_num, dim))
    tf_centroids = tf.Variable(tf.truncated_normal([k, dim], mean=0.0, stddev=1.0))
    tf_min_index = tf.argmin(eucl_distance(tf_data, tf_centroids), dimension=1)
    tf_loss = tf.reduce_sum(tf.reduce_min(eucl_distance(tf_data, tf_centroids), 1, keep_dims=True))
    optimizer = tf.train.AdamOptimizer(0.01, 0.9, 0.99, 1e-5).minimize(tf_loss)

    sess = tf.InteractiveSession()

    init = tf.initialize_all_variables()
    init.run()

    epoch = 1000
    loss_list = []
    for i in range(epoch):
        feed_dict = {tf_data: data}
        _, loss, assignments, centroids = sess.run([optimizer, tf_loss, tf_min_index, tf_centroids], feed_dict = feed_dict)
        loss_list.append(loss)
        if i % 50 == 0:
            print("Loss at step %d: %f" % (i, loss))

    cal_percentage(assignments, k)

    plt.title('the loss vs the number of updates 100-D')
    plt.xlabel('the number of updates')
    plt.ylabel('the value of the loss')
    plt.plot(range(len(loss_list)), loss_list)
    plt.show()
    return loss
Example #31
def cell_list_nl(tensors, rc=5.0):
    """ Compute neighbour list with celllist approach
    https://en.wikipedia.org/wiki/Cell_lists
    This is very lengthy and confusing implementation of cell list nl.
    Probably needs optimization outside Tensorflow.

    The function expects a dictionary of tensors from a sparse_batch
    with keys: 'ind_1', 'coord' and optionally 'cell'
    """
    atom_sind = tensors['ind_1']
    atom_apos = tensors['coord']
    atom_gind = tf.cumsum(tf.ones_like(atom_sind), 0)
    atom_aind = atom_gind - 1
    to_collect = atom_aind
    if 'cell' in tensors:
        coord_wrap = _wrap_coord(tensors)
        atom_apos = coord_wrap
        rep_apos, rep_sind, rep_aind = _pbc_repeat(coord_wrap, tensors['cell'],
                                                   tensors['ind_1'], rc)
        atom_sind = tf.concat([atom_sind, rep_sind], 0)
        atom_apos = tf.concat([atom_apos, rep_apos], 0)
        atom_aind = tf.concat([atom_aind, rep_aind], 0)
        atom_gind = tf.cumsum(tf.ones_like(atom_sind), 0)
    atom_apos = atom_apos - tf.reduce_min(atom_apos, axis=0)
    atom_cpos = tf.concat(
        [atom_sind, tf.cast(atom_apos // rc, tf.int32)], axis=1)
    cpos_shap = tf.concat([tf.reduce_max(atom_cpos, axis=0) + 1, [1]], axis=0)
    samp_ccnt = tf.squeeze(tf.scatter_nd(atom_cpos,
                                         tf.ones_like(atom_sind, tf.int32),
                                         cpos_shap),
                           axis=-1)
    cell_cpos = tf.cast(tf.where(samp_ccnt), tf.int32)
    cell_cind = tf.cumsum(tf.ones(tf.shape(cell_cpos)[0], tf.int32))
    cell_cind = tf.expand_dims(cell_cind, 1)
    samp_cind = tf.squeeze(tf.scatter_nd(cell_cpos, cell_cind, cpos_shap),
                           axis=-1)
    # Get the atom's relative index(rind) and position(rpos) in cell
    # And each cell's atom list (alst)
    atom_cind = tf.gather_nd(samp_cind, atom_cpos) - 1
    atom_cind_args = tf.contrib.framework.argsort(atom_cind, axis=0)
    atom_cind_sort = tf.gather(atom_cind, atom_cind_args)

    atom_rind_sort = tf.cumsum(tf.ones_like(atom_cind, tf.int32))
    cell_rind_min = tf.segment_min(atom_rind_sort, atom_cind_sort)
    atom_rind_sort = atom_rind_sort - tf.gather(cell_rind_min, atom_cind_sort)
    atom_rpos_sort = tf.stack([atom_cind_sort, atom_rind_sort], axis=1)
    atom_rpos = tf.unsorted_segment_sum(atom_rpos_sort, atom_cind_args,
                                        tf.shape(atom_gind)[0])
    cell_alst_shap = [tf.shape(cell_cind)[0], tf.reduce_max(samp_ccnt), 1]
    cell_alst = tf.squeeze(tf.scatter_nd(atom_rpos, atom_gind, cell_alst_shap),
                           axis=-1)
    # Get cell's linked cell list, for cells in to_collect only
    disp_mat = np.zeros([3, 3, 3, 4], np.int32)
    disp_mat[:, :, :, 1] = np.reshape([-1, 0, 1], (3, 1, 1))
    disp_mat[:, :, :, 2] = np.reshape([-1, 0, 1], (1, 3, 1))
    disp_mat[:, :, :, 3] = np.reshape([-1, 0, 1], (1, 1, 3))
    disp_mat = np.reshape(disp_mat, (1, 27, 4))
    cell_npos = tf.expand_dims(cell_cpos, 1) + disp_mat
    npos_mask = tf.reduce_all((cell_npos >= 0) & (cell_npos < cpos_shap[:-1]),
                              2)
    cell_nind = tf.squeeze(
        tf.scatter_nd(
            tf.cast(tf.where(npos_mask), tf.int32),
            tf.expand_dims(
                tf.gather_nd(samp_cind, tf.boolean_mask(cell_npos, npos_mask)),
                1), tf.concat([tf.shape(cell_npos)[:-1], [1]], 0)), -1)
    # Finally, a sparse list of atom pairs
    coll_nind = tf.gather(cell_nind, tf.gather_nd(atom_cind, to_collect))
    pair_ic = tf.cast(tf.where(coll_nind), tf.int32)
    pair_ic_i = pair_ic[:, 0]
    pair_ic_c = tf.gather_nd(coll_nind, pair_ic) - 1
    pair_ic_alst = tf.gather(cell_alst, pair_ic_c)

    pair_ij = tf.cast(tf.where(pair_ic_alst), tf.int32)
    pair_ij_i = tf.gather(pair_ic_i, pair_ij[:, 0])
    pair_ij_j = tf.gather_nd(pair_ic_alst, pair_ij) - 1

    diff = tf.gather(atom_apos, pair_ij_j) - tf.gather(atom_apos, pair_ij_i)
    dist = tf.norm(diff, axis=-1)
    ind_rc = tf.where((dist < rc) & (dist > 0))
    dist = tf.gather_nd(dist, ind_rc)
    diff = tf.gather_nd(diff, ind_rc)
    pair_i_aind = tf.gather_nd(tf.gather(atom_aind, pair_ij_i), ind_rc)
    pair_j_aind = tf.gather_nd(tf.gather(atom_aind, pair_ij_j), ind_rc)

    output = {
        'ind_2': tf.concat([pair_i_aind, pair_j_aind], 1),
        'dist': dist,
        'diff': diff
    }
    return output
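A hypothetical input sketch: three atoms of a single structure, no periodic cell. Only pairs closer than rc survive in the returned dict.

tensors = {
    'ind_1': tf.zeros([3, 1], tf.int32),  # structure index of each atom
    'coord': tf.constant([[0., 0., 0.],
                          [1., 0., 0.],
                          [8., 0., 0.]]),
}
nl = cell_list_nl(tensors, rc=5.0)  # keys: 'ind_2', 'dist', 'diff'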
Example #32
def rescale(array_x): # convert to [0,1]
    amax = tf.reduce_max(array_x, axis=1, keep_dims=True)
    amin = tf.reduce_min(array_x, axis=1, keep_dims=True)
    rescaled = array_x - amin
    rescaled = rescaled / (amax - amin)
    return rescaled
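With the division taken over the full range (amax - amin), each row rescales independently to [0, 1]:

x = tf.constant([[2., 4., 6.]])
y = rescale(x)  # -> [[0., 0.5, 1.]]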
Example #33
    def _compute_att_loss(self, images, matching_labels, pred_matched_coordinates, all_pred_dots):
        prev_pred_matched_coord = self.model.coord(images)

        last_layer_res = 1
        loss = 0
        losses = []
        accs = []
        for i, (src_att_size, dest_att_size) in enumerate(zip(self.model.src_att_iters, self.model.dest_att_iters)):
            # Split up match labels according to last feature layer
            match_labels_split = tf.stack(tf.split(tf.stack(tf.split(matching_labels, last_layer_res, 1), 1), last_layer_res, 3), 2)
            # Split each tile again according to resolution of current feature layer
            match_labels_split = tf.stack(tf.split(tf.stack(tf.split(match_labels_split, src_att_size, 3), 3), src_att_size, 5), 4)
            # Collapse x,y dimensions
            match_labels_split = tf.reshape(match_labels_split, [tf.shape(match_labels_split)[0], (last_layer_res) ** 2, src_att_size ** 2, 1, tf.shape(match_labels_split)[-3], tf.shape(match_labels_split)[-2], tf.shape(match_labels_split)[-1]])

            # Split up the possible coordinates to match for each feature vector
            possible_coords_to_match = tf.stack(tf.split(tf.stack(tf.split(prev_pred_matched_coord, last_layer_res, 1), 1), last_layer_res, 3), 2)
            possible_coords_to_match = tf.stack(tf.split(tf.stack(tf.split(possible_coords_to_match, dest_att_size, 3), 3), dest_att_size, 5), 4)
            possible_coords_to_match = tf.reshape(possible_coords_to_match, [tf.shape(possible_coords_to_match)[0], (last_layer_res) ** 2, 1, dest_att_size ** 2, tf.shape(possible_coords_to_match)[-3], tf.shape(possible_coords_to_match)[-2], tf.shape(possible_coords_to_match)[-1]])
            possible_coords_to_match = tf.cast(possible_coords_to_match, tf.int64)

            # For each possible tile that can be matched in the second image, compute min and max coordinates
            x_min = tf.reduce_min(possible_coords_to_match[..., 0], axis=[-1, -2], keepdims=True)
            y_min = tf.reduce_min(possible_coords_to_match[..., 1], axis=[-1, -2], keepdims=True)
            x_max = tf.reduce_max(possible_coords_to_match[..., 0], axis=[-1, -2], keepdims=True)
            y_max = tf.reduce_max(possible_coords_to_match[..., 1], axis=[-1, -2], keepdims=True)

            # Define boolean map over pixelwise match labels to mask the ones that can be matched at all
            pixels_with_matches_mask = tf.logical_and(
                tf.logical_and(
                    x_min <= match_labels_split[..., 0],
                    match_labels_split[..., 0] <= x_max
                ),
                tf.logical_and(
                    y_min <= match_labels_split[..., 1],
                    match_labels_split[..., 1] <= y_max
                )
            )

            # Count the number of matchable pixels per feature vector combination
            num_matchable_pixels = tf.reduce_sum(tf.cast(pixels_with_matches_mask, tf.float32), axis=[-2, -1])
            num_matchable_pixels = tf.reshape(num_matchable_pixels, [tf.shape(num_matchable_pixels)[0], (last_layer_res), (last_layer_res), src_att_size, src_att_size, dest_att_size ** 2])
            num_matchable_pixels = tf.reshape(num_matchable_pixels, [tf.shape(num_matchable_pixels)[0], (last_layer_res) ** 2, src_att_size ** 2, dest_att_size ** 2])

            # Get a one hot encoding of the feature vector combination that contains the most pixel wise matches
            best_match_mask = tf.one_hot(tf.argmax(num_matchable_pixels, -1), tf.shape(num_matchable_pixels)[-1])
            # Check that the one with the most matches has more than zero matches
            any_valid_matches_mask = tf.cast(tf.logical_and(num_matchable_pixels > 0, best_match_mask > 0), tf.float32)
            # Sum up per feature vector in the first map, to get a mask across feature vectors that have any valid match at all in their destination area
            any_valid_matches_mask = tf.reduce_sum(any_valid_matches_mask, -1, keepdims=True) > 0

            # Extract the dot products that correspond to the best feature matches
            max_logit = tf.boolean_mask(all_pred_dots[i], best_match_mask > 0)
            max_logit = tf.reshape(max_logit, tf.shape(all_pred_dots[i])[:-1])

            # Compute the contrastive loss between the dot product of the best match and all other dot products
            single_loss = tf.maximum(0.0, -(max_logit[..., None] - all_pred_dots[i]) + 1.0)
            # Mask out the loss of any dot products that correspond to feature matches that have no matches at all and make
            # sure that we only consider dot products that correspond to zero matching pixels
            mask = tf.logical_and(any_valid_matches_mask, num_matchable_pixels == 0)
            single_loss = tf.boolean_mask(single_loss, mask)
            # Compute the mean
            single_loss = tf.reduce_mean(single_loss)
            losses.append(single_loss)
            # Sum up losses
            loss += single_loss * self.aux_loss_weight

            # Extract the number of matching pixels corresponding to the highest dot product per feature vector in the first image
            hit_points = tf.reduce_sum(num_matchable_pixels * tf.one_hot(tf.argmax(all_pred_dots[i], -1), tf.shape(all_pred_dots[i])[-1]), axis=-1)
            # Mask out any feature vectors that dont have any valid match at all
            hit_points_mask = tf.reduce_sum(num_matchable_pixels, axis=-1) > 0
            hit_points = tf.boolean_mask(hit_points, hit_points_mask)
            # Build binary tensor that denotes all feature vectors that were matched correctly
            hit_points = tf.cast(hit_points > 0, tf.float32)
            accs.append(hit_points)

            # Prepare for next iteration
            last_layer_res *= src_att_size
            prev_pred_matched_coord = pred_matched_coordinates[i]

        return loss, losses, accs
Example #34
def mcae(input_tensor, train_tensor, nb_bands, cost_weights, l2_loss):

    a = []
    b = []

    conv1, a_t, b_t = bn_pelu_conv(input_tensor,
                                   train_tensor,
                                   scope_id='conv1',
                                   num_filters=256,
                                   l2_loss=l2_loss)
    a.append(a_t)
    b.append(b_t)
    pool1 = tf.layers.max_pooling2d(conv1, 2, 2, name='pool1')

    conv2, a_t, b_t = bn_pelu_conv(pool1,
                                   train_tensor,
                                   scope_id='conv2',
                                   num_filters=512,
                                   l2_loss=l2_loss)
    a.append(a_t)
    b.append(b_t)
    pool2 = tf.layers.max_pooling2d(conv2, 2, 2, name='pool2')

    conv3, a_t, b_t = bn_pelu_conv(pool2,
                                   train_tensor,
                                   scope_id='conv3',
                                   num_filters=512,
                                   l2_loss=l2_loss)
    a.append(a_t)
    b.append(b_t)
    pool3 = tf.layers.max_pooling2d(conv3, 2, 2, name='pool3')

    conv4, a_t, b_t = bn_pelu_conv(pool3,
                                   train_tensor,
                                   scope_id='conv4',
                                   num_filters=1024,
                                   kernel_size=(1, 1),
                                   strides=(1, 1),
                                   l2_loss=l2_loss)
    a.append(a_t)
    b.append(b_t)

    refinement3, a_t, b_t = refinement_layer(pool3,
                                             conv4,
                                             train_tensor,
                                             512,
                                             scope_id='refinement3',
                                             l2_loss=l2_loss)
    a += a_t
    b += b_t

    refinement2, a_t, b_t = refinement_layer(pool2,
                                             refinement3,
                                             train_tensor,
                                             512,
                                             scope_id='refinement2',
                                             l2_loss=l2_loss)
    a += a_t
    b += b_t

    refinement1, a_t, b_t = refinement_layer(pool1,
                                             refinement2,
                                             train_tensor,
                                             256,
                                             scope_id='refinement1',
                                             l2_loss=l2_loss)
    a += a_t
    b += b_t

    output_tensor = tf.layers.conv2d(
        refinement1,
        nb_bands,
        kernel_size=(1, 1),
        padding='same',
        name='output_conv',
        kernel_initializer=tf.glorot_normal_initializer(),
        kernel_regularizer=tf.contrib.layers.l2_regularizer(l2_loss))

    loss = cost_weights[0] * mse_loss(output_tensor, input_tensor)
    loss += (cost_weights[1] * mse_loss(conv1, refinement1))
    loss += (cost_weights[2] * mse_loss(conv2, refinement2))
    loss += (cost_weights[3] * mse_loss(conv3, refinement3))

    loss = loss - 1000.0 * tf.minimum(tf.reduce_min(a), 0)
    loss = loss - 1000.0 * tf.minimum(tf.reduce_min(b), 0)

    return loss, refinement1
Example #35
 def loop_cond(tails, m, v, count):
   del tails, m, v  # unused
   return tf.reduce_min(count) < 100
Example #36
def proposal_label_op(boxes,
                      gt_boxes,
                      gt_labels,
                      image_info,
                      batch_size_per_im=512,
                      fg_fraction=0.25,
                      fg_thresh=0.5,
                      bg_thresh_hi=0.5,
                      bg_thresh_lo=0.):
    """Assigns the proposals with ground truth labels and performs subsmpling.

  Given proposal `boxes`, `gt_boxes`, and `gt_labels`, the function uses the
  following algorithm to generate the final `batch_size_per_im` RoIs.
  1. Calculates the IoU between each proposal box and each gt_boxes.
  2. Assigns each proposal box with a ground truth class and box label by
     choosing the largest overlap.
  3. Samples `batch_size_per_im` boxes from all proposal boxes, and returns
     box_targets, class_targets, and RoIs.
  The reference implementations of #1 and #2 are here: https://github.com/facebookresearch/Detectron/blob/master/detectron/datasets/json_dataset.py  # pylint: disable=line-too-long
  The reference implementation of #3 is here: https://github.com/facebookresearch/Detectron/blob/master/detectron/roi_data/fast_rcnn.py.  # pylint: disable=line-too-long

  Args:
    boxes: a tensor with a shape of [batch_size, N, 4]. N is the number of
      proposals before groundtruth assignment (e.g., rpn_post_nms_topn). The
      last dimension is the pixel coordinates of scaled images in
      [ymin, xmin, ymax, xmax] form.
    gt_boxes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES, 4]. This
      tensor might have paddings with a value of -1. The coordinates of gt_boxes
      are in the pixel coordinates of the original image scale.
    gt_labels: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES]. This
      tensor might have paddings with a value of -1.
    image_info: a tensor of shape [batch_size, 5] whose five columns encode
      the input image's [height, width, scale, original_height,
      original_width]. Height and width are those of the network input, not
      the original image; scale is the scale factor used to scale the network
      input size to the original image size. See
      dataloader.DetectionInputProcessor for details.
    batch_size_per_im: an integer representing the RoI minibatch size per
      image.
    fg_fraction: a float representing the target fraction of the RoI minibatch
      that is labeled foreground (i.e., class > 0).
    fg_thresh: a float representing the overlap threshold for an RoI to be
      considered foreground (if >= fg_thresh).
    bg_thresh_hi: a float representing the upper overlap threshold for an RoI
      to be considered background (class = 0 if overlap in [LO, HI)).
    bg_thresh_lo: a float representing the lower overlap threshold for an RoI
      to be considered background (class = 0 if overlap in [LO, HI)).
  Returns:
    box_targets: a tensor with a shape of [batch_size, K, 4]. The tensor
      contains the ground truth pixel coordinates of the scaled images for each
      roi. K is the number of sample RoIs (e.g., batch_size_per_im).
    class_targets: an integer tensor with a shape of [batch_size, K]. The
      tensor contains the ground truth class for each roi.
    rois: a tensor with a shape of [batch_size, K, 4], representing the
      coordinates of the selected RoI.
    proposal_to_label_map: a tensor with a shape of [batch_size, K]. This
      tensor keeps the mapping from proposals to labels:
      proposal_to_label_map[i] is the index of the ground truth instance for
      the i-th proposal.
  """
    with tf.name_scope('proposal_label'):
        batch_size = boxes.shape[0]
        # Scales ground truth boxes to the scaled image coordinates.
        image_scale = 1 / image_info[:, 2]
        scaled_gt_boxes = gt_boxes * tf.reshape(image_scale,
                                                [batch_size, 1, 1])

        # The reference implementation intentionally includes ground truth boxes in
        # the proposals. see https://github.com/facebookresearch/Detectron/blob/master/detectron/datasets/json_dataset.py#L359.  # pylint: disable=line-too-long
        boxes = tf.concat([boxes, scaled_gt_boxes], axis=1)
        iou = _bbox_overlap(boxes, scaled_gt_boxes)

        (pre_sample_box_targets, pre_sample_class_targets, max_overlap,
         proposal_to_label_map) = _add_class_assignments(
             iou, scaled_gt_boxes, gt_labels)

        # Generates a random sample of RoIs comprising foreground and background
        # examples. reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/roi_data/fast_rcnn.py#L132  # pylint: disable=line-too-long
        positives = tf.greater(max_overlap,
                               fg_thresh * tf.ones_like(max_overlap))
        negatives = tf.logical_and(
            tf.greater_equal(max_overlap,
                             bg_thresh_lo * tf.ones_like(max_overlap)),
            tf.less(max_overlap, bg_thresh_hi * tf.ones_like(max_overlap)))
        pre_sample_class_targets = tf.where(
            negatives, tf.zeros_like(pre_sample_class_targets),
            pre_sample_class_targets)
        proposal_to_label_map = tf.where(negatives,
                                         tf.zeros_like(proposal_to_label_map),
                                         proposal_to_label_map)

        # Handles ground truth paddings.
        ignore_mask = tf.less(tf.reduce_min(iou, axis=2),
                              tf.zeros_like(max_overlap))
        # indicator includes both positive and negative labels.
        # labels includes only positive labels.
        # positives = indicator & labels.
        # negatives = indicator & !labels.
        # ignore = !indicator.
        labels = positives
        pos_or_neg = tf.logical_or(positives, negatives)
        indicator = tf.logical_and(pos_or_neg, tf.logical_not(ignore_mask))

        all_samples = []
        sampler = (
            balanced_positive_negative_sampler.BalancedPositiveNegativeSampler(
                positive_fraction=fg_fraction, is_static=True))
        # Batch-unroll the sub-sampling process.
        for i in range(batch_size):
            samples = sampler.subsample(indicator[i], batch_size_per_im,
                                        labels[i])
            all_samples.append(samples)
        all_samples = tf.stack(all_samples, axis=0)
        # A workaround to get the indices from the boolean tensors.
        _, samples_indices = tf.nn.top_k(tf.to_int32(all_samples),
                                         k=batch_size_per_im,
                                         sorted=True)
        # Constructs indices for gather.
        samples_indices = tf.reshape(
            samples_indices +
            tf.expand_dims(tf.range(batch_size) * tf.shape(boxes)[1], 1), [-1])
        rois = tf.reshape(
            tf.gather(tf.reshape(boxes, [-1, 4]), samples_indices),
            [batch_size, -1, 4])
        class_targets = tf.reshape(
            tf.gather(tf.reshape(pre_sample_class_targets, [-1, 1]),
                      samples_indices), [batch_size, -1])
        sample_box_targets = tf.reshape(
            tf.gather(tf.reshape(pre_sample_box_targets, [-1, 4]),
                      samples_indices), [batch_size, -1, 4])
        sample_proposal_to_label_map = tf.reshape(
            tf.gather(tf.reshape(proposal_to_label_map, [-1, 1]),
                      samples_indices), [batch_size, -1])
    return sample_box_targets, class_targets, rois, sample_proposal_to_label_map
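# A minimal, self-contained sketch of steps 1-2 of the algorithm above, with a
# toy pairwise-IoU helper standing in for _bbox_overlap. All names and values
# here are illustrative assumptions, not the project's API.
import tensorflow as tf

def pairwise_iou(boxes_a, boxes_b):
    """boxes_a: [N, 4], boxes_b: [M, 4] in [ymin, xmin, ymax, xmax] -> [N, M] IoU."""
    ymin_a, xmin_a, ymax_a, xmax_a = tf.split(boxes_a, 4, axis=1)  # each [N, 1]
    ymin_b, xmin_b, ymax_b, xmax_b = tf.unstack(boxes_b, axis=1)   # each [M]
    inter_h = tf.maximum(0.0, tf.minimum(ymax_a, ymax_b) - tf.maximum(ymin_a, ymin_b))
    inter_w = tf.maximum(0.0, tf.minimum(xmax_a, xmax_b) - tf.maximum(xmin_a, xmin_b))
    inter = inter_h * inter_w                                      # [N, M] by broadcasting
    area_a = (ymax_a - ymin_a) * (xmax_a - xmin_a)                 # [N, 1]
    area_b = (ymax_b - ymin_b) * (xmax_b - xmin_b)                 # [M]
    return inter / (area_a + area_b - inter)

proposals = tf.constant([[0., 0., 10., 10.], [5., 5., 15., 15.]])
gt = tf.constant([[0., 0., 10., 10.]])
gt_cls = tf.constant([3])
iou = pairwise_iou(proposals, gt)                         # [2, 1]
assigned_cls = tf.gather(gt_cls, tf.argmax(iou, axis=1))  # class of best-overlap gt box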
Example #37
0
# is_adversarial = tf.cast(tf.argmax(softmax[0, ...]) == labels, dtype=tf.float32)


inputs = rgb_input
perturbation = eps_rgb

# target label: for untargeted attack set original label


# adversarial classification loss
one_hot = tf.one_hot(labels, NUM_CLASSES)
# max_non_correct_cls = tf.reduce_max(softmax - one_hot, axis=-1)

target_cls_logits = tf.boolean_mask(
    model_logits, tf.cast(tf.one_hot(labels, NUM_CLASSES), dtype=tf.bool))
max_non_target_cls_logits = tf.reduce_max(
    (1 - one_hot) * model_logits + one_hot * tf.reduce_min(model_logits),
    axis=-1)


margin = 20.0
l_1 = 0.0
l_2 = ((max_non_target_cls_logits - (target_cls_logits - margin)) ** 2) / margin
l_3 = max_non_target_cls_logits - (target_cls_logits - margin)

# ind = tf.argmax(1-tf.one_hot(labels,NUM_CLASSES),axis=-1)
# scores, ind = tf.math.top_k(softmax,k=2)

# max_non_correct_cls = scores[tf.cast(tf.squeeze(tf.equal(tf.cast(ind[0], dtype=tf.int64), labels)),dtype=tf.int64)]


# l_1= 0.0
# l_2 = ((max_non_correct_cls - (correct_cls_prob - margin)) ** 2) / margin
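# A toy check, under assumed values, of the masking trick used for
# max_non_target_cls_logits above: replacing the target-class logit with the
# row minimum before tf.reduce_max yields the largest *non-target* logit.
import tensorflow as tf

logits = tf.constant([[2.0, 5.0, 1.0]])
one_hot = tf.one_hot([1], 3)                # target class is index 1
row_min = tf.reduce_min(logits, axis=-1, keepdims=True)
max_non_target = tf.reduce_max((1.0 - one_hot) * logits + one_hot * row_min,
                               axis=-1)     # [2.0], ignoring the target's 5.0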
Example #38
0
def batch_multiclass_non_max_suppression(boxes,
                                         scores,
                                         score_thresh,
                                         iou_thresh,
                                         max_size_per_class,
                                         max_total_size=0,
                                         clip_window=None,
                                         num_valid_boxes=None,
                                         scope=None,
                                         use_static_shapes=False,
                                         parallel_iterations=32):
    """Multi-class version of non maximum suppression that operates on a batch.

  This op is similar to `multiclass_non_max_suppression` but operates on a batch
  of boxes and scores. See documentation for `multiclass_non_max_suppression`
  for details.

  Args:
    boxes: A [batch_size, num_anchors, q, 4] float32 tensor containing
      detections. If `q` is 1 then same boxes are used for all classes
        otherwise, if `q` is equal to number of classes, class-specific boxes
        are used.
    scores: A [batch_size, num_anchors, num_classes] float32 tensor containing
      the scores for each of the `num_anchors` detections. The scores have to be
      non-negative when use_static_shapes is set True.
    score_thresh: scalar threshold for score (low scoring boxes are removed).
    iou_thresh: scalar threshold for IOU (new boxes that have high IOU overlap
      with previously selected boxes are removed).
    max_size_per_class: maximum number of retained boxes per class.
    max_total_size: maximum number of boxes retained over all classes. By
      default returns all boxes retained after capping boxes per class.
    clip_window: A float32 tensor of shape [batch_size, 4] where each entry is
      of the form [y_min, x_min, y_max, x_max] representing the window to clip
      boxes to before performing non-max suppression. This argument can also be
      a tensor of shape [4] in which case the same clip window is applied to
      all images in the batch. If clip_window is None, all boxes are used to
      perform non-max suppression.
    num_valid_boxes: (optional) a Tensor of type `int32`. A 1-D tensor of shape
      [batch_size] representing the number of valid boxes to be considered
      for each image in the batch.  This parameter allows for ignoring zero
      paddings.
    use_static_shapes: If True, the output nmsed boxes are padded to be of
      length `max_size_per_class` and are not clipped to max_total_size.
      Defaults to False.
    parallel_iterations: (optional) number of batch items to process in
      parallel.

  Returns:
    'nmsed_boxes': A [batch_size, max_detections, 4] float32 tensor
      containing the non-max suppressed boxes.
    'nmsed_scores': A [batch_size, max_detections] float32 tensor containing
      the scores for the boxes.
    'nmsed_classes': A [batch_size, max_detections] float32 tensor
      containing the class for boxes.
    'num_detections': A [batch_size] int32 tensor indicating the number of
      valid detections per batch item. Only the top num_detections[i] entries in
      nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of the
      entries are zero paddings.

  Raises:
    ValueError: if `q` in boxes.shape is not 1 or not equal to number of
      classes as inferred from scores.shape.
  """
    q = boxes.shape[2].value
    num_classes = scores.shape[2].value
    if q != 1 and q != num_classes:
        raise ValueError('third dimension of boxes must be either 1 or equal '
                         'to the third dimension of scores')
    with tf.name_scope(scope, 'BatchMultiClassNonMaxSuppression'):
        boxes_shape = boxes.shape
        batch_size = boxes_shape[0].value
        num_anchors = boxes_shape[1].value

        if batch_size is None:
            batch_size = tf.shape(boxes)[0]
        if num_anchors is None:
            num_anchors = tf.shape(boxes)[1]

        # If num valid boxes aren't provided, create one and mark all boxes as
        # valid.
        if num_valid_boxes is None:
            num_valid_boxes = tf.ones([batch_size],
                                      dtype=tf.int32) * num_anchors

        if clip_window is None:
            clip_window = tf.stack([
                tf.reduce_min(boxes[:, :, :, 0]),
                tf.reduce_min(boxes[:, :, :, 1]),
                tf.reduce_max(boxes[:, :, :, 2]),
                tf.reduce_max(boxes[:, :, :, 3])
            ])
        if clip_window.shape.ndims == 1:
            clip_window = tf.tile(tf.expand_dims(clip_window, 0),
                                  [batch_size, 1])

        def _single_image_nms_fn(args):
            """Runs NMS on a single image and returns padded output.

      Args:
        args: A list of tensors consisting of the following:
          per_image_boxes - A [num_anchors, q, 4] float32 tensor containing
            detections. If `q` is 1 then same boxes are used for all classes
            otherwise, if `q` is equal to number of classes, class-specific
            boxes are used.
          per_image_scores - A [num_anchors, num_classes] float32 tensor
            containing the scores for each of the `num_anchors` detections.
          per_image_clip_window - A 1D float32 tensor of the form
            [ymin, xmin, ymax, xmax] representing the window to clip the boxes
            to.
          per_image_additional_fields - (optional) A variable number of float32
            tensors each with size [num_anchors, ...].
          per_image_num_valid_boxes - A scalar tensor of type `int32` giving
            the number of valid boxes for this image. This parameter allows
            zero paddings to be ignored.

      Returns:
        'nmsed_boxes': A [max_detections, 4] float32 tensor containing the
          non-max suppressed boxes.
        'nmsed_scores': A [max_detections] float32 tensor containing the scores
          for the boxes.
        'nmsed_classes': A [max_detections] float32 tensor containing the class
          for boxes.
        'num_detections': A scalar int32 tensor indicating the number of valid
          detections for this image. Only the top num_detections entries in
          nmsed_boxes, nmsed_scores and nmsed_classes are valid; the rest of
          the entries are zero paddings.
      """
            per_image_boxes = args[0]
            per_image_scores = args[1]
            per_image_clip_window = args[2]
            per_image_num_valid_boxes = args[-1]
            per_image_boxes = tf.reshape(
                tf.slice(per_image_boxes, 3 * [0],
                         tf.stack([per_image_num_valid_boxes, -1, -1])),
                [-1, q, 4])
            per_image_scores = tf.reshape(
                tf.slice(per_image_scores, [0, 0],
                         tf.stack([per_image_num_valid_boxes, -1])),
                [-1, num_classes])
            nmsed_boxlist, num_valid_nms_boxes = multiclass_non_max_suppression(
                per_image_boxes,
                per_image_scores,
                score_thresh,
                iou_thresh,
                max_size_per_class,
                max_total_size,
                clip_window=per_image_clip_window)

            if not use_static_shapes:
                nmsed_boxlist = box_list_ops.pad_or_clip_box_list(
                    nmsed_boxlist, max_total_size)
            num_detections = num_valid_nms_boxes
            nmsed_boxes = nmsed_boxlist.get()
            nmsed_scores = nmsed_boxlist.get_field("scores")
            nmsed_classes = nmsed_boxlist.get_field("classes")
            return ([nmsed_boxes, nmsed_scores, nmsed_classes, num_detections])

        batch_outputs = shape_utils.static_or_dynamic_map_fn(
            _single_image_nms_fn,
            elems=([boxes, scores, clip_window, num_valid_boxes]),
            dtype=(3 * [tf.float32] + [tf.int32]),
            parallel_iterations=parallel_iterations)

        batch_nmsed_boxes = batch_outputs[0]
        batch_nmsed_scores = batch_outputs[1]
        batch_nmsed_classes = batch_outputs[2]
        batch_num_detections = batch_outputs[-1]

        return (batch_nmsed_boxes, batch_nmsed_scores, batch_nmsed_classes,
                batch_num_detections)
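# A small sketch, with assumed shapes, of the default clip window built above:
# taking tf.reduce_min / tf.reduce_max over the box coordinates yields a
# [ymin, xmin, ymax, xmax] window that encloses every box, so clipping with it
# is effectively a no-op. Shapes and names are illustrative only.
import tensorflow as tf

boxes = tf.random.uniform([2, 8, 1, 4])     # [batch_size, num_anchors, q, 4]
clip_window = tf.stack([
    tf.reduce_min(boxes[:, :, :, 0]),       # smallest ymin
    tf.reduce_min(boxes[:, :, :, 1]),       # smallest xmin
    tf.reduce_max(boxes[:, :, :, 2]),       # largest ymax
    tf.reduce_max(boxes[:, :, :, 3]),       # largest xmax
])                                          # shape [4]; tiled per batch item above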
Example #39
0
truth = np.sin(2 * np.pi * (np.arange(features, dtype=np.float32)) / features)

# Initialize population array
population = tf.Variable(np.random.randn(pop_size, features), dtype=tf.float32)

# Initialize placeholders
truth_ph = tf.placeholder(tf.float32, [1, features])
crossover_mat_ph = tf.placeholder(tf.float32, [num_children, features])
mutation_val_ph = tf.placeholder(tf.float32, [num_children, features])

# Calculate fitness (MSE)
fitness = -tf.reduce_mean(tf.square(tf.subtract(population, truth_ph)), 1)
top_vals, top_ind = tf.nn.top_k(fitness, k=pop_size)

# Get best fit individual: fitness is negative MSE, so the best individual has
# the *maximum* fitness; top_ind is already sorted by descending fitness
best_val = tf.reduce_max(top_vals)
best_ind = top_ind[0]
best_individual = tf.gather(population, best_ind)

# Get parents
population_sorted = tf.gather(population, top_ind)
parents = tf.slice(population_sorted, [0, 0], [num_parents, features])

# Get offspring
# Indices to shuffle-gather parents
rand_parent1_ix = np.random.choice(num_parents, num_children)
rand_parent2_ix = np.random.choice(num_parents, num_children)
# Gather parents by shuffled indices, expand back out to pop_size too
rand_parent1 = tf.gather(parents, rand_parent1_ix)
rand_parent2 = tf.gather(parents, rand_parent2_ix)
rand_parent1_sel = tf.multiply(rand_parent1, crossover_mat_ph)
Example #40
0
    def _train_body(self, obses_anchor, obses_negative, actions, next_obses_anchor, rewards, dones, weights):
        with tf.device(self.device):
            assert len(dones.shape) == 2
            assert len(rewards.shape) == 2
            rewards = tf.squeeze(rewards, axis=1)
            dones = tf.squeeze(dones, axis=1)

            not_dones = 1. - tf.cast(dones, dtype=tf.float32)

            with tf.GradientTape(persistent=True) as tape:
                obs_features = self._encoder(obses_anchor)
                next_obs_features = self._encoder(next_obses_anchor)

                # Compute loss of critic Q
                current_q1 = self.qf1(obs_features, actions)
                current_q2 = self.qf2(obs_features, actions)
                next_v_target = self.vf_target(next_obs_features)

                target_q = tf.stop_gradient(
                    rewards + not_dones * self.discount * next_v_target)

                td_loss_q1 = tf.reduce_mean((target_q - current_q1) ** 2)
                td_loss_q2 = tf.reduce_mean((target_q - current_q2) ** 2)  # Eq.(7)

                # Compute loss of critic V
                current_v = self.vf(obs_features)

                sample_actions, logp = self.actor(obs_features)  # Resample actions to update V
                current_q1 = self.qf1(obs_features, sample_actions)
                current_q2 = self.qf2(obs_features, sample_actions)
                current_min_q = tf.minimum(current_q1, current_q2)

                target_v = tf.stop_gradient(current_min_q - self.alpha * logp)
                td_errors = target_v - current_v
                td_loss_v = tf.reduce_mean(td_errors ** 2)  # Eq.(5)

                # Compute loss of policy
                policy_loss = tf.reduce_mean(self.alpha * logp - current_min_q)  # Eq.(12)

                # Compute loss of temperature parameter for entropy
                if self.auto_alpha:
                    alpha_loss = -tf.reduce_mean(
                        (self.log_alpha * tf.stop_gradient(logp + self.target_alpha)))

                # Compute loss of CURL
                z_anchor = obs_features
                z_negatives = self._encoder_target(obses_negative)
                # Compute similarities with bilinear products
                logits = tf.matmul(z_anchor, tf.matmul(self._curl_w, tf.transpose(z_negatives, [1, 0])))
                # tf.print(logits)
                logits -= tf.reduce_max(logits, axis=-1, keepdims=True)  # (batch_size, batch_size)
                # tf.print(logits)
                # tf.print(tf.keras.losses.sparse_categorical_crossentropy(tf.range(self.batch_size), logits, from_logits=True))
                curl_loss = tf.reduce_mean(
                    tf.keras.losses.sparse_categorical_crossentropy(tf.range(self.batch_size), logits, from_logits=True))  # Eq.4

            q1_grad = tape.gradient(td_loss_q1, self.qf1.trainable_variables)
            self.qf1_optimizer.apply_gradients(
                zip(q1_grad, self.qf1.trainable_variables))
            q2_grad = tape.gradient(td_loss_q2, self.qf2.trainable_variables)
            self.qf2_optimizer.apply_gradients(
                zip(q2_grad, self.qf2.trainable_variables))

            vf_grad = tape.gradient(td_loss_v, self.vf.trainable_variables)
            self.vf_optimizer.apply_gradients(
                zip(vf_grad, self.vf.trainable_variables))
            update_target_variables(
                self.vf_target.weights, self.vf.weights, self.tau)

            actor_grad = tape.gradient(
                policy_loss, self.actor.trainable_variables)
            self.actor_optimizer.apply_gradients(
                zip(actor_grad, self.actor.trainable_variables))

            if self.auto_alpha:
                alpha_grad = tape.gradient(alpha_loss, [self.log_alpha])
                self.alpha_optimizer.apply_gradients(
                    zip(alpha_grad, [self.log_alpha]))
                self.alpha.assign(tf.exp(self.log_alpha))

            curl_grads = tape.gradient(curl_loss, [self._curl_w] + self._encoder.trainable_variables)
            self._curl_optimizer.apply_gradients(
                zip(curl_grads, [self._curl_w] + self._encoder.trainable_variables))
            update_target_variables(
                self._encoder_target.weights, self._encoder.weights, self._encoder_tau)

            del tape

        return td_errors, policy_loss, td_loss_v, td_loss_q1, tf.reduce_min(logp), tf.reduce_max(logp), tf.reduce_mean(
            logp), curl_loss, tf.reduce_mean(z_anchor), tf.reduce_mean(z_negatives), tf.reduce_mean(logits)
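# A minimal sketch, with assumed values, of the logit stabilization used before
# the CURL cross-entropy above: subtracting the per-row tf.reduce_max leaves the
# softmax probabilities unchanged but avoids overflow for large logits.
import tensorflow as tf

logits = tf.constant([[1000.0, 1001.0]])
stable = logits - tf.reduce_max(logits, axis=-1, keepdims=True)  # [[-1.0, 0.0]]
# tf.nn.softmax(stable) equals tf.nn.softmax(logits) mathematically, minus the inf/nan risk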
Example #41
0
        def f(x, init, ema, dropout_p, verbose, context):
            if init and verbose:
                with tf.variable_scope('debug'):
                    xmean, xvar = tf.nn.moments(x,
                                                axes=list(
                                                    range(len(x.get_shape()))))
                    x = tf.Print(
                        x,
                        [
                            tf.shape(x), xmean,
                            tf.sqrt(xvar),
                            tf.reduce_min(x),
                            tf.reduce_max(x),
                            tf.reduce_any(tf.is_nan(x)),
                            tf.reduce_any(tf.is_inf(x))
                        ],
                        message='{} (shape/mean/std/min/max/nan/inf) '.format(
                            self.template.variable_scope.name),
                        summarize=10,
                    )
            B, H, W, C = x.shape.as_list()

            pos_emb = to_default_floatx(
                get_var(
                    'pos_emb',
                    ema=ema,
                    shape=[H, W, filters],
                    initializer=tf.random_normal_initializer(stddev=0.01),
                ))
            x = conv2d(x, name='c1', num_units=filters, init=init, ema=ema)
            for i_block in range(blocks):
                with tf.variable_scope('block{}'.format(i_block)):
                    x = gated_resnet(x,
                                     name='conv',
                                     a=context,
                                     use_nin=use_nin,
                                     init=init,
                                     ema=ema,
                                     dropout_p=dropout_p)
                    if use_ln:
                        x = norm(x, name='ln1', ema=ema)
                    x = attn(x,
                             name='attn',
                             pos_emb=pos_emb,
                             heads=attn_heads,
                             init=init,
                             ema=ema,
                             dropout_p=dropout_p)
                    if use_ln:
                        x = norm(x, name='ln2', ema=ema)
                    assert x.shape == [B, H, W, filters]
            x = nonlinearity(x)
            x = (nin if use_final_nin else conv2d)(x,
                                                   name='c2',
                                                   num_units=C *
                                                   (2 + 3 * components),
                                                   init_scale=init_scale,
                                                   init=init,
                                                   ema=ema)
            assert x.shape == [B, H, W, C * (2 + 3 * components)]
            x = tf.reshape(x, [B, H, W, C, 2 + 3 * components])

            x = at_least_float32(x)  # do mix-logistics stuff in float32

            s, t = tf.tanh(x[:, :, :, :, 0]), x[:, :, :, :, 1]
            ml_logits, ml_means, ml_logscales = tf.split(x[:, :, :, :, 2:],
                                                         3,
                                                         axis=4)
            ml_logscales = tf.maximum(ml_logscales, -7.)

            assert s.shape == t.shape == [B, H, W, C]
            assert ml_logits.shape == ml_means.shape == ml_logscales.shape == [
                B, H, W, C, components
            ]
            return s, t, ml_logits, ml_means, ml_logscales
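# A compact sketch of the init-time debug probe at the top of f(): summarizing a
# tensor by mean/std/min/max plus NaN/Inf flags is a cheap sanity check during
# data-dependent initialization. The helper name is an assumption.
import tensorflow as tf

def debug_stats(x):
    mean, var = tf.nn.moments(x, axes=list(range(len(x.get_shape()))))
    return [mean, tf.sqrt(var),
            tf.reduce_min(x), tf.reduce_max(x),
            tf.reduce_any(tf.is_nan(x)), tf.reduce_any(tf.is_inf(x))]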
Example #42
0
 def reduce_min(self, t, name=None):
     """Implements reduce_min for TF backend."""
     with tf.compat.v2.name_scope('VM.reduce_min'):
         return tf.reduce_min(input_tensor=t, name=name)
Example #43
0
def BalanceLoss(pred,
                gt,
                mask,
                balance_loss=True,
                main_loss_type="DiceLoss",
                negative_ratio=3,
                return_origin=False,
                eps=1e-6):
    """
    The BalanceLoss for Differentiable Binarization text detection
    args:
        pred (variable): predicted feature maps.
        gt (variable): ground truth feature maps.
        mask (variable): masked maps.
        balance_loss (bool): whether balance loss or not, default is True
        main_loss_type (str): can only be one of ['CrossEntropy','DiceLoss',
            'Euclidean','BCELoss', 'MaskL1Loss'], default is  'DiceLoss'.
        negative_ratio (int|float): float, default is 3.
        return_origin (bool): whether return unbalanced loss or not, default is False.
        eps (float): default is 1e-6.
    return: (variable) balanced loss
    """
    positive = gt * mask
    negative = (1 - gt) * mask

    positive_count = tf.reduce_sum(positive)
    positive_count_int = tf.cast(positive_count, dtype=tf.int32)
    negative_count = tf.reduce_min(
        [tf.reduce_sum(negative), positive_count * negative_ratio])
    negative_count_int = tf.cast(negative_count, dtype=tf.int32)

    if main_loss_type == "CrossEntropy":
        loss = tf.cross_entropy(input=pred, label=gt, soft_label=True)
        loss = tf.reduce_mean(loss)
    elif main_loss_type == "Euclidean":
        loss = tf.square(pred - gt)
        loss = tf.reduce_mean(loss)
    elif main_loss_type == "DiceLoss":
        loss = DiceLoss(pred, gt, mask)
    elif main_loss_type == "BCELoss":
        loss = tf.sigmoid_cross_entropy_with_logits(pred, label=gt)
    elif main_loss_type == "MaskL1Loss":
        loss = MaskL1Loss(pred, gt, mask)
    else:
        loss_type = [
            'CrossEntropy', 'DiceLoss', 'Euclidean', 'BCELoss', 'MaskL1Loss'
        ]
        raise ValueError(
            "main_loss_type in BalanceLoss() can only be one of {}".format(
                loss_type))

    if not balance_loss:
        return loss

    positive_loss = positive * loss
    negative_loss = negative * loss
    negative_loss = tf.reshape(negative_loss, shape=[-1])
    negative_loss, _ = tf.nn.top_k(negative_loss, k=negative_count_int)
    balance_loss = (tf.reduce_sum(positive_loss) + tf.reduce_sum(negative_loss)
                    ) / (positive_count + negative_count + eps)

    if return_origin:
        return balance_loss, loss
    return balance_loss
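# A toy sketch, under assumed counts, of the hard-negative cap above:
# tf.reduce_min over [num_negatives, num_positives * ratio] keeps at most
# negative_ratio negatives per positive before the top_k mining step.
import tensorflow as tf

positive_count = tf.constant(10.0)
negatives_available = tf.constant(100.0)
negative_ratio = 3.0
negative_count = tf.reduce_min([negatives_available,
                                positive_count * negative_ratio])  # 30.0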
Example #44
0
        def loop_step(batch_index, ts, stop_decoder, states, alphas, cand_seqs,
                      cand_scores, completed_scores, completed_scores_scaled,
                      completed_seqs, completed_lens):
            """
            Args:
              batch_index: batch index
              ts (int): time step
              stop_decoder (bool): stop decoding
              ys (?): [beam_size]
              states (float): [beam_size, state_size]
              alphas (float): [beam_size, alpha_size]
              cand_scores: [beam_size], sequence score
              cand_seqs: [beam_size, ts], ts increases over time

            Returns:
              logits shape: [beam_size, output_dim]
              state: [beam_size, state_size]
              alpha: [beam_size, alpha_size]

            """
            # 1. get score from one step decoder
            # logits = tf.one_hot(ts, depth=num_symbols, off_value=0.0, dtype=tf.float32)
            if DEBUG: ts = tf.Print(ts, [ts], message='ts: ')
            ys = cand_seqs[:, ts]
            if DEBUG: ys = tf.Print(ys, [ys], message='Y(t-1): ')
            logits, states, alphas = self.step(ys, states, alphas, batch_index)
            if DEBUG: logits = tf.Print(logits, [logits], message='logits: ')
            Z = tf.reduce_logsumexp(logits, 1, keep_dims=True)
            if DEBUG: Z = tf.Print(Z, [Z], message='Z: ')
            logprobs = tf.subtract(logits, Z)  # [beam_size, num_symbols]
            new_scores = tf.add(logprobs,
                                tf.expand_dims(cand_scores,
                                               1))  # [beam_size, num_symbols]
            if DEBUG:
                new_scores = tf.Print(new_scores, [new_scores],
                                      message='new_scores: ')

            num_unstop_symbols = tf.shape(new_scores)[1] - 1
            new_uncompleted_scores, new_completed_scores = tf.split(
                new_scores, [num_unstop_symbols, 1], 1)
            if DEBUG:
                new_uncompleted_scores = tf.Print(
                    new_uncompleted_scores, [new_uncompleted_scores],
                    message='new_uncompleted_scores: ')

            # 2. Update completed seqs  --------------------------------------
            # 2.1 update scores
            new_completed_scores = tf.squeeze(new_completed_scores,
                                              -1)  # [beam_size]
            all_completed_scores = tf.concat(
                [completed_scores, new_completed_scores], 0)  # [2*beam_size]

            # 2.2 choose top K from scaled_scores
            new_completed_scores_scaled = tf.div(new_completed_scores,
                                                 tf.to_float(ts + 1))
            all_scores_scaled = tf.concat(
                [completed_scores_scaled, new_completed_scores_scaled], 0)
            completed_scores_scaled, indices = tf.nn.top_k(all_scores_scaled,
                                                           k=beam_size,
                                                           sorted=False)
            if DEBUG:
                indices = tf.Print(indices, [indices],
                                   message='top K completed indices: ')

            # 2.3 update lens
            new_completed_lens = tf.fill([beam_size], tf.add(ts,
                                                             1))  # [beam_size]
            all_lens = tf.concat([completed_lens, new_completed_lens],
                                 0)  # [2*beam_size]
            completed_lens = tf.gather(all_lens,
                                       indices,
                                       validate_indices=True,
                                       axis=0)  # [beam_size]
            if DEBUG:
                completed_lens = tf.Print(completed_lens, [completed_lens],
                                          message='completed lens',
                                          summarize=5)

            # 2.4 update seqs
            all_completed = tf.concat([completed_seqs, cand_seqs], 0)
            completed_seqs = tf.gather(all_completed,
                                       indices,
                                       validate_indices=True,
                                       axis=0)  # [beam_size, ts]
            if DEBUG:
                completed_seqs = tf.Print(completed_seqs, [completed_seqs],
                                          message='completed seqs: ',
                                          summarize=MAX_STEPS + 2)

            # 2.5 stop decoding loop
            max_uncompleted = tf.reduce_max(new_uncompleted_scores)
            completed_scores = tf.gather(all_completed_scores,
                                         indices,
                                         validate_indices=True,
                                         axis=0)
            min_completed = tf.reduce_min(completed_scores)
            stop_decoder = tf.greater(min_completed, max_uncompleted)

            # 2. Update completed seqs  --------------------------------------

            # 3. Update uncompleted sequences --------------------------------
            # new_uncompleted_scores: [beam_size, num_symbols-1]
            # top_k: [beam_size]. indices of top k scores
            def f0():
                return new_uncompleted_scores[0, :]

            def f1():
                return new_uncompleted_scores

            un_scores = tf.cond(tf.equal(ts, 0), f0, f1)
            new_flat = tf.squeeze(tf.reshape(
                un_scores, [-1, 1]))  # [beam_size*num_unstop_symbols]

            # get top K symbols
            cand_scores, flat_indices = tf.nn.top_k(new_flat,
                                                    k=beam_size,
                                                    sorted=False)
            cand_parents = tf.div(flat_indices, num_unstop_symbols)
            _ys = tf.mod(flat_indices,
                         num_unstop_symbols)  # [beam_size], y(t) for next step
            A = tf.gather(cand_seqs[:, 0:ts + 1],
                          cand_parents)  #[beam_size, ts+1]
            B = tf.expand_dims(_ys, -1)  # [beam_size, 1]
            C = tf.fill([beam_size, MAX_STEPS + 2 - ts - 2], stop_symbol)
            cand_seqs = tf.concat([A, B, C], 1)  # [beam_size, MAX_STEPS]
            if DEBUG:
                cand_seqs = tf.Print(cand_seqs, [cand_seqs],
                                     message='cand seqs: ',
                                     summarize=MAX_STEPS + 2)
            cand_seqs = tf.reshape(cand_seqs, [beam_size, MAX_STEPS + 2])
            cand_scores.set_shape([beam_size])
            completed_seqs = tf.reshape(completed_seqs,
                                        [beam_size, MAX_STEPS + 2])

            s1_shape = [beam_size, self.attention_cell.state_size]
            s2_shape = [beam_size, self.decoder_cell.state_size]
            s3_shape = [beam_size, self.attn_context.context_size]

            # prepare data for next step
            # states = tf.gather(states, cand_parents, axis=0)
            # states = self.select_states(states, cand_parents)
            states = tuple(tf.gather(el, cand_parents) for el in states)
            states[0].set_shape(s1_shape)
            states[1].set_shape(s2_shape)
            states[2].set_shape(s3_shape)
            alphas = tf.gather(alphas, cand_parents, axis=1)
            alphas_shape = [self.attn_context.num_encoder_states, beam_size]
            alphas = tf.reshape(alphas, alphas_shape)
            # alphas.set_shape(alphas_shape)
            # 3. Update uncompleted sequences --------------------------------

            ts = tf.add(ts, 1)
            return batch_index, ts, stop_decoder, states, alphas, cand_seqs, \
                cand_scores, completed_scores, completed_scores_scaled, \
                completed_seqs, completed_lens
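# A minimal sketch, with assumed scores, of the early-stop test in step 2.5
# above: decoding can halt once the *worst* completed hypothesis already beats
# the *best* still-uncompleted one.
import tensorflow as tf

completed_scores = tf.constant([-1.2, -0.8, -1.0])            # finished beams
new_uncompleted_scores = tf.constant([[-3.0, -2.5],
                                      [-4.0, -2.9]])           # open beams
stop_decoder = tf.greater(tf.reduce_min(completed_scores),
                          tf.reduce_max(new_uncompleted_scores))  # True here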
Example #45
0
 def _loop_cond(unused_boxes, unused_threshold, output_size, idx):
     return tf.logical_and(
         tf.reduce_min(output_size) < max_output_size,
         idx < num_boxes // _NMS_TILE_SIZE)
Example #46
0
    def critic_loss(self, time_steps, actions, next_time_steps):
        """Computes the critic loss for TD3 training.

    Args:
      time_steps: A batch of timesteps.
      actions: A batch of actions.
      next_time_steps: A batch of next timesteps.
    Returns:
      critic_loss: A scalar critic loss.
    """
        with tf.name_scope('critic_loss'):
            target_actions, _ = self._target_actor_network(
                next_time_steps.observation, next_time_steps.step_type)

            # Add Gaussian noise to each action before computing target q values
            def add_noise_to_action(action):  # pylint: disable=missing-docstring
                dist = tfp.distributions.Normal(loc=tf.zeros_like(action),
                                                scale=self._target_policy_noise * \
                                                tf.ones_like(action))
                noise = dist.sample()
                noise = tf.clip_by_value(noise,
                                         -self._target_policy_noise_clip,
                                         self._target_policy_noise_clip)
                return action + noise

            noisy_target_actions = nest.map_structure(add_noise_to_action,
                                                      target_actions)

            # Target q-values are the min of the two networks
            target_q_values_1, _ = self._target_critic_network_1(
                next_time_steps.observation, noisy_target_actions,
                next_time_steps.step_type)
            target_q_values_2, _ = self._target_critic_network_2(
                next_time_steps.observation, noisy_target_actions,
                next_time_steps.step_type)
            target_q_values = tf.minimum(target_q_values_1, target_q_values_2)

            td_targets = tf.stop_gradient(
                self._reward_scale_factor * next_time_steps.reward +
                self._gamma * next_time_steps.discount * target_q_values)

            pred_td_targets_1, _ = self._critic_network_1(
                time_steps.observation, actions, time_steps.step_type)
            pred_td_targets_2, _ = self._critic_network_2(
                time_steps.observation, actions, time_steps.step_type)
            pred_td_targets_all = [pred_td_targets_1, pred_td_targets_2]

            if self._debug_summaries:
                tf.contrib.summary.histogram('td_targets', td_targets)
                with tf.name_scope('td_targets'):
                    tf.contrib.summary.scalar('mean',
                                              tf.reduce_mean(td_targets))
                    tf.contrib.summary.scalar('max', tf.reduce_max(td_targets))
                    tf.contrib.summary.scalar('min', tf.reduce_min(td_targets))

                for td_target_idx in range(2):
                    pred_td_targets = pred_td_targets_all[td_target_idx]
                    td_errors = td_targets - pred_td_targets
                    with tf.name_scope('critic_net_%d' % (td_target_idx + 1)):
                        tf.contrib.summary.histogram('td_errors', td_errors)
                        tf.contrib.summary.histogram('pred_td_targets',
                                                     pred_td_targets)
                        with tf.name_scope('td_errors'):
                            tf.contrib.summary.scalar(
                                'mean', tf.reduce_mean(td_errors))
                            tf.contrib.summary.scalar(
                                'mean_abs', tf.reduce_mean(tf.abs(td_errors)))
                            tf.contrib.summary.scalar('max',
                                                      tf.reduce_max(td_errors))
                            tf.contrib.summary.scalar('min',
                                                      tf.reduce_min(td_errors))
                        with tf.name_scope('pred_td_targets'):
                            tf.contrib.summary.scalar(
                                'mean', tf.reduce_mean(pred_td_targets))
                            tf.contrib.summary.scalar(
                                'max', tf.reduce_max(pred_td_targets))
                            tf.contrib.summary.scalar(
                                'min', tf.reduce_min(pred_td_targets))

            critic_loss = (
                self._td_errors_loss_fn(td_targets, pred_td_targets_1) +
                self._td_errors_loss_fn(td_targets, pred_td_targets_2))
            if nest_utils.is_batched_nested_tensors(time_steps,
                                                    self.time_step_spec(),
                                                    num_outer_dims=2):
                # Sum over the time dimension.
                critic_loss = tf.reduce_sum(critic_loss, axis=1)

            return tf.reduce_mean(critic_loss)
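# A short sketch, with assumed rewards and Q-values, of the clipped double-Q
# target above: the TD target takes the elementwise tf.minimum of the two
# target critics to damp Q-value overestimation.
import tensorflow as tf

target_q1 = tf.constant([10.0, 4.0])
target_q2 = tf.constant([8.0, 6.0])
reward = tf.constant([1.0, 1.0])
gamma = 0.99
td_targets = tf.stop_gradient(reward + gamma * tf.minimum(target_q1, target_q2))
# -> [1 + 0.99 * 8, 1 + 0.99 * 4]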
Example #47
0
    def criterions(self, inputs, outputs):
        loss_dict = {}
        total_l1_loss = 0.
        total_ssim_loss = 0.
        total_smooth_loss = 0.

        for scale in range(self.params.num_scales):
            l1_losses = []
            ssim_losses = []
            for f_i in self.params.frame_ids[1:]:
                target_rgb = inputs['img']
                pred_rgb = outputs[f'pred{f_i}{scale}']

                # L1 Loss
                abs_diff = tf.abs(target_rgb - pred_rgb)
                l1_loss = tf.reduce_mean(abs_diff, axis=-1,
                                         keepdims=True)  # [b, h, w, 1]
                l1_losses.append(l1_loss)

                # SSIM Loss
                ssim = tf.reduce_mean(ssim_loss(target_rgb, pred_rgb),
                                      axis=-1,
                                      keepdims=True)
                ssim_losses.append(ssim)

            ssim_losses = tf.concat(ssim_losses, -1)
            l1_losses = tf.concat(l1_losses, -1)
            if scale == 0:
                outputs['l1_error'] = l1_losses

            # Automasking
            identity_l1_losses = []
            identity_ssim_losses = []
            for f_i in self.params.frame_ids[1:]:
                target_rgb = inputs['img']
                source_rgb = inputs[f'img{f_i}']

                # L1 Loss
                abs_diff = tf.abs(source_rgb - target_rgb)
                l1_loss = tf.reduce_mean(abs_diff, axis=-1, keepdims=True)
                identity_l1_losses.append(l1_loss)

                # SSIM Loss [b, h, w, 1]
                ssim = tf.reduce_mean(ssim_loss(source_rgb, target_rgb),
                                      axis=-1,
                                      keepdims=True)
                identity_ssim_losses.append(ssim)

            identity_ssim_losses = tf.concat(identity_ssim_losses, -1)
            identity_l1_losses = tf.concat(identity_l1_losses, -1)

            identity_l1_losses += tf.random.normal(
                identity_l1_losses.shape) * 0.00001  # Break ties
            identity_ssim_losses += tf.random.normal(
                identity_ssim_losses.shape) * 0.00001  # Break ties

            combined_l1 = tf.concat((identity_l1_losses, l1_losses), axis=-1)
            combined_ssim = tf.concat((identity_ssim_losses, ssim_losses),
                                      axis=-1)

            combined_l1 = tf.reduce_min(combined_l1, axis=-1)
            combined_ssim = tf.reduce_min(combined_ssim, axis=-1)

            _ssim_loss = tf.reduce_mean(combined_ssim) * 0.85
            _l1_loss = tf.reduce_mean(combined_l1) * 0.15
            total_l1_loss += _l1_loss
            total_ssim_loss += _ssim_loss

            # Disparity smoothness
            disparity = outputs[f'disparity{scale}']
            mean_disp = tf.reduce_mean(disparity, [1, 2], keepdims=True)
            norm_disp = disparity / (mean_disp + 1e-7)

            h = self.params.input_h // (2**scale)
            w = self.params.input_w // (2**scale)
            color_resized = tf.image.resize(target_rgb, (h, w))

            smooth = smooth_loss(norm_disp, color_resized) * 1e-3
            total_smooth_loss += smooth

        total_smooth_loss /= self.params.num_scales
        total_ssim_loss /= self.params.num_scales
        total_l1_loss /= self.params.num_scales
        loss_dict['ssim'] = total_ssim_loss
        loss_dict['l1'] = total_l1_loss
        loss_dict['smooth'] = total_smooth_loss
        loss_dict['loss'] = total_smooth_loss + total_ssim_loss + total_l1_loss
        return loss_dict
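# A toy sketch, with assumed per-pixel errors, of the automasking step above:
# stacking identity (un-warped) errors with reprojection errors and taking
# tf.reduce_min per pixel lets static pixels fall back to the identity error.
import tensorflow as tf

reprojection = tf.constant([[0.3, 0.7]])   # error from the warped source frame
identity = tf.constant([[0.5, 0.1]])       # error of the raw source frame
combined = tf.stack([identity, reprojection], axis=-1)  # [1, 2, 2]
per_pixel = tf.reduce_min(combined, axis=-1)            # [[0.3, 0.1]]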
Example #48
0
def compute_neighbor_list(coords, nbr_cutoff, N, M, n_cells, ndim=3, k=5):
    """Computes a neighbor list from atom coordinates.

  Parameters
  ----------
  coords: tf.Tensor
    Shape (N, ndim)
  nbr_cutoff: float
    Cutoff distance for counting another atom as a neighbor.
  N: int
    Maximum number of atoms
  M: int
    Maximum number of neighbors
  n_cells: int
    Number of spatial cells
  ndim: int
    Dimensionality of space.
  k: int
    Number of nearest neighbors to pull down.

  Returns
  -------
  nbr_list: tf.Tensor
    Shape (N, M) of atom indices
  """
    start = tf.cast(tf.reduce_min(coords), tf.int32)
    stop = tf.cast(tf.reduce_max(coords), tf.int32)
    cells = get_cells(start, stop, nbr_cutoff, ndim=ndim)
    # Associate each atom with cell it belongs to. O(N*n_cells)
    # Shape (n_cells, k)
    atoms_in_cells, _ = put_atoms_in_cells(coords, cells, N, n_cells, ndim, k)
    # Shape (N, 1)
    cells_for_atoms = get_cells_for_atoms(coords, cells, N, n_cells, ndim)

    # Associate each cell with its neighbor cells. Assumes periodic boundary
    # conditions, so does wraparound. O(constant)
    # Shape (n_cells, 26)
    neighbor_cells = compute_neighbor_cells(cells, ndim, n_cells)

    # Shape (N, 26)
    neighbor_cells = tf.squeeze(tf.gather(neighbor_cells, cells_for_atoms))

    # coords of shape (N, ndim)
    # Shape (N, 26, k, ndim)
    tiled_coords = tf.tile(tf.reshape(coords, (N, 1, 1, ndim)), (1, 26, k, 1))

    # Shape (N, 26, k)
    nbr_inds = tf.gather(atoms_in_cells, neighbor_cells)

    # Shape (N, 26, k)
    atoms_in_nbr_cells = tf.gather(atoms_in_cells, neighbor_cells)

    # Shape (N, 26, k, ndim)
    nbr_coords = tf.gather(coords, atoms_in_nbr_cells)

    # For smaller systems especially, the periodic boundary conditions can
    # result in neighboring cells being seen multiple times. Maybe use tf.unique to
    # make sure duplicate neighbors are ignored?

    # TODO(rbharath): How does distance need to be modified here to
    # account for periodic boundary conditions?
    # Shape (N, 26, k)
    dists = tf.reduce_sum((tiled_coords - nbr_coords)**2, axis=3)

    # Shape (N, 26*k)
    dists = tf.reshape(dists, [N, -1])

    # TODO(rbharath): This will cause an issue with duplicates!
    # Shape (N, M); negate so top_k picks the *smallest* distances, i.e. the
    # closest neighbors (top_k on raw dists would select the farthest ones)
    closest_nbr_locs = tf.nn.top_k(-dists, k=M)[1]

    # N elts of size (M,) each
    split_closest_nbr_locs = [
        tf.squeeze(locs) for locs in tf.split(closest_nbr_locs, N)
    ]

    # Shape (N, 26*k)
    nbr_inds = tf.reshape(nbr_inds, [N, -1])

    # N elts of size (26*k,) each
    split_nbr_inds = [tf.squeeze(split) for split in tf.split(nbr_inds, N)]

    # N elts of size (M,) each
    neighbor_list = [
        tf.gather(nbr_inds, closest_nbr_locs)
        for (nbr_inds,
             closest_nbr_locs) in zip(split_nbr_inds, split_closest_nbr_locs)
    ]

    # Shape (N, M)
    neighbor_list = tf.stack(neighbor_list)

    return neighbor_list
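# A small sketch, with assumed coordinates, of the grid-bounds step at the top
# of compute_neighbor_list: tf.reduce_min / tf.reduce_max over all coordinates
# give the bounding range used to lay out the spatial cells (tf.cast truncates
# toward zero).
import tensorflow as tf

coords = tf.constant([[0.5, 2.0, -1.3],
                      [4.2, 0.1, 3.7]])
start = tf.cast(tf.reduce_min(coords), tf.int32)  # -1
stop = tf.cast(tf.reduce_max(coords), tf.int32)   # 4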
Example #49
0
    def __init__(self,
                 M,
                 Lr,
                 Wrow,
                 Otraining,
                 initial_W,
                 initial_H,
                 labels,
                 training_set_mask,
                 testing_set_mask,
                 validation_set_mask,
                 mask_features,
                 validation,
                 order_chebyshev_row=18,
                 cheby=1,
                 n_conv_feat=36,
                 l2_regu=10,
                 num_iterations=10,
                 gamma=563.39,
                 gamma_H=688.85,
                 gamma_W=248.91,
                 gamma_e=890.14,
                 learning_rate=0.00089,
                 idx_gpu='/gpu:1'):
        """
                     Neural network architecture. Compute an update X of M.
                     Inputs:
                        M: initial matrix with all the known values,
                        initial_W, initial_H: initialization of W and H with the feature values from M and only the labels of the training set,
                        Wrow : adjacency matrix,
                        Lr, Lrow_sex, Lrow_agesex: laplacian matrices, respectively for the age, sex and age and sex graphs,
                        Otraining: mask on the training features to know which ones are on the training set for the loss function,
                        labels: labels for every subject,
                        training_set_mask, testing_set_mask, validation_set_mask: indexes of subjects that respectively belong to the training, testing and validation sets,
                        mask_features: mask composed of 1 values for all the features and 0 for the labels to compute the Frobenius loss term on the features,
                        validation: boolean, to include a validation set or not
                        order_chebyshev_row: order to use for the Chebyshev polynomials. Default value = 18,
                        cheby: boolean, use of a GCNN or a GCN layer. 0: GCN, 1: GCNN. Default value = 1,
                        n_conv_feat: number of weights to use for the GCNN layer. Default value = 36,
                        l2_regu: coefficient to use in front of the l2 regularization term. Default value = 1,
                        dropout: dropout rate on the GCN output. Default = 0.5,
                        num_iterations: number of times that the process GCNN+LSTM is done before updating X and computing the loss function. Default value = 10,
                        gamma, gamma_H, gamma_W, gamma_e: hyperparameters of the loss function in front of all the terms. Default value = 1,
                        learning_rate: learning rate. Default value = 0.001
        """
        self.ord_row = order_chebyshev_row
        self.num_iterations = num_iterations
        self.n_conv_feat = n_conv_feat

        with tf.Graph().as_default() as g:
            tf.logging.set_verbosity(tf.logging.ERROR)
            self.graph = g
            tf.set_random_seed(0)
            with tf.device(idx_gpu):

                #loading of the laplacians
                self.Lr = tf.cast(Lr, 'float32')

                self.norm_Lr = self.Lr - tf.diag(tf.ones([Lr.shape[0]]))

                #compute all chebyshev polynomials a priori
                self.list_row_cheb_pol = list()
                self.compute_cheb_polynomials(self.norm_Lr, self.ord_row,
                                              self.list_row_cheb_pol)

                #definition of constant matrices
                self.Wrow = tf.constant(Wrow, dtype=tf.float32)
                self.M = tf.constant(M, dtype=tf.float32)
                self.Otraining = tf.constant(Otraining,
                                             dtype=tf.float32)  #training mask
                self.training_set_mask = tf.constant(training_set_mask,
                                                     dtype=tf.float32)
                self.testing_set_mask = tf.constant(testing_set_mask,
                                                    dtype=tf.float32)
                if validation:
                    self.validation_set_mask = tf.constant(validation_set_mask,
                                                           dtype=tf.float32)

                self.mask_features = tf.constant(mask_features,
                                                 dtype=tf.float32)

                self.output_nn = tf.zeros([
                    training_set_mask.shape[0],
                ])

                ##################################definition of the NN variables#####################################
                #cheby=0 #0 or 1. Using the Chebyshev decomposition for the GCNN or not
                #definition of the weights for extracting the global features
                if cheby == 0:
                    self.W_conv_W = tf.get_variable(
                        "W_conv_W",
                        shape=[initial_W.shape[1], self.n_conv_feat],
                        initializer=tf.contrib.layers.xavier_initializer())
                else:
                    self.W_conv_W = tf.get_variable(
                        "W_conv_W",
                        shape=[
                            self.ord_row * initial_W.shape[1], self.n_conv_feat
                        ],
                        initializer=tf.contrib.layers.xavier_initializer())
                self.b_conv_W = tf.Variable(tf.zeros([
                    self.n_conv_feat,
                ]))

                #recurrent N parameters
                self.W_f_u = tf.get_variable(
                    "W_f_u",
                    shape=[self.n_conv_feat, self.n_conv_feat],
                    initializer=tf.contrib.layers.xavier_initializer())
                self.W_i_u = tf.get_variable(
                    "W_i_u",
                    shape=[self.n_conv_feat, self.n_conv_feat],
                    initializer=tf.contrib.layers.xavier_initializer())
                self.W_o_u = tf.get_variable(
                    "W_o_u",
                    shape=[self.n_conv_feat, self.n_conv_feat],
                    initializer=tf.contrib.layers.xavier_initializer())
                self.W_c_u = tf.get_variable(
                    "W_c_u",
                    shape=[self.n_conv_feat, self.n_conv_feat],
                    initializer=tf.contrib.layers.xavier_initializer())
                self.U_f_u = tf.get_variable(
                    "U_f_u",
                    shape=[self.n_conv_feat, self.n_conv_feat],
                    initializer=tf.contrib.layers.xavier_initializer())
                self.U_i_u = tf.get_variable(
                    "U_i_u",
                    shape=[self.n_conv_feat, self.n_conv_feat],
                    initializer=tf.contrib.layers.xavier_initializer())
                self.U_o_u = tf.get_variable(
                    "U_o_u",
                    shape=[self.n_conv_feat, self.n_conv_feat],
                    initializer=tf.contrib.layers.xavier_initializer())
                self.U_c_u = tf.get_variable(
                    "U_c_u",
                    shape=[self.n_conv_feat, self.n_conv_feat],
                    initializer=tf.contrib.layers.xavier_initializer())
                self.b_f_u = tf.Variable(tf.zeros([
                    self.n_conv_feat,
                ]))
                self.b_i_u = tf.Variable(tf.zeros([
                    self.n_conv_feat,
                ]))
                self.b_o_u = tf.Variable(tf.zeros([
                    self.n_conv_feat,
                ]))
                self.b_c_u = tf.Variable(tf.zeros([
                    self.n_conv_feat,
                ]))

                #output parameters
                self.W_out_W = tf.get_variable(
                    "W_out_W",
                    shape=[self.n_conv_feat, initial_W.shape[1]],
                    initializer=tf.contrib.layers.xavier_initializer())
                self.b_out_W = tf.Variable(tf.zeros([
                    initial_W.shape[1],
                ]))

                #########definition of the NN
                #definition of W and H
                self.W = tf.constant(initial_W.astype('float32'))
                self.H = tf.Variable(initial_H.astype('float32'))

                self.X = tf.matmul(
                    self.W, self.H,
                    transpose_b=True)  #we may initialize it at random here
                self.list_X = list()
                self.list_X.append(tf.identity(self.X))

                #RNN
                self.h_u = tf.zeros([M.shape[0], self.n_conv_feat])
                self.c_u = tf.zeros([M.shape[0], self.n_conv_feat])

                for k in range(self.num_iterations):
                    #extraction of global features vectors
                    if cheby == 0:
                        self.final_feat_users = self.mono_conv(
                            self.Wrow, self.W, self.W_conv_W, self.b_conv_W)
                    else:  #cheby = 1
                        self.final_feat_users = self.mono_conv_cheby(
                            self.list_row_cheb_pol, self.ord_row, self.W,
                            self.W_conv_W, self.b_conv_W)

                    # row RNN
                    self.f_u = tf.sigmoid(
                        tf.matmul(self.final_feat_users, self.W_f_u) +
                        tf.matmul(self.h_u, self.U_f_u) + self.b_f_u)
                    self.i_u = tf.sigmoid(
                        tf.matmul(self.final_feat_users, self.W_i_u) +
                        tf.matmul(self.h_u, self.U_i_u) + self.b_i_u)
                    self.o_u = tf.sigmoid(
                        tf.matmul(self.final_feat_users, self.W_o_u) +
                        tf.matmul(self.h_u, self.U_o_u) + self.b_o_u)

                    self.update_c_u = tf.sigmoid(
                        tf.matmul(self.final_feat_users, self.W_c_u) +
                        tf.matmul(self.h_u, self.U_c_u) + self.b_c_u)
                    self.c_u = tf.multiply(self.f_u, self.c_u) + tf.multiply(
                        self.i_u, self.update_c_u)
                    self.h_u = tf.multiply(self.o_u, tf.sigmoid(self.c_u))

                    #compute update of matrix X
                    self.delta_W = tf.tanh(
                        tf.matmul(self.c_u, self.W_out_W) + self.b_out_W)

                    self.W += self.delta_W

                    self.X = tf.matmul(self.W, self.H, transpose_b=True)
                    self.list_X.append(
                        tf.identity(
                            tf.reshape(self.X, [
                                self.M.get_shape().as_list()[0],
                                self.M.get_shape().as_list()[1]
                            ])))
                self.X = tf.matmul(self.W, self.H, transpose_b=True)
                #########loss definition

                #computation of the accuracy term
                self.norm_X = 1 + 4 * (self.X - tf.reduce_min(self.X)) / (
                    tf.reduce_max(self.X) - tf.reduce_min(self.X))
                self.Xnormed = self.norm_tensor(self.X)
                frob_tensor = tf.multiply(self.Otraining,
                                          self.Xnormed - self.M)
                frob_tensor = tf.multiply(self.mask_features, frob_tensor)
                self.loss_frob = self.frobenius_norm_square(
                    frob_tensor) / np.sum(Otraining)

                #computation of the regularization terms
                trace_row_tensor = tf.matmul(
                    tf.matmul(self.X, self.Lr, transpose_a=True), self.X)
                self.loss_trace_row = tf.trace(trace_row_tensor) / tf.cast(
                    tf.shape(self.X)[0] * tf.shape(self.X)[1], 'float32')

                self.frob_norm_H = self.frobenius_norm_square(
                    self.H) / tf.cast(
                        tf.shape(self.H)[0] * tf.shape(self.H)[1], 'float32')
                self.frob_norm_W = self.frobenius_norm_square(
                    self.W) / tf.cast(
                        tf.shape(self.W)[0] * tf.shape(self.W)[1], 'float32')
                self.output_nn = tf.slice(
                    self.X,
                    begin=[0, self.M.get_shape().as_list()[1] - 1],
                    size=[self.M.get_shape().as_list()[0], 1])
                self.output_nn = tf.sigmoid(self.output_nn)
                output_nn_train = (tf.multiply(self.training_set_mask,
                                               self.output_nn))
                self.prediction_train = output_nn_train
                self.labels_training = tf.multiply(self.training_set_mask,
                                                   labels)

                # note: output_nn was already passed through a sigmoid above,
                # so the "logits" fed here are in fact probabilities
                self.binary_entropy = tf.losses.sigmoid_cross_entropy(
                    multi_class_labels=self.labels_training,
                    logits=self.prediction_train)

                #l2 regularization
                self.l2_regu = (
                    tf.nn.l2_loss(self.W_f_u) + tf.nn.l2_loss(self.W_i_u) +
                    tf.nn.l2_loss(self.W_o_u) + tf.nn.l2_loss(self.W_c_u) +
                    tf.nn.l2_loss(self.U_f_u) + tf.nn.l2_loss(self.U_i_u) +
                    tf.nn.l2_loss(self.U_o_u) + tf.nn.l2_loss(self.U_c_u) +
                    tf.nn.l2_loss(self.W_out_W) + tf.nn.l2_loss(self.W_conv_W))

                #training loss definition
                self.loss = (self.loss_frob + gamma * self.loss_trace_row +
                             gamma_W * self.frob_norm_W +
                             gamma_H * self.frob_norm_H +
                             gamma_e * self.binary_entropy +
                             l2_regu * self.l2_regu)

                if validation:
                    output_nn_val = (tf.multiply(self.validation_set_mask,
                                                 self.output_nn))
                    self.predictions_val = output_nn_val
                    self.labels_val = tf.multiply(self.validation_set_mask,
                                                  labels)

                output_nn_test = (tf.multiply(self.testing_set_mask,
                                              self.output_nn))
                self.predictions = output_nn_test
                self.labels_test = tf.multiply(self.testing_set_mask, labels)

                self.binary_entropy_test = tf.losses.sigmoid_cross_entropy(
                    multi_class_labels=self.labels_test,
                    logits=self.predictions)
                self.predictions_error = self.binary_entropy_test

                #definition of the solver
                self.optimizer = tf.train.AdamOptimizer(
                    learning_rate=learning_rate).minimize(self.loss)

                self.var_grad = tf.gradients(self.loss,
                                             tf.trainable_variables())
                self.norm_grad = self.frobenius_norm_square(
                    tf.concat([tf.reshape(g, [-1]) for g in self.var_grad], 0))

                # Create a session for running Ops on the Graph.
                config = tf.ConfigProto(allow_soft_placement=True)
                config.gpu_options.allow_growth = True
                self.session = tf.Session(config=config)

                # Run the Op to initialize the variables.
                init = tf.global_variables_initializer()
                self.session.run(init)
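
A minimal driver for the graph built above might look like this; it is a sketch, assuming only the attributes `session`, `optimizer`, `loss`, and `norm_grad` defined above (all inputs are baked into the graph as constants and variables, so no feed_dict is required):

# Hypothetical training loop (illustration only, not part of the source):
# run one Adam step per iteration, monitoring the loss and the squared
# gradient norm defined above.
def run_training(model, num_iter=200, log_every=20):
    for it in range(num_iter):
        _, loss_val, grad_norm = model.session.run(
            [model.optimizer, model.loss, model.norm_grad])
        if it % log_every == 0:
            print("iter %d: loss %.6f, |grad|^2 %.6f" %
                  (it, loss_val, grad_norm))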
Example #50
0
    def train_one_batch(self, x_sup, x_qry, y_lbl, step):
        with tf.GradientTape() as recon_grad, \
                tf.GradientTape() as mine_grad, \
                tf.GradientTape() as cls_grad:
            outputs = self.de_model({"x_sup": x_sup, "x_qry": x_qry})

            recon_loss = keras.losses.MeanSquaredError()(outputs["z_sup"],
                                                         outputs["z_sup_ent"])
            recon_loss += keras.losses.MeanSquaredError()(outputs["z_qry"],
                                                          outputs["z_qry_ent"])
            cls_loss = keras.losses.CategoricalCrossentropy()(y_lbl,
                                                              outputs["logit"])
            # MINE objective (Donsker-Varadhan lower bound); the small
            # epsilon keeps the log finite
            mine_loss = -(
                keras.backend.mean(outputs["d_pos"]) - keras.backend.log(
                    keras.backend.mean(keras.backend.exp(outputs["d_neg"])) +
                    1e-6))

        cls_acc = keras.backend.mean(
            keras.metrics.categorical_accuracy(y_lbl, outputs["logit"]))

        losses = {
            "recon": recon_loss.numpy(),
            "cls": cls_loss.numpy(),
            "mine": mine_loss.numpy(),
            "acc": cls_acc.numpy()
        }

        recon_grads = recon_grad.gradient(recon_loss, self.de_all)
        cls_grads = cls_grad.gradient(cls_loss, self.de_all)
        mine_grads = mine_grad.gradient(mine_loss, self.de_all)

        name_grad = {}
        name_var = {}
        for g, v in zip(recon_grads, self.de_all):
            if g is not None:
                if v.name in name_grad:
                    name_grad[v.name] += g
                else:
                    name_grad[v.name] = g
                    name_var[v.name] = v
        for g, v in zip(cls_grads, self.de_all):
            if g is not None:
                if v.name in name_grad:
                    name_grad[v.name] += g
                else:
                    name_grad[v.name] = g
                    name_var[v.name] = v

        # MINE gradients: clip each one to the smaller of its own norm and
        # the norm of the gradient accumulated so far, then reverse its sign
        # for every non-MINE variable (adversarial update)
        for g, v in zip(mine_grads, self.de_all):
            temp_grad = g
            if g is not None:
                if v.name in name_grad:
                    temp_grad = tf.stack((g, name_grad[v.name]), axis=0)
                    norm_grad = tf.norm(temp_grad, axis=(-1, -2))
                    clip_norm = tf.reduce_min(norm_grad)
                    temp_grad = tf.clip_by_norm(g, clip_norm=clip_norm)
                if "mine" not in v.name:
                    temp_grad = tf.negative(temp_grad)
                if v.name in name_grad:
                    name_grad[v.name] += temp_grad
                else:
                    name_grad[v.name] = temp_grad
                    name_var[v.name] = v

        grad_vars = [(name_grad[name], name_var[name])
                     for name in name_grad.keys()]
        self.all_optim = self.optim.apply_gradients(grad_vars)

        return losses, outputs
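
The per-variable, name-keyed gradient merging above can be factored into a small helper; a sketch, assuming eager-mode variables:

# Sketch of the accumulation pattern used above: sum gradients from
# several tapes per variable, skipping variables a loss does not touch.
def merge_gradients(grad_lists, variables):
    name_grad, name_var = {}, {}
    for grads in grad_lists:
        for g, v in zip(grads, variables):
            if g is None:
                continue  # this loss does not depend on v
            if v.name in name_grad:
                name_grad[v.name] += g
            else:
                name_grad[v.name] = g
                name_var[v.name] = v
    return [(name_grad[n], name_var[n]) for n in name_grad]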
Example #51
0
ent_gen_loss = -tf.reduce_mean(
    tf.reduce_sum(tf.multiply(Dis_prob_gen, tf.log(Dis_prob_gen)), 1))

Dis_loss = Disc_loss_real + Disc_loss_gen + 1.85 * ent_real_loss

# generator loss
pt_loss = pull_away_term(Dis_h2_tar_gen)

y_tar = tf.placeholder(tf.int32, shape=[None, Dis_dim[3]])
T_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Dis_logit_tar, labels=y_tar))
tar_thrld = tf.divide(tf.reduce_max(Dis_prob_tar_gen[:,-1]) +
                      tf.reduce_min(Dis_prob_tar_gen[:,-1]), 2)

indicator = tf.sign(
              tf.subtract(Dis_prob_tar_gen[:,-1],
                          tar_thrld))
condition = tf.greater(tf.zeros_like(indicator), indicator)
mask_tar = tf.where(condition, tf.zeros_like(indicator), indicator)
Gen_ent_loss = tf.reduce_mean(tf.multiply(tf.log(Dis_prob_tar_gen[:,-1]), mask_tar))

fm_loss = tf.reduce_mean(
    tf.sqrt(tf.reduce_sum(tf.square(Dis_logit_real - Dis_logit_gen), 1)))
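
`pull_away_term` is used above but not defined in this snippet; a common formulation (an assumption, not necessarily the author's) is the pull-away regularizer: the mean squared cosine similarity between distinct rows of a feature batch.

# Assumed implementation of pull_away_term (PT regularizer): mean squared
# cosine similarity between distinct feature rows of a batch.
def pull_away_term(feat):
    norm = tf.sqrt(tf.reduce_sum(tf.square(feat), 1, keepdims=True))
    normalized = feat / norm
    similarity = tf.matmul(normalized, normalized, transpose_b=True)
    batch_size = tf.cast(tf.shape(feat)[0], tf.float32)
    return (tf.reduce_sum(tf.square(similarity)) - batch_size) / (
        batch_size * (batch_size - 1))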
Example #52
0
FILE_PATH = os.path.join(DATA_PATH, "demo.h5")

## registration parameters
image_loss_config = {"name": "ssd"}
learning_rate = 0.01
total_iter = int(10) if args.test else int(1000)

## load image
if not os.path.exists(DATA_PATH):
    raise ValueError("Download the data using demo_data.py script")
if not os.path.exists(FILE_PATH):
    raise ValueError("Download the data using demo_data.py script")

fid = h5py.File(FILE_PATH, "r")
fixed_image = tf.cast(tf.expand_dims(fid["image"], axis=0), dtype=tf.float32)
fixed_image = (fixed_image - tf.reduce_min(fixed_image)) / (
    tf.reduce_max(fixed_image) - tf.reduce_min(fixed_image)
)  # normalisation to [0,1]

# generate a randomly-affine-transformed moving image
fixed_image_size = fixed_image.shape
transform_random = gen_rand_affine_transform(batch_size=1, scale=0.2)
grid_ref = layer_util.get_reference_grid(grid_size=fixed_image_size[1:4])
grid_random = layer_util.warp_grid(grid_ref, transform_random)
moving_image = layer_util.resample(vol=fixed_image, loc=grid_random)
# warp the labels to get ground-truth using the same random affine, for validation
fixed_labels = tf.cast(tf.expand_dims(fid["label"], axis=0), dtype=tf.float32)
moving_labels = tf.stack(
    [
        layer_util.resample(vol=fixed_labels[..., idx], loc=grid_random)
        for idx in range(fixed_labels.shape[4])
    ],
    axis=4,
)
Example #53
0
    def get_or_create(self, gaze_model, name, reuse, num_actions, layer_norm,
                      dueling):
        if name in self.models:
            assert reuse
            logger.log("QFunc model named %s is reused" % name)
        else:
            logger.log("QFunc model named %s is created" % name)
            assert not reuse
            imgs = L.Input(shape=(84, 84, 4))

            g = gaze_model(imgs)
            # per-sample min-max normalisation of the gaze map to [0, 1]
            gaze_heatmaps = L.Lambda(
                lambda x: (x - tf.reduce_min(x, [1, 2, 3], True)) /
                (tf.reduce_max(x, [1, 2, 3], True) -
                 tf.reduce_min(x, [1, 2, 3], True)))(g)
            g = gaze_heatmaps

            x = imgs
            x = L.Multiply(name="img_mul_gaze")([x, g])
            c1 = L.Conv2D(32, (8, 8),
                          strides=4,
                          padding='same',
                          activation="relu",
                          name='mul_c1')
            x = c1(x)
            c2 = L.Conv2D(64, (4, 4),
                          strides=2,
                          padding='same',
                          activation="relu",
                          name='mul_c2')
            x = c2(x)
            c3 = L.Conv2D(64, (3, 3),
                          strides=1,
                          padding='same',
                          activation="relu",
                          name='mul_c3')
            x = c3(x)
            # ============================ channel 2 ============================
            orig_x = imgs
            orig_x = L.Conv2D(32, (8, 8),
                              strides=4,
                              padding='same',
                              activation="relu")(orig_x)
            orig_x = L.Conv2D(64, (4, 4),
                              strides=2,
                              padding='same',
                              activation="relu")(orig_x)
            orig_x = L.Conv2D(64, (3, 3),
                              strides=1,
                              padding='same',
                              activation="relu")(orig_x)

            x = L.Average()([x, orig_x])
            x = L.Flatten()(x)
            if dueling:
                state_score = L.Dense(512)(x)
                if layer_norm:
                    state_score = L.BatchNormalization()(state_score)
                state_score = L.Activation('relu')(state_score)
                state_score = L.Dense(1)(state_score)
            action_score = L.Dense(512)(x)
            if layer_norm:
                logger.log(
                    "Warning: layer_norm is set to True, but Keras doesn't have it. Replacing with BatchNorm."
                )
                action_score = L.BatchNormalization()(action_score)
            action_score = L.Activation('relu')(action_score)
            last_dense = L.Dense(num_actions, name="logits")
            action_score = last_dense(action_score)
            if dueling:

                def wrapped_tf_ops(s):
                    action_score, state_score = s
                    return action_score - tf.reduce_mean(
                        action_score, 1, keep_dims=True) + state_score

                action_score = L.Lambda(wrapped_tf_ops)(
                    [action_score, state_score])
            model = Model(inputs=[imgs], outputs=[action_score, gaze_heatmaps])
            model.interesting_layers = [
                c1, c2, c3, last_dense
            ]  # export variable interesting_layers for monitoring in train.py
            self.models[name] = model
        return self.models[name]
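
The gaze-heatmap Lambda above normalises each sample of the batch to [0, 1] over its H, W, C axes; the same operation in isolation (a sketch; the epsilon against flat inputs is an addition):

# Per-sample min-max normalisation, as in the gaze_heatmaps Lambda above.
def minmax_normalize(x, eps=1e-8):
    x_min = tf.reduce_min(x, [1, 2, 3], True)
    x_max = tf.reduce_max(x, [1, 2, 3], True)
    return (x - x_min) / (x_max - x_min + eps)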
Example #54
0
    def Combined(x):
        ys = [s.Value(x) for s in self.schedules]
        return tf.reduce_min(tf.stack(ys), axis=0)
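
A usage sketch: combining two schedule-like objects by taking the pointwise minimum (the `Const` class below is hypothetical, standing in for anything exposing the assumed `Value(step)` interface):

# Hypothetical schedule objects with a Value(step) method, illustrating
# the min-combination above.
class Const:
    def __init__(self, v):
        self.v = v

    def Value(self, step):
        return tf.constant(self.v, dtype=tf.float32)

ys = [s.Value(0) for s in [Const(0.1), Const(0.05)]]
print(tf.reduce_min(tf.stack(ys), axis=0))  # -> 0.05 at every step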
Example #55
0
def spread_network(x):
    # Use these as globals so we can track the minimum and maximum of the
    # entire data set
    global tf_min, tf_max
    # Dense layer
    layer_1 = tf.layers.dense(
        x,
        100,
        activation=tf.nn.relu,
        bias_initializer=tf.zeros_initializer(),
        kernel_initializer=tf.glorot_uniform_initializer())  # input layer

    tf_min = tf.minimum(tf_min, tf.reduce_min(layer_1, axis=[0]))
    tf_max = tf.maximum(tf_max, tf.reduce_max(layer_1, axis=[0]))

    tf_teller = tf.subtract(layer_1, tf_min)  # numerator ("teller")
    tf_noemer = tf.subtract(tf_max, tf_min)   # denominator ("noemer")
    tf_prime = tf.divide(tf_teller, tf_noemer)
    tf_prime2 = tf.multiply(tf_prime, tf.to_float(_spread_factor))
    x_prime = tf_prime2

    # Duplicate x' more for spreading
    # Example:
    # [[a,b],[c,d]] -> [[a,b],[c,d],[a,b],[c,d]]
    x_prime = tf.reshape(x_prime, [-1])
    tile = tf.tile(x_prime, _spread_factor)
    tile = tf.reshape(tile, [_spread_factor[0], tf.shape(x_prime)[0]])

    # Reshape as follows:
    # [[a,b],[c,d],[a,b],[c,d]] -> [a,a,b,b,c,c,d,d]
    tr = tf.transpose(tile, perm=[1, 0])
    x_grave_spreaded = tf.reshape(tr, [-1])

    # Create centroids:
    # [0.5, 1.5, 0.5, 1.5 etc]
    half = tf.constant(0.5)
    step = tf.constant(1.0)
    end = tf.subtract(tf.to_float(_spread_factor)[0], half)
    bound = tf.reshape(_spread_factor, [])
    centroids = tf.tile(tf.range(half, bound, step), [tf.shape(x_prime)[0]])

    # Calculate max(0, 1 - abs(c - x'))
    num_features = tf.shape(centroids)[0]
    absolute = tf.abs(tf.subtract(centroids, x_grave_spreaded))
    right = tf.subtract(tf.ones(num_features), absolute)
    nc = tf.maximum(tf.zeros(num_features), right)

    # Perform the function nc(x')
    result = tf.where(
        tf.logical_or(
            tf.logical_and(tf.equal(centroids, half),
                           tf.less(x_grave_spreaded, centroids)),
            tf.logical_and(tf.equal(centroids, end),
                           tf.greater(x_grave_spreaded, centroids))),
        tf.ones(tf.shape(x_grave_spreaded)[0]), nc)

    # Reshape the data
    num_spreaded_features = tf.multiply(_spread_factor[0],
                                        tf.shape(layer_1)[1])
    spread_layer = tf.reshape(result,
                              [tf.shape(layer_1)[0], num_spreaded_features])

    # Next hidden layer
    # layer_3 = tf.add(tf.matmul(spread_layer, weights['w2']), biases['b2'])
    # layer_3 = tf.nn.relu(layer_3)
    #
    # layer_4 = tf.add(tf.matmul(layer_3, weights['w3']), biases['b3'])
    # layer_4 = tf.nn.relu(layer_4)

    # Output fully connected layer with a neuron for each class
    out_layer = tf.add(tf.matmul(spread_layer, weights['out']), biases['out'])
    # out_layer = tf.nn.relu(out_layer)
    return out_layer
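
The core of the spreading step is the triangular membership max(0, 1 - |c - x'|); evaluated on a toy input with spread factor 2 (a sketch, assuming eager execution):

# Toy illustration of the centroid membership used above: centroids are
# [0.5, 1.5] and each value activates only nearby centroids.
x_prime = tf.constant([0.2, 1.3])  # values already scaled into [0, 2]
centroids = tf.constant([0.5, 1.5])
x_spread = tf.reshape(tf.tile(x_prime[:, None], [1, 2]), [-1])  # a,a,b,b
c_tiled = tf.tile(centroids, [2])                               # c1,c2,c1,c2
nc = tf.maximum(0.0, 1.0 - tf.abs(c_tiled - x_spread))
# nc == [0.7, 0.0, 0.2, 0.8]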
Example #56
0
    def train_cnn(self):
        y_predict = self.model()
        print(">>> input batch predict shape: {}".format(y_predict.shape))
        print(">>> End model test")
        # probability and loss computation
        with tf.name_scope('cost'):
            cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=y_predict, labels=self.Y))
        # gradient descent
        with tf.name_scope('train'):
            optimizer = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost)
        # accuracy computation
        predict = tf.reshape(y_predict, [-1, self.max_captcha, self.char_set_len])  # predictions
        max_idx_p = tf.argmax(predict, 2)  # predicted character indices
        max_idx_l = tf.argmax(tf.reshape(self.Y, [-1, self.max_captcha, self.char_set_len]), 2)  # label indices
        correct_pred = tf.equal(max_idx_p, max_idx_l)
        with tf.name_scope('char_acc'):
            accuracy_char_count = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        with tf.name_scope('image_acc'):
            accuracy_image_count = tf.reduce_mean(tf.reduce_min(tf.cast(correct_pred, tf.float32), axis=1))
        # model saver
        saver = tf.train.Saver()
        with tf.Session() as sess:
            init = tf.global_variables_initializer()
            sess.run(init)
            # restore a previous model if one exists
            if os.path.exists(self.model_save_dir):
                try:
                    saver.restore(sess, self.model_save_dir)
                # catch the error raised when the model directory holds no checkpoint files
                except ValueError:
                    print("Model directory is empty; a new model will be created")
            else:
                pass
            # write logs
            tf.summary.FileWriter("logs/", sess.graph)

            step = 1
            for i in range(self.cycle_stop):
                batch_x, batch_y = self.get_batch(i, size=self.train_batch_size)
                # gradient-descent training step
                _, cost_ = sess.run([optimizer, cost],
                                    feed_dict={self.X: batch_x, self.Y: batch_y, self.keep_prob: 0.75})
                if step % 10 == 0:
                    # evaluation on the training set
                    batch_x_test, batch_y_test = self.get_batch(i, size=self.train_batch_size)
                    acc_char = sess.run(accuracy_char_count, feed_dict={self.X: batch_x_test, self.Y: batch_y_test, self.keep_prob: 1.})
                    acc_image = sess.run(accuracy_image_count, feed_dict={self.X: batch_x_test, self.Y: batch_y_test, self.keep_prob: 1.})
                    print("第{}次训练 >>> ".format(step))
                    print("[训练集] 字符准确率为 {:.5f} 图片准确率为 {:.5f} >>> loss {:.10f}".format(acc_char, acc_image, cost_))

                    # with open("loss_train.csv", "a+") as f:
                    #     f.write("{},{},{},{}\n".format(step, acc_char, acc_image, cost_))

                    # evaluation on the validation set
                    batch_x_verify, batch_y_verify = self.get_verify_batch(size=self.test_batch_size)
                    acc_char = sess.run(accuracy_char_count, feed_dict={self.X: batch_x_verify, self.Y: batch_y_verify, self.keep_prob: 1.})
                    acc_image = sess.run(accuracy_image_count, feed_dict={self.X: batch_x_verify, self.Y: batch_y_verify, self.keep_prob: 1.})
                    print("[验证集] 字符准确率为 {:.5f} 图片准确率为 {:.5f} >>> loss {:.10f}".format(acc_char, acc_image, cost_))

                    # with open("loss_test.csv", "a+") as f:
                    #     f.write("{}, {},{},{}\n".format(step, acc_char, acc_image, cost_))

                    # save and stop once image accuracy exceeds acc_stop
                    if acc_image > self.acc_stop:
                        saver.save(sess, self.model_save_dir)
                        print("Validation accuracy reached the stop threshold; model saved")
                        break
                # save a checkpoint every cycle_save iterations
                if i % self.cycle_save == 0:
                    saver.save(sess, self.model_save_dir)
                    print("定时保存模型成功")
                step += 1
            saver.save(sess, self.model_save_dir)
Example #57
0
def calculate_box_3d_info(vec_dir, vec_dir_mag,
                          p1, p2, p3, p4, midpoint):
    """Calculates the box_3d centroid xz, l, w, and ry from the 4 points of
    a box_4c. To calculate length and width, points are projected onto the
    direction vector, and its normal. The centroid is calculated by adding
    vectors of half the length, and the width difference along the normal to
    the starting midpoint. ry is calculated with atan2 of the direction vector.

    Args:
        vec_dir: vector of longest box_4c midpoint to midpoint
        vec_dir_mag: magnitude of the direction vector
        p1: point 1
        p2: point 2
        p3: point 3
        p4: point 4
        midpoint: starting midpoint

    Returns:
        box_3d info (centroid, length_out, width_out, ry_out)
    """
    vec_dir_norm = vec_dir / tf.reshape(vec_dir_mag, [-1, 1])

    vec_mid_p1 = p1 - midpoint
    vec_mid_p2 = p2 - midpoint
    vec_mid_p3 = p3 - midpoint
    vec_mid_p4 = p4 - midpoint

    l1 = tf.reduce_sum(tf.multiply(vec_mid_p1, vec_dir_norm), axis=1)
    l2 = tf.reduce_sum(tf.multiply(vec_mid_p2, vec_dir_norm), axis=1)
    l3 = tf.reduce_sum(tf.multiply(vec_mid_p3, vec_dir_norm), axis=1)
    l4 = tf.reduce_sum(tf.multiply(vec_mid_p4, vec_dir_norm), axis=1)
    all_lengths = tf.stack([l1, l2, l3, l4], axis=1)

    min_l = tf.reduce_min(all_lengths, axis=1, keep_dims=True)
    max_l = tf.reduce_max(all_lengths, axis=1, keep_dims=True)
    length_out = max_l - min_l

    vec_dir_ortho_norm = tf.stack([-vec_dir_norm[:, 1],
                                   vec_dir_norm[:, 0]], axis=1)
    w1 = tf.reduce_sum(tf.multiply(vec_mid_p1,
                                   vec_dir_ortho_norm), axis=1)
    w2 = tf.reduce_sum(tf.multiply(vec_mid_p2,
                                   vec_dir_ortho_norm), axis=1)
    w3 = tf.reduce_sum(tf.multiply(vec_mid_p3,
                                   vec_dir_ortho_norm), axis=1)
    w4 = tf.reduce_sum(tf.multiply(vec_mid_p4,
                                   vec_dir_ortho_norm), axis=1)
    all_widths = tf.stack([w1, w2, w3, w4], axis=1)

    min_w = tf.reduce_min(all_widths, axis=1)
    max_w = tf.reduce_max(all_widths, axis=1)
    w_diff = tf.reshape(max_w + min_w, [-1, 1])
    width_out = tf.reshape(max_w - min_w, [-1, 1])

    ry_out = tf.reshape(-tf.atan2(vec_dir[:, 1], vec_dir[:, 0]), [-1, 1])

    # New centroid
    centroid = midpoint +\
        vec_dir_norm * (min_l + max_l) / 2.0 + \
        vec_dir_ortho_norm * w_diff

    return centroid, length_out, width_out, ry_out
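
A worked call on a single axis-aligned 2 x 1 box (a sketch, evaluated with a TF1 session; the numbers follow directly from the projections described in the docstring):

# Sketch: box corners (0,0),(2,0),(2,1),(0,1); the direction vector runs
# from the side midpoint (0, 0.5) to (2, 0.5).
p1 = tf.constant([[0.0, 0.0]])
p2 = tf.constant([[2.0, 0.0]])
p3 = tf.constant([[2.0, 1.0]])
p4 = tf.constant([[0.0, 1.0]])
midpoint = tf.constant([[0.0, 0.5]])
vec_dir = tf.constant([[2.0, 0.0]])
vec_dir_mag = tf.constant([2.0])

out = calculate_box_3d_info(vec_dir, vec_dir_mag, p1, p2, p3, p4, midpoint)
with tf.Session() as sess:
    centroid, length, width, ry = sess.run(out)
# expected: centroid [[1.0, 0.5]], length [[2.0]], width [[1.0]], ry [[0.0]]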
Example #58
0
    def train(imPath, validPath, testPath, logPath, modelPath, pmPath, nTrain,
              nValid, nTest, restoreVariables, nSteps, gpuIndex, testPMIndex):
        os.environ['CUDA_VISIBLE_DEVICES'] = '%d' % gpuIndex

        outLogPath = logPath
        trainWriterPath = pathjoin(logPath, 'Train')
        validWriterPath = pathjoin(logPath, 'Valid')
        outModelPath = pathjoin(modelPath, 'model.ckpt')
        outPMPath = pmPath

        batchSize = UNet2D.hp['batchSize']
        imSize = UNet2D.hp['imSize']
        nChannels = UNet2D.hp['nChannels']
        nClasses = UNet2D.hp['nClasses']

        # --------------------------------------------------
        # data
        # --------------------------------------------------
        nAug = 6
        Train = np.zeros((nTrain, imSize, imSize, nAug, nChannels))
        Valid = np.zeros((nValid, imSize, imSize, nAug, nChannels))
        Test = np.zeros((nTest, imSize, imSize, nAug, nChannels))
        LTrain = np.zeros((nTrain, imSize, imSize, nClasses))
        LValid = np.zeros((nValid, imSize, imSize, nClasses))
        LTest = np.zeros((nTest, imSize, imSize, nClasses))
        WTrain = np.zeros((nTrain, imSize, imSize, nClasses))
        WValid = np.zeros((nValid, imSize, imSize, nClasses))
        WTest = np.zeros((nTest, imSize, imSize, nClasses))

        print('loading data, computing mean / st dev')
        if not os.path.exists(modelPath):
            os.makedirs(modelPath)
        # if restoreVariables:
        # 	datasetMean = loadData(pathjoin(modelPath,'datasetMean.data'))
        # 	datasetStDev = loadData(pathjoin(modelPath,'datasetStDev.data'))
        # else:
        datasetMean = 0.25
        datasetStDev = 0.25
        bgWeight = 1
        contourWeight = 2
        nucleiWeight = 7
        intersectWeight = 15

        # for iSample in range(nTrain + nValid + nTest):
        #     I = im2double(tifread('%s/I%05d_Img.tif' % (imPath, iSample)))
        #     datasetMean += np.mean(I)
        #     datasetStDev += np.std(I)
        # datasetMean /= (nTrain + nValid + nTest)
        # datasetStDev /= (nTrain + nValid + nTest)
        saveData(datasetMean, pathjoin(modelPath, 'datasetMean.data'))
        saveData(datasetStDev, pathjoin(modelPath, 'datasetStDev.data'))

        perm = np.arange(nTrain)
        np.random.shuffle(perm)

        for iSample in range(0, nTrain):
            path = '%s/I%05d_Img.tif' % (imPath, perm[iSample])
            for iChan in range(nChannels):
                for iAug in range(nAug):
                    im = im2double(
                        skio.imread(path, img_num=iAug + nAug * iChan))
                    Train[iSample, :, :, iAug,
                          iChan] = (im - datasetMean) / datasetStDev
            path = '%s/I%05d_Ant.tif' % (imPath, perm[iSample])
            im = tifread(path)
            path = '%s/I%05d_wt.tif' % (imPath, perm[iSample])
            W = tifread(path)
            for i in range(nClasses):
                LTrain[iSample, :, :, i] = (im == i + 1)
                if i == 1:
                    WTrain[iSample, :, :,
                           i] = (W * intersectWeight) + contourWeight
                elif i == 2:
                    WTrain[iSample, :, :, i] = (W * 0) + nucleiWeight
                else:
                    WTrain[iSample, :, :, i] = (W * 0) + bgWeight

        permV = np.arange(nValid)
        np.random.shuffle(permV)
        for iSample in range(0, nValid):
            path = '%s/I%05d_Img.tif' % (validPath, permV[iSample])
            # im = im2double(tifread(path))
            for iChan in range(nChannels):
                for iAug in range(nAug):
                    im = im2double(
                        skio.imread(path, img_num=iAug + nAug * iChan))
                    Valid[iSample, :, :, iAug,
                          iChan] = (im - datasetMean) / datasetStDev
            path = '%s/I%05d_Ant.tif' % (validPath, permV[iSample])
            im = tifread(path)
            path = '%s/I%05d_wt.tif' % (validPath, permV[iSample])
            W = tifread(path)
            for i in range(nClasses):
                LValid[iSample, :, :, i] = (im == i + 1)
                if i == 1:
                    WValid[iSample, :, :,
                           i] = (W * intersectWeight) + contourWeight
                elif i == 2:
                    WValid[iSample, :, :, i] = (W * 0) + nucleiWeight
                else:
                    WValid[iSample, :, :, i] = (W * 0) + bgWeight

        for iSample in range(0, nTest):
            path = '%s/I%05d_Img.tif' % (testPath, iSample)
            for iChan in range(nChannels):
                for iAug in range(nAug):
                    im = im2double(
                        skio.imread(path, img_num=iAug + nAug * iChan))
                    Test[iSample, :, :, iAug,
                         iChan] = (im - datasetMean) / datasetStDev
            path = '%s/I%05d_Ant.tif' % (testPath, iSample)
            im = tifread(path)
            path = '%s/I%05d_wt.tif' % (testPath, iSample)
            W = tifread(path)
            for i in range(nClasses):
                LTest[iSample, :, :, i] = (im == i + 1)
                if i == 1:
                    WTest[iSample, :, :,
                          i] = (W * intersectWeight) + contourWeight
                elif i == 2:
                    WTest[iSample, :, :, i] = (W * 0) + nucleiWeight
                else:
                    WTest[iSample, :, :, i] = (W * 0) + bgWeight

        # --------------------------------------------------
        # optimization
        # --------------------------------------------------

        tfLabels = tf.placeholder("float",
                                  shape=[None, imSize, imSize, nClasses],
                                  name='labels')
        tfWeights = tf.placeholder("float",
                                   shape=[None, imSize, imSize, nClasses],
                                   name='weights')
        globalStep = tf.Variable(0, trainable=False)
        learningRate0 = 0.00005
        decaySteps = 5000
        decayRate = 0.98
        learningRate = tf.train.exponential_decay(learningRate0,
                                                  globalStep,
                                                  decaySteps,
                                                  decayRate,
                                                  staircase=True)

        with tf.name_scope('optim'):
            l2_loss = tf.losses.get_regularization_loss()
            eps = 1e-7
            log_p = tf.log(tf.clip_by_value(UNet2D.nn, eps, 1 - eps))
            loss = tf.reduce_mean(-tf.reduce_sum(
                tf.multiply(tf.cast(tfWeights, tf.float32),
                            tf.multiply(tf.cast(tfLabels, tf.float32), log_p)),
                3)) + l2_loss

            updateOps = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            # optimizer = tf.train.MomentumOptimizer(1e-3,0.9)
            # optimizer = tf.train.MomentumOptimizer(learningRate,0.99)
            optimizer = tf.train.AdamOptimizer(learning_rate=learningRate)
            with tf.control_dependencies(updateOps):
                optOp = optimizer.minimize(loss, global_step=globalStep)

        # for g, v in gradients:
        # 	tf.summary.histogram(v.name, v)
        # 	tf.summary.histogram(v.name + '_grad', g)

        with tf.name_scope('eval'):
            error = []
            for iClass in range(nClasses):
                labels0 = tf.reshape(
                    tf.to_int32(
                        tf.slice(tfLabels, [0, 0, 0, iClass],
                                 [-1, -1, -1, 1])),
                    [batchSize, imSize, imSize])
                predict0 = tf.reshape(
                    tf.to_int32(tf.equal(tf.argmax(UNet2D.nn, 3), iClass)),
                    [batchSize, imSize, imSize])
                correct = tf.multiply(labels0, predict0)
                nCorrect0 = tf.reduce_sum(correct)
                nLabels0 = tf.reduce_sum(labels0)
                error.append(1 -
                             tf.to_float(nCorrect0) / tf.to_float(nLabels0))
            errors = tf.tuple(error)

        # --------------------------------------------------
        # inspection
        # --------------------------------------------------

        with tf.name_scope('scalars'):
            tf.summary.scalar('avg_cross_entropy', loss)
            for iClass in range(nClasses):
                tf.summary.scalar('avg_pixel_error_%d' % iClass, error[iClass])
            tf.summary.scalar('learning_rate', learningRate)
        with tf.name_scope('images'):
            split0 = tf.slice(UNet2D.nn, [0, 0, 0, 1], [-1, -1, -1, 1])
            split1 = tf.slice(UNet2D.tfData, [0, 0, 0, 0], [-1, -1, -1, 1])

            planeImN = tf.div(
                tf.subtract(split1,
                            tf.reduce_min(split1, axis=(1, 2),
                                          keep_dims=True)),
                tf.subtract(tf.reduce_max(split1, axis=(1, 2), keep_dims=True),
                            tf.reduce_min(split1, axis=(1, 2),
                                          keep_dims=True)))
            splitL = tf.slice(UNet2D.tfData, [0, 0, 0, 1], [-1, -1, -1, 1])
            planeImN2 = tf.div(
                tf.subtract(splitL,
                            tf.reduce_min(splitL, axis=(1, 2),
                                          keep_dims=True)),
                tf.subtract(tf.reduce_max(splitL, axis=(1, 2), keep_dims=True),
                            tf.reduce_min(splitL, axis=(1, 2),
                                          keep_dims=True)))

            plane = tf.concat([planeImN, split0], 2)
            split2 = tf.slice(UNet2D.nn, [0, 0, 0, 2], [-1, -1, -1, 1])

            # planeImN2 = tf.div(tf.subtract(split3, tf.reduce_min(split3, axis=(1, 2), keep_dims=True)),
            #                   tf.subtract(tf.reduce_max(split3, axis=(1, 2), keep_dims=True),
            #                               tf.reduce_min(split3, axis=(1, 2), keep_dims=True)))
            plane = tf.concat([plane, split2, planeImN2], 2)
            tf.summary.image('impm', plane, max_outputs=4)
        merged = tf.summary.merge_all()

        # --------------------------------------------------
        # session
        # --------------------------------------------------

        saver = tf.train.Saver()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        sess = tf.Session(
            config=config
        )  # config parameter needed to save variables when using GPU

        if os.path.exists(outLogPath):
            shutil.rmtree(outLogPath)
        trainWriter = tf.summary.FileWriter(trainWriterPath, sess.graph)
        validWriter = tf.summary.FileWriter(validWriterPath, sess.graph)

        if restoreVariables:
            saver.restore(sess, outModelPath)
            print("Model restored.")
        else:
            sess.run(tf.global_variables_initializer())

        # --------------------------------------------------
        # train
        # --------------------------------------------------

        batchData = np.zeros((batchSize, imSize, imSize, nChannels))
        batchLabels = np.zeros((batchSize, imSize, imSize, nClasses))
        batchWeights = np.zeros((batchSize, imSize, imSize, nClasses))
        permT = np.arange(nTrain)
        np.random.shuffle(permT)

        permV = np.arange(nValid)
        np.random.shuffle(permV)

        maxBrig = 1 * datasetStDev
        maxCont = 0.1 * datasetStDev
        jT = 0
        jV = 0
        epochCounter = 1
        for i in range(nSteps):
            # train

            for j in range(batchSize):
                fBrig = maxBrig * np.float_power(
                    -1,
                    np.random.rand() < 0.5) * np.random.rand()
                fCont = 1 + maxCont * np.float_power(
                    -1,
                    np.random.rand() < 0.5) * np.random.rand()
                image = np.zeros((imSize, imSize, nChannels))
                for iChan in range(nChannels):
                    image[:, :,
                          iChan] = Train[permT[jT + j], :, :,
                                         math.floor(6 * np.random.rand()),
                                         iChan] * fCont + fBrig
                    # image[:, :, iChan] = Train[permT[jT + j], :, :, 0,iChan]
                batchData[j, :, :, :] = image

                batchLabels[j, :, :, :] = LTrain[permT[jT + j], :, :, :]
                batchWeights[j, :, :, :] = WTrain[permT[jT + j], :, :, :]
            summary, _ = sess.run(
                [merged, optOp],
                feed_dict={
                    UNet2D.tfData: batchData,
                    tfLabels: batchLabels,
                    tfWeights: batchWeights,
                    UNet2D.tfTraining: 1
                })
            jT = jT + batchSize
            if jT > (nTrain - batchSize - 1):
                jT = 0
                np.random.shuffle(permT)
                epochCounter = epochCounter + 1
            if np.mod(i, 20) == 0:
                trainWriter.add_summary(summary, i)

            # validation
            for j in range(batchSize):
                image = np.zeros((imSize, imSize, nChannels))
                image[:, :, 0] = Valid[permV[jV + j], :, :,
                                       math.floor(6 * np.random.rand()), 0]
                image[:, :, 1] = Valid[permV[jV + j], :, :, 0, 1]
                batchData[j, :, :, :] = image
                batchLabels[j, :, :, :] = LValid[permV[jV + j], :, :, :]
                batchWeights[j, :, :, :] = WValid[permV[jV + j], :, :, :]
            summary, es = sess.run(
                [merged, errors],
                feed_dict={
                    UNet2D.tfData: batchData,
                    tfLabels: batchLabels,
                    tfWeights: batchWeights,
                    UNet2D.tfTraining: 0
                })
            jV = jV + batchSize
            if jV > (nValid - batchSize - 1):
                jV = 0
                np.random.shuffle(permV)
            if np.mod(i, 20) == 0:
                validWriter.add_summary(summary, i)

            e = np.mean(es)
            print('step %05d, e: %f' % (i, e) + ', epoch: ' +
                  str(epochCounter))

            if i == 0:
                if restoreVariables:
                    lowestError = e
                else:
                    lowestError = np.inf

            if np.mod(i, 1000) == 0 and e < lowestError:
                lowestError = e
                print("Model saved in file: %s" %
                      saver.save(sess, outModelPath))

        # --------------------------------------------------
        # save hyper-parameters, clean-up
        # --------------------------------------------------

        saveData(UNet2D.hp, pathjoin(modelPath, 'hp.data'))

        trainWriter.close()
        validWriter.close()
        sess.close()

        # --------------------------------------------------
        # test
        # --------------------------------------------------
        os.environ['CUDA_VISIBLE_DEVICES'] = '%d' % gpuIndex
        tf.reset_default_graph()
        variablesPath = pathjoin(modelPath, 'model.ckpt')
        outPMPath = pmPath

        hp = loadData(pathjoin(modelPath, 'hp.data'))
        UNet2D.setupWithHP(hp)
        saver = tf.train.Saver()
        sess = tf.Session(
            config=tf.ConfigProto(allow_soft_placement=True)
        )  # config parameter needed to save variables when using GPU
        saver.restore(sess, variablesPath)
        print("Model restored.")

        if not os.path.exists(outPMPath):
            os.makedirs(outPMPath)
        for iAug in range(6):
            for i in range(nTest):
                j = np.mod(i, batchSize)
                image = np.zeros((imSize, imSize, nChannels))
                image[:, :, 0] = Test[i, :, :, iAug, 0]
                image[:, :, 1] = Test[i, :, :, 0, 1]
                batchData[j, :, :, :] = image
                batchLabels[j, :, :, :] = LTest[i, :, :, :]

                if j == batchSize - 1 or i == nTest - 1:

                    output = sess.run(UNet2D.nn,
                                      feed_dict={
                                          UNet2D.tfData: batchData,
                                          UNet2D.tfTraining: 0
                                      })

                    for k in range(j + 1):
                        pm = output[k, :, :, 2]
                        gt = batchLabels[k, :, :, 2]
                        im = np.sqrt(normalize(batchData[k, :, :, 0]))
                        imwrite(
                            np.uint8(255 * np.concatenate(
                                (im, np.concatenate(
                                    (pm, gt), axis=1)), axis=1)),
                            '%s/I%05d_%d_Nuc.png' %
                            (outPMPath, i - j + k + 1, iAug))

                    for k in range(j + 1):
                        pm = output[k, :, :, 1]
                        gt = batchLabels[k, :, :, 1]
                        im = np.sqrt(normalize(batchData[k, :, :, 1]))
                        imwrite(
                            np.uint8(255 * np.concatenate(
                                (im, np.concatenate(
                                    (pm, gt), axis=1)), axis=1)),
                            '%s/I%05d_%d_Con.png' %
                            (outPMPath, i - j + k + 1, iAug))
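
The brightness/contrast jitter in the training loop above draws a random sign via np.float_power(-1, np.random.rand() < 0.5); an equivalent, more direct formulation (a sketch):

# Equivalent random-sign perturbation: a uniform magnitude in [0, max_mag)
# with a coin-flip sign, matching the fBrig/fCont draws above.
import numpy as np

def random_signed(max_mag):
    sign = np.random.choice([-1.0, 1.0])
    return sign * max_mag * np.random.rand()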
Example #59
0
#portfolioSigma = tf.sqrt(portfolioCov, name = 'portsigma')
portfolioRate = tf.multiply(tfWeight, tfRate, name='portr')

#(1,n)
#marginalRisk = tf.gradients(portfolioSigma, tfWeight)

#(1,n)
#riskContribution = tf.multiply(tfWeight, marginalRisk[0], name = 'riskcontri')
#riskConcentration = tf.zeros(1, dtype = tf.float32)

#for p in range(nAsset):
#for q in range(nAsset):
#riskConcentration += (riskContribution[0, p] - riskContribution[0, q])**2

loss = portfolioCov - tf.minimum(
    tf.reduce_min(tfWeight), 0) + (tf.reduce_sum(tfWeight) - 1)**2 + (
        tf.reduce_sum(portfolioRate) - float(0.06 / 360))**2
optimizer = tf.train.GradientDescentOptimizer(0.1)
optimize = optimizer.minimize(loss)

freq = 'M'
# firstTradeDateInMonth = tradeDate.groupby(pd.TimeGrouper(freq='M')).min()
firstTradeDateInMonth = tradeDate.groupby(pd.TimeGrouper(freq=freq)).min()

portfolioRets = []
mdates = []
optimWeights = []
print("-----" + str(len(firstTradeDateInMonth)))
init = tf.global_variables_initializer()
needReCal = True
calConvCount = 3
Example #60
0
# Author: Jintao Huang
# Email: [email protected]
# Date: 2021-6-1

import tensorflow as tf

# ----------------- max min sort
x = tf.constant([1, 2])
y = tf.constant([0, 3])
print(tf.reduce_max(x))
print(tf.maximum(x, y))
print(tf.argmax(x))
# tf.Tensor(2, shape=(), dtype=int32)
# tf.Tensor([1 3], shape=(2,), dtype=int32)
# tf.Tensor(1, shape=(), dtype=int64)
print(tf.reduce_min(x))
print(tf.minimum(x, y))
print(tf.argmin(x))
# tf.Tensor(1, shape=(), dtype=int32)
# tf.Tensor([0 2], shape=(2,), dtype=int32)
# tf.Tensor(0, shape=(), dtype=int64)
print(tf.sort(x, direction='DESCENDING'))  # returns a new tensor; x is unchanged
print(x)
print(tf.argsort(x))
# tf.Tensor([2 1], shape=(2,), dtype=int32)
# tf.Tensor([1 2], shape=(2,), dtype=int32)
# tf.Tensor([0 1], shape=(2,), dtype=int32)
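
Related, not in the original snippet: tf.math.top_k returns both the largest values and their indices.

# top-k (an addition for completeness): values and indices of the k largest
values, indices = tf.math.top_k(x, k=1)
print(values)   # tf.Tensor([2], shape=(1,), dtype=int32)
print(indices)  # tf.Tensor([1], shape=(1,), dtype=int32)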