def _testMultipleReduceJoin(self, input_array, reduction_indices, separator=" "):
        """Tests reduce_join for one input and multiple reduction_indices.

        Does so by comparing the output to that from nested reduce_join
        operations. The correctness of single-dimension reduce_join is
        verified by other tests below using _testReduceJoin.

        Args:
          input_array: The input to test.
          reduction_indices: The indices to reduce.
          separator: The separator to use when joining.
        """
        num_dims = len(input_array.shape)
        truth_red_indices = reduction_indices or list(reversed(range(num_dims)))
        with self.test_session():
            output = tf.reduce_join(
                inputs=input_array, reduction_indices=reduction_indices, keep_dims=False, separator=separator
            )
            output_keep_dims = tf.reduce_join(
                inputs=input_array, reduction_indices=reduction_indices, keep_dims=True, separator=separator
            )

            truth = input_array
            for index in truth_red_indices:
                truth = tf.reduce_join(inputs=truth, reduction_indices=index, keep_dims=True, separator=separator)
            truth_squeezed = tf.squeeze(truth, squeeze_dims=truth_red_indices)
            output_array = output.eval()
            output_keep_dims_array = output_keep_dims.eval()
            truth_array = truth.eval()
            truth_squeezed_array = truth_squeezed.eval()
        self.assertAllEqualUnicode(truth_array, output_keep_dims_array)
        self.assertAllEqualUnicode(truth_squeezed_array, output_array)
        self.assertAllEqual(truth.get_shape(), output_keep_dims.get_shape())
        self.assertAllEqual(truth_squeezed.get_shape(), output.get_shape())
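
The nested-join equivalence this test relies on can also be checked by hand; a minimal sketch (TF 1.x, values are illustrative):

import tensorflow as tf

a = tf.constant([["a", "b"], ["c", "d"]])
joined_once = tf.reduce_join(a, reduction_indices=[1, 0])
joined_nested = tf.squeeze(
    tf.reduce_join(
        tf.reduce_join(a, reduction_indices=1, keep_dims=True),
        reduction_indices=0, keep_dims=True))
with tf.Session() as sess:
    # Both print b'abcd'.
    print(sess.run(joined_once))
    print(sess.run(joined_nested))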
Example #2
  def create_metric_ops(self, _inputs, labels, predictions):
    """Creates (value, update_op) tensors
    """
    with tf.variable_scope(self._name):

      # Join tokens into single strings
      predictions_flat = tf.reduce_join(
          predictions["predicted_tokens"], 1, separator=self._separator)
      labels_flat = tf.reduce_join(
          labels["target_tokens"], 1, separator=self._separator)

      sources_value, sources_update = accumulate_strings(
          values=predictions_flat, name="sources")
      targets_value, targets_update = accumulate_strings(
          values=labels_flat, name="targets")

      metric_value = tf.py_func(
          func=self._py_func,
          inp=[sources_value, targets_value],
          Tout=tf.float32,
          name="value")

    with tf.control_dependencies([sources_update, targets_update]):
      update_op = tf.identity(metric_value, name="update_op")

    return metric_value, update_op
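
The returned pair follows the usual tf.metrics contract: run update_op once per batch to accumulate strings, then read the metric value. A hypothetical driver sketch (batches is an assumed iterable of feed dicts, and the string accumulators are assumed to be TF local variables):

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # initialize metric accumulators
    for feed_dict in batches:                   # assumed batch iterator
        sess.run(update_op, feed_dict=feed_dict)
    print(sess.run(metric_value))               # final metric value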
Example #3
 def testZeroDims(self):
   valid_truth_shape = [0]
   with self.test_session():
     inputs = np.zeros([0, 1], dtype=str)
     with self.assertRaisesRegexp(ValueError, "dimension 0 with size 0"):
       tf.reduce_join(inputs=inputs, reduction_indices=0)
     valid = tf.reduce_join(inputs=inputs, reduction_indices=1)
     valid_array_shape = valid.eval().shape
     self.assertAllEqualUnicode(valid_truth_shape, valid_array_shape)
Example #4
 def testInvalidArgsUnknownShape(self):
   with self.test_session():
     placeholder = tf.placeholder(tf.string, name="placeholder")
     index_too_high = tf.reduce_join(placeholder, reduction_indices=1)
     duplicate_index = tf.reduce_join(placeholder, reduction_indices=[-1, 1])
     with self.assertRaisesOpError("Invalid reduction dimension 1"):
       index_too_high.eval(feed_dict={placeholder.name: [""]})
     with self.assertRaisesOpError("Duplicate reduction dimension 1"):
       duplicate_index.eval(feed_dict={placeholder.name: [[""]]})
Example #5
def main(_):
    path_to_image_file = FLAGS.image
    path_to_restore_checkpoint_file = FLAGS.restore_checkpoint

    image = tf.image.decode_jpeg(tf.read_file(path_to_image_file), channels=3)
    image = tf.reshape(image, [64, 64, 3])
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    image = tf.multiply(tf.subtract(image, 0.5), 2)
    image = tf.image.resize_images(image, [54, 54])
    images = tf.reshape(image, [1, 54, 54, 3])

    length_logits, digits_logits = Model.inference(images, drop_rate=0.0)
    length_predictions = tf.argmax(length_logits, axis=1)
    digits_predictions = tf.argmax(digits_logits, axis=2)
    digits_predictions_string = tf.reduce_join(tf.as_string(digits_predictions), axis=1)

    with tf.Session() as sess:
        restorer = tf.train.Saver()
        restorer.restore(sess, path_to_restore_checkpoint_file)

        length_predictions_val, digits_predictions_string_val = sess.run([length_predictions, digits_predictions_string])
        length_prediction_val = length_predictions_val[0]
        digits_prediction_string_val = digits_predictions_string_val[0]
        print('length: %d' % length_prediction_val)
        print('digits: %s' % digits_prediction_string_val)
Example #6
 def lowercase(self, raw_post):
     split_chars = tf.string_split(tf.reshape(raw_post, [-1]), delimiter="").values
     upchar_inds = self.upchars_lut.lookup(split_chars)
     return tf.reduce_join(tf.map_fn(lambda x: tf.cond(x[0] > 25,
                                                       lambda: x[1],
                                                       lambda: self.lchars[x[0]]),
                                     (upchar_inds, split_chars), dtype=tf.string))
Example #7
  def get_text(self, ids):
    """Returns a string corresponding to a sequence of character ids.

        Args:
          ids: a tensor with shape [batch_size, max_sequence_length]
        """
    return tf.reduce_join(
      self.table.lookup(tf.to_int64(ids)), reduction_indices=1)
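
A sketch of how such an id-to-character table could be built with the TF 1.x lookup ops (the character vocabulary here is an assumption):

import tensorflow as tf

chars = tf.constant(["a", "b", "c", "d"])  # assumed character vocabulary
table = tf.contrib.lookup.index_to_string_table_from_tensor(chars, default_value=" ")
ids = tf.constant([[0, 1], [2, 3]])
text = tf.reduce_join(table.lookup(tf.to_int64(ids)), reduction_indices=1)
with tf.Session() as sess:
    sess.run(tf.tables_initializer())
    print(sess.run(text))  # [b'ab' b'cd']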
Example #8
 def testUnknownShape(self):
   input_array = [["a"], ["b"]]
   truth = ["ab"]
   with self.test_session():
     placeholder = tf.placeholder(tf.string, name="placeholder")
     reduced = tf.reduce_join(placeholder, reduction_indices=0)
     output_array = reduced.eval(feed_dict={placeholder.name: input_array})
     self.assertAllEqualUnicode(truth, output_array)
Example #9
    def testInvalidArgsUnknownIndices(self):
        with self.test_session():
            placeholder = tf.placeholder(tf.int32, name="placeholder")
            reduced = tf.reduce_join(["test", "test2"], reduction_indices=placeholder)

            with self.assertRaisesOpError("reduction dimension -2"):
                reduced.eval(feed_dict={placeholder.name: -2})
            with self.assertRaisesOpError("reduction dimension 2"):
                reduced.eval(feed_dict={placeholder.name: 2})
Example #10
 def resize_sen(self, raw, mxlen):
     """
     Splits and rejoins a string to ensure that tokens meet
     the required max len.
     """
     raw_tokens = tf.string_split(tf.reshape(raw, [-1])).values
     # sentence length > mxlen
     raw_post = tf.reduce_join(raw_tokens[:mxlen], separator=" ")
     return raw_post
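
A quick usage sketch (the preprocessor instance and the input string are assumptions):

raw = tf.constant("this is a longer sentence than allowed")
clipped = preprocessor.resize_sen(raw, mxlen=3)  # hypothetical instance
with tf.Session() as sess:
    print(sess.run(clipped))  # b'this is a'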
Example #11
 def testUnknownIndices(self):
   input_array = [["this", "is", "a", "test"],
                  ["please", "do", "not", "panic"]]
   truth_dim_zero = ["thisplease", "isdo", "anot", "testpanic"]
   truth_dim_one = ["thisisatest", "pleasedonotpanic"]
   with self.test_session():
     placeholder = tf.placeholder(tf.int32, name="placeholder")
     reduced = tf.reduce_join(input_array, reduction_indices=placeholder)
     output_array_dim_zero = reduced.eval(feed_dict={placeholder.name: [0]})
     output_array_dim_one = reduced.eval(feed_dict={placeholder.name: [1]})
     self.assertAllEqualUnicode(truth_dim_zero, output_array_dim_zero)
     self.assertAllEqualUnicode(truth_dim_one, output_array_dim_one)
Example #12
    def _testReduceJoin(self, input_array, truth, truth_shape,
                        reduction_indices, keep_dims=False, separator=""):
        """Compares the output of reduce_join to an expected result.

        Args:
          input_array: The string input to be joined.
          truth: An array or np.array of the expected result.
          truth_shape: An array or np.array of the expected shape.
          reduction_indices: The indices to reduce over.
          keep_dims: Whether or not to retain reduced dimensions.
          separator: The separator to use for joining.
        """
        with self.test_session():
            output = tf.reduce_join(
                inputs=input_array, reduction_indices=reduction_indices, keep_dims=keep_dims, separator=separator
            )
            output_array = output.eval()

        self.assertAllEqualUnicode(truth, output_array)
        self.assertAllEqual(truth_shape, output.get_shape())
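
For a concrete instance of what this helper checks, a minimal sketch using the inputs from testUnknownIndices above:

import tensorflow as tf

inputs = [["this", "is", "a", "test"],
          ["please", "do", "not", "panic"]]
output = tf.reduce_join(inputs, reduction_indices=1, separator=" ")
with tf.Session() as sess:
    print(sess.run(output))  # [b'this is a test' b'please do not panic']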
Example #13
def markdown_table(step):
  # The text summary can also contain Markdown, including Markdown
  # tables. Markdown tables look like this:
  #
  #     | hello | there |
  #     |-------|-------|
  #     | this  | is    |
  #     | a     | table |
  #
  # The leading and trailing pipes in each row are optional, and the text
  # doesn't actually have to be neatly aligned, so we can create these
  # pretty easily. Let's do so.
  header_row = 'Pounds of chocolate | Happiness'
  chocolate = tf.range(step)
  happiness = tf.square(chocolate + 1)
  chocolate_column = tf.as_string(chocolate)
  happiness_column = tf.as_string(happiness)
  table_rows = tf.string_join([chocolate_column, " | ", happiness_column])
  table_body = tf.reduce_join(table_rows, separator='\n')
  table = tf.string_join([header_row, "---|---", table_body], separator='\n')
  preamble = 'We conducted an experiment and found the following data:\n\n'
  result = tf.string_join([preamble, table])
  tf.summary.text('chocolate_study', result)
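
To actually see the table in TensorBoard, the summary has to be evaluated and written out; a hypothetical driver sketch (the log directory is an assumption):

markdown_table(step=5)
summary_op = tf.summary.merge_all()
with tf.Session() as sess:
    writer = tf.summary.FileWriter("/tmp/markdown_table_demo")  # assumed log dir
    writer.add_summary(sess.run(summary_op), global_step=5)
    writer.close()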
Example #14
def bpe2word(tensor):
    joined_tensor = tf.reduce_join(tensor)
    replaced_tensor = tf.regex_replace(joined_tensor, "▁", " ")
    return tf.string_split([replaced_tensor]).values
Example #15
tf.sparse_accumulator_take_gradient()
tf.sparse_add()
tf.sparse_concat()
tf.sparse_conditional_accumulator()
tf.sparse_mask()
tf.sparse_matmul()
tf.sparse_maximum()
tf.sparse_merge()
tf.sparse_minimum()

tf.sparse_reduce_max()
tf.sparse_reduce_max_sparse()

tf.reduce_all()
tf.reduce_any()
tf.reduce_join()
tf.reduce_logsumexp()
tf.reduce_max()
tf.reduce_mean()
tf.reduce_min()
tf.reduce_prod()
tf.reduce_sum()
tf.reduced_shape()

tf.random_crop()
tf.random_gamma()
tf.random_normal()
tf.random_poisson()
tf.random_poisson_v2()
tf.random_shuffle()
tf.random_uniform()
Example #16
def get_text(self, ids):
    return tf.reduce_join(
        self.table.lookup(tf.to_int64(ids)), reduction_indices=1
    )
Example #17
 def from_tokens(raw, lookup_):
   gathered = tf.gather(lookup_, tf.cast(raw, tf.int32))
   joined = tf.regex_replace(tf.reduce_join(gathered, axis=1), b"<EOS>.*", b"")
   cleaned = tf.regex_replace(joined, b"_", b" ")
   tokens = tf.string_split(cleaned, " ")
   return tokens
Example #18
outputs = []

for i in range(paral_nets):
    h = tf.matmul(x, W1[i]) + b1[i]
#     h = tf.matmul(x, W1[i])
#     h = tf.sign(h)
    h = tf.nn.relu(h)
    h2 = tf.matmul(h, W2[i]) + b2[i]
    # h2 = tf.sign(h2)
    h2 = tf.nn.relu(h2)
    logits = tf.matmul(h2, W3[i]) + b3[i]
#     logits = tf.matmul(h, W2[i]) + b2[i]
#     logits = tf.matmul(h, W2[i])
    o = tf.sign(logits)
#     outputs.append((o+1)/2)
    outputs.append(tf.reduce_join(tf.reduce_join(tf.as_string(tf.cast((o + 1) // 2, tf.int32)), 0), 0))

session = tf.Session()

inputs = [[float(xx) for xx in "{0:07b}".format(i)] for i in range(2**7)]

# N=10
cnt = Counter()
#weights = {}
for i in range(N):
    # if i%(N/100) == 0:
    print(i)
    session.run(tf.global_variables_initializer())
    fs = session.run(outputs, feed_dict={x:inputs})
    varss = session.run(variables,feed_dict={x:inputs})
    #for i,f in enumerate(fs):
Example #19
    def build_network(self,
                      names,
                      sources,
                      targets,
                      masks,
                      angles,
                      views,
                      is_training=False,
                      is_validation=False,
                      is_testing=False,
                      is_encoding=False):
        """
        input:
            names         :  n     x String             shape names
            sources       :  n     x H x W x C          source images
            targets       :  (n*m) x H x W x C          target images in m views (ground truth)
            masks         :  (n*m) x H x W x 1          target boolean masks in m views (ground truth)
            angles        :  (n*m) x 4                  viewing angle parameters (m=1 for continuous view prediction)
            views         :  vw.Views                   view point information
            is_training   :  boolean                    whether it is in the training routine
            is_validation :  boolean                    whether it is handling the validation data set
            is_testing    :  boolean                    whether it is in the testing routine
            is_encoding   :  boolean                    whether it is encoding the input
        """

        print('Building network...')

        source_size = sources.get_shape().as_list()
        if self.config.continuous_view:
            num_output_views = 1
        else:
            num_output_views = views.num_views

        # scope names

        var_scope_G = 'G_net'
        var_scope_D = 'D_net'
        bn_scope_G = 'G_bn'
        bn_scope_D = 'D_bn'
        train_summary_G_name = 'train_summary_G'
        train_summary_D_name = 'train_summary_D'
        valid_summary_name = 'valid_summary'

        # generator

        num_channels = targets.get_shape()[3].value
        if not self.config.continuous_view:
            with tf.variable_scope(var_scope_G):
                with tf_framework.arg_scope(layer.unet_scopes(bn_scope_G)):
                    preds, features = network.generateUNet(
                        sources, num_output_views,
                        num_channels)  # (n*m) x H x W x C ; n x D
        else:
            with tf.variable_scope(var_scope_G):
                with tf_framework.arg_scope(layer.cnet_scopes(bn_scope_G)):
                    preds, features = network.generateCNet(
                        sources, angles, num_channels)  # n x H x W x C ; n x D

        if is_encoding:
            self.encode_names = names
            self.encode_features = features
            return  # everything below is irrelevant to the encoding pass

        # extract prediction contents

        preds_content = tf.slice(preds, [0, 0, 0, 0],
                                 [-1, -1, -1, num_channels - 1])
        preds_mask = tf.slice(preds, [0, 0, 0, num_channels - 1],
                              [-1, -1, -1, 1])
        preds = image.apply_mask(preds_content, preds_mask)
        targets_content = tf.slice(targets, [0, 0, 0, 0],
                                   [-1, -1, -1, num_channels - 1])
        targets_mask = tf.slice(targets, [0, 0, 0, num_channels - 1],
                                [-1, -1, -1, 1])
        targets = image.apply_mask(targets_content, targets_mask)
        if self.config.predict_normal:
            preds_normal = tf.slice(preds_content, [0, 0, 0, 0],
                                    [-1, -1, -1, 3])
            preds_depth = tf.slice(preds_content, [0, 0, 0, 3],
                                   [-1, -1, -1, 1])
            targets_normal = tf.slice(targets_content, [0, 0, 0, 0],
                                      [-1, -1, -1, 3])
            targets_depth = tf.slice(targets_content, [0, 0, 0, 3],
                                     [-1, -1, -1, 1])
        else:
            preds_depth = preds_content
            preds_normal = tf.tile(tf.zeros_like(preds_depth), [1, 1, 1, 3])
            targets_depth = targets_content
            targets_normal = tf.tile(tf.zeros_like(targets_depth),
                                     [1, 1, 1, 3])

        # expand tensors

        sources_expanded = tf.reshape(
            tf.tile(sources, [1, num_output_views, 1, 1]),
            [-1, source_size[1], source_size[2], source_size[3]
             ])  # (n*m) x H x W x C

        names_expanded = tf.reshape(
            tf.tile(tf.expand_dims(names, 1), [1, num_output_views]), [-1])
        names_suffix = [
            "--%d" % view for batch in range(source_size[0])
            for view in range(num_output_views)
        ]
        names_expanded = tf.reduce_join([names_expanded, names_suffix], 0)
        self.names = names_expanded

        # discriminator

        if not self.config.no_adversarial:
            with tf.variable_scope(var_scope_D):
                with tf_framework.arg_scope(layer.unet_scopes(bn_scope_D)):
                    disc_data = tf.concat([targets, preds], 0)
                    disc_data = tf.concat([
                        tf.concat([sources_expanded, sources_expanded], 0),
                        disc_data
                    ], 3)  # HACK: insert input data for discrimination in UNet
                    probs = network.discriminate(disc_data)  # (n*m*2)

        # losses

        # NOTE: learning hyper-parameters
        lambda_p = 1.0  # image loss
        lambda_a = 0.01  # adversarial loss

        dl = loss.compute_depth_loss(preds_depth, targets_depth, masks)
        nl = loss.compute_normal_loss(preds_normal, targets_normal, masks)
        ml = loss.compute_mask_loss(preds_mask, targets_mask)
        loss_g_p = dl + nl + ml

        if self.config.no_adversarial:
            loss_g_a = 0.0
            loss_d_r = 0.0
            loss_d_f = 0.0
        else:
            probs_targets, probs_preds = tf.split(probs, 2, axis=0)  # (n*m)
            loss_g_a = tf.reduce_sum(-tf.log(tf.maximum(probs_preds, 1e-6)))
            loss_d_r = tf.reduce_sum(-tf.log(tf.maximum(probs_targets, 1e-6)))
            loss_d_f = tf.reduce_sum(
                -tf.log(tf.maximum(1.0 - probs_preds, 1e-6)))

        loss_G = loss_g_p * lambda_p + loss_g_a * lambda_a
        loss_D = loss_d_r + loss_d_f

        if is_validation:
            self.valid_losses = tf.stack(
                [loss_G, loss_g_p, loss_g_a, loss_D, loss_d_r, loss_d_f])
            self.valid_images = tf.stack([
                image.encode_raw_batch_images(preds),
                image.encode_raw_batch_images(targets),
                image.encode_raw_batch_images(preds_normal),
                image.encode_raw_batch_images(preds_depth),
                image.encode_raw_batch_images(preds_mask)
            ])
            self.valid_summary_losses = tf.placeholder(
                tf.float32, shape=self.valid_losses.get_shape())
            vG_all, vG_p, vG_a, vD_all, vD_r, vD_f = tf.unstack(
                self.valid_summary_losses)
            tf.summary.scalar('vG_all',
                              vG_all,
                              collections=[valid_summary_name])
            tf.summary.scalar('vG_p', vG_p, collections=[valid_summary_name])
            tf.summary.scalar('vG_a', vG_a, collections=[valid_summary_name])
            tf.summary.scalar('vD_all',
                              vD_all,
                              collections=[valid_summary_name])
            tf.summary.scalar('vD_r', vD_r, collections=[valid_summary_name])
            tf.summary.scalar('vD_f', vD_f, collections=[valid_summary_name])
            self.valid_summary_op = tf.summary.merge_all(valid_summary_name)
            return  # everything below is irrelevant to the validation pass

        self.train_losses_G = tf.stack([loss_G, loss_g_p, loss_g_a])
        self.train_losses_D = tf.stack([loss_D, loss_d_r, loss_d_f])
        tf.summary.scalar('G_all', loss_G, collections=[train_summary_G_name])
        tf.summary.scalar('G_p', loss_g_p, collections=[train_summary_G_name])
        tf.summary.scalar('G_a', loss_g_a, collections=[train_summary_G_name])
        tf.summary.scalar('D_all', loss_D, collections=[train_summary_D_name])
        tf.summary.scalar('D_r', loss_d_r, collections=[train_summary_D_name])
        tf.summary.scalar('D_f', loss_d_f, collections=[train_summary_D_name])

        # statistics on variables

        all_vars = tf.trainable_variables()
        all_vars_G = [var for var in all_vars if var_scope_G in var.name]
        all_vars_D = [var for var in all_vars if var_scope_D in var.name]
        #print('Num all vars: %d' % len(all_vars))
        #print('Num vars on G net: %d' % len(all_vars_G))
        #print('Num vars on D net: %d' % len(all_vars_D))
        num_params_G = 0
        num_params_D = 0
        # print('G vars:')
        for var in all_vars_G:
            num_params_G += np.prod(var.get_shape().as_list())
            # print(var.name, var.get_shape().as_list())
        # print('D vars:')
        for var in all_vars_D:
            num_params_D += np.prod(var.get_shape().as_list())
            # print(var.name, var.get_shape().as_list())
        #print('Num all params: %d + %d = %d' % (num_params_G, num_params_D, num_params_G+num_params_D))
        #input('pause')

        # optimization

        # NOTE: learning hyper-parameters
        init_learning_rate = 0.0001
        adam_beta1 = 0.9
        adam_beta2 = 0.999
        opt_step = tf.Variable(0, trainable=False)
        learning_rate = tf.train.exponential_decay(init_learning_rate,
                                                   global_step=opt_step,
                                                   decay_steps=10000,
                                                   decay_rate=0.96,
                                                   staircase=True)

        opt_G = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                       beta1=adam_beta1,
                                       beta2=adam_beta2,
                                       name='ADAM_G')
        opt_D = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                       beta1=adam_beta1,
                                       beta2=adam_beta2,
                                       name='ADAM_D')
        # opt_G = tf.train.GradientDescentOptimizer(learning_rate=learning_rate, name='SGD_G')
        # opt_D = tf.train.GradientDescentOptimizer(learning_rate=learning_rate, name='SGD_D')

        grad_G = opt_G.compute_gradients(loss_G,
                                         var_list=all_vars_G,
                                         colocate_gradients_with_ops=True)
        self.grad_G_placeholder = [(tf.placeholder(tf.float32,
                                                   shape=grad[1].get_shape()),
                                    grad[1]) for grad in grad_G
                                   if grad[0] is not None]
        self.grad_G_list = [grad[0] for grad in grad_G if grad[0] is not None]
        self.update_G_op = opt_G.apply_gradients(
            self.grad_G_placeholder,
            global_step=opt_step)  # only update opt_step in G net

        if not self.config.no_adversarial:
            grad_D = opt_D.compute_gradients(loss_D,
                                             var_list=all_vars_D,
                                             colocate_gradients_with_ops=True)
            self.grad_D_placeholder = [
                (tf.placeholder(tf.float32,
                                shape=grad[1].get_shape()), grad[1])
                for grad in grad_D if grad[0] is not None
            ]
            self.grad_D_list = [
                grad[0] for grad in grad_D if grad[0] is not None
            ]
            self.update_D_op = opt_D.apply_gradients(self.grad_D_placeholder)

        # visualization stuffs

        sources_original, sources_flipped = tf.split(sources_expanded,
                                                     2,
                                                     axis=3)
        if len(self.config.sketch_views) == 1:  # single input
            sources_front = sources_original
            sources_side = tf.ones_like(sources_front)  # fake side sketch
            sources_top = tf.ones_like(sources_front)  # fake top sketch
        elif len(self.config.sketch_views) == 2:  # double input
            sources_front, sources_side = tf.split(sources_original, 2, axis=3)
            sources_top = tf.ones_like(sources_front)  # fake top sketch
        elif len(self.config.sketch_views) == 3:  # triple input
            sources_front, sources_side, sources_top = tf.split(
                sources_original, 3, axis=3)
        if (sources_front.get_shape()[3].value == 1
                and targets.get_shape()[3].value == 4):
            alpha_front = tf.ones_like(sources_front)
            alpha_side = tf.ones_like(sources_side)
            alpha_top = tf.ones_like(sources_top)
            rgb_front = image.convert_to_rgb(sources_front, channels=3)
            rgb_side = image.convert_to_rgb(sources_side, channels=3)
            rgb_top = image.convert_to_rgb(sources_top, channels=3)
            sources_front = tf.concat([rgb_front, alpha_front], 3)
            sources_side = tf.concat([rgb_side, alpha_side], 3)
            sources_top = tf.concat([rgb_top, alpha_top], 3)

        input_row = tf.concat([sources_front, sources_side], 2)
        output_row = tf.concat([targets, preds], 2)

        result_tile = tf.concat([input_row, output_row], 1)
        result_tile = image.saturate_image(
            image.unnormalize_image(result_tile))

        tf.summary.image('result', result_tile, 12, [train_summary_G_name])

        self.train_summary_G_op = tf.summary.merge_all(train_summary_G_name)
        self.train_summary_D_op = tf.summary.merge_all(train_summary_D_name)

        # output images

        num_sketch_views = len(self.config.sketch_views)
        if num_sketch_views == 1:
            all_input_row = sources_front
        elif num_sketch_views == 2:
            all_input_row = tf.concat([sources_front, sources_side], 2)
        elif num_sketch_views == 3:
            all_input_row = tf.concat(
                [sources_front, sources_side, sources_top], 2)
        img_input = image.saturate_image(image.unnormalize_image(
            all_input_row, maxval=65535.0),
                                         dtype=tf.uint16)
        img_gt = image.saturate_image(image.unnormalize_image(targets,
                                                              maxval=65535.0),
                                      dtype=tf.uint16)
        img_output = image.saturate_image(image.unnormalize_image(
            preds, maxval=65535.0),
                                          dtype=tf.uint16)
        png_input = image.encode_batch_images(img_input)
        png_gt = image.encode_batch_images(img_gt)
        png_output = image.encode_batch_images(img_output)

        img_normal = image.saturate_image(image.unnormalize_image(
            preds_normal, maxval=65535.0),
                                          dtype=tf.uint16)
        img_depth = image.saturate_image(image.unnormalize_image(
            preds_depth, maxval=65535.0),
                                         dtype=tf.uint16)
        img_mask = image.saturate_image(image.unnormalize_image(
            preds_mask, maxval=65535.0),
                                        dtype=tf.uint16)
        png_normal = image.encode_batch_images(img_normal)
        png_depth = image.encode_batch_images(img_depth)
        png_mask = image.encode_batch_images(img_mask)
        self.pngs = tf.stack(
            [png_input, png_gt, png_output, png_normal, png_depth, png_mask])

        # output results

        pixel_shape = preds.get_shape().as_list()
        num_pixels = np.prod(pixel_shape[1:])
        self.errors = tf.reduce_sum(
            tf.abs(preds - targets),
            [1, 2, 3]) / num_pixels  # just a quick check
        self.results = preds

        # batch normalization

        bn_G_collection = tf.get_collection(bn_scope_G)
        bn_D_collection = tf.get_collection(bn_scope_D)
        self.bn_G_op = tf.group(*bn_G_collection)
        self.bn_D_op = tf.group(*bn_D_collection)
Example #20
 def coords_single_sequence():
     return tf.reduce_join(characters_list, keep_dims=True)
Example #21
# TensorFlow workshop with Jan Idziak
# -------------------------------------
#
# String operations

# tensor `a` is [["a", "b"], ["c", "d"]]
import tensorflow as tf

sess = tf.Session()
a = tf.convert_to_tensor([["a", "b"], ["c", "d"]])
print(sess.run(a))

tf.reduce_join(a, 0)  # ==> ["ac", "bd"]
tf.reduce_join(a, 1)  # ==> ["ab", "cd"]
tf.reduce_join(a, -2)  # ==> ["ac", "bd"]
tf.reduce_join(a, -1)  # ==> ["ab", "cd"]
tf.reduce_join(a, 0, keep_dims=True)  # ==> [["ac", "bd"]]
tf.reduce_join(a, 1, keep_dims=True)  # ==> [["ab"], ["cd"]]
tf.reduce_join(a, 0, separator=".")  # ==> ["a.c", "b.d"]
tf.reduce_join(a, [0, 1])  # ==> ["acbd"]
tf.reduce_join(a, [1, 0])  # ==> ["abcd"]
tf.reduce_join(a, [])  # ==> ["abcd"]

b = tf.convert_to_tensor(["ac"])
c = tf.convert_to_tensor(["bd"])
d = tf.string_join([b, c], separator=" ", name=None)
print(sess.run(d))

e = tf.reduce_join(a, 0)
print(tf.string_to_hash_bucket(e, 2))
print(sess.run(tf.string_to_hash_bucket(e, 5)))
Example #22
    h5 = tf.matmul(h4, W5[i]) + b5[i]
    h4 = tf.nn.relu(h5)
    h6 = tf.matmul(h5, W6[i]) + b6[i]
    h6 = tf.nn.relu(h6)
    h7 = tf.matmul(h6, W7[i]) + b7[i]
    h7 = tf.nn.relu(h7)
    h8 = tf.matmul(h7, W8[i]) + b8[i]
    h8 = tf.nn.relu(h8)
    logits = tf.matmul(h8, W9[i]) + b9[i]
    #     logits = tf.matmul(h, W2[i]) + b2[i]
    #     logits = tf.matmul(h, W2[i])
    o = tf.sign(logits)
    #     outputs.append((o+1)/2)
    outputs.append(
        tf.reduce_join(
            tf.reduce_join(tf.as_string(tf.cast((o + 1) // 2, tf.int32)), 0),
            0))

session = tf.Session()

inputs = [[float(xx) for xx in "{0:07b}".format(i)] for i in range(2**7)]

# N=10
cnt = Counter()
#weights = {}
for i in range(N):
    # if i%(N/100) == 0:
    print(i)
    session.run(tf.global_variables_initializer())
    fs = session.run(outputs, feed_dict={x: inputs})
    varss = session.run(variables, feed_dict={x: inputs})
Example #23
  def _predict_labels(self, examples):
    """Builds tf graph for prediction.

    Args:
      examples: dict of input tensors keyed by name.

    Returns:
      predictions: dict of prediction results keyed by name.
    """
    options = self._model_proto
    is_training = self._is_training

    # Extract input data fields.

    (image, image_id, num_captions, caption_strings,
     caption_lengths) = (examples[InputDataFields.image],
                         examples[InputDataFields.image_id],
                         examples[InputDataFields.num_captions],
                         examples[InputDataFields.caption_strings],
                         examples[InputDataFields.caption_lengths])

    class_act_map_predictions = self._calc_class_act_map(examples)
    (class_act_map,
     logits) = (class_act_map_predictions[VOCPredictions.class_act_map],
                class_act_map_predictions[VOCPredictions.logits])

    # Load the vocabulary.
    vocabulary_list = self._read_vocabulary(options.vocabulary_file)
    tf.logging.info("Read a vocabulary with %i words.", len(vocabulary_list))

    # Encode labels, shape=[batch, num_classes].

    class_labels = self._encode_labels(num_captions, caption_strings,
                                       caption_lengths, vocabulary_list)

    # visualize

    with tf.name_scope("visualize"):
      image_vis = tf.cast(image, tf.uint8)
      image_vis = plotlib.draw_caption(
          image_vis,
          tf.reduce_join(caption_strings[:, 0, :], axis=-1, separator=','),
          org=(5, 5),
          fontscale=1.0,
          color=(255, 0, 0),
          thickness=1)
      image_vis = plotlib.draw_caption(
          image_vis,
          tf.gather(vocabulary_list, tf.argmax(logits, axis=-1)),
          org=(5, 25),
          fontscale=1.0,
          color=(255, 0, 0),
          thickness=1)

      class_act_map_list = []
      batch_size, height, width, _ = utils.get_tensor_shape(image_vis)
      for i, x in enumerate(tf.unstack(class_act_map, axis=-1)):
        x = plotlib.convert_to_heatmap(x, normalize=True, normalize_to=[-4, 4])
        #x = tf.image.resize_images(x, [height, width], tf.image.ResizeMethod.NEAREST_NEIGHBOR)
        x = tf.image.resize_images(x, [height, width])
        x = imgproc.gaussian_filter(x, ksize=32)
        x = tf.image.convert_image_dtype(x, tf.uint8)
        x = plotlib.draw_caption(
            x,
            tf.tile(tf.expand_dims(vocabulary_list[i], axis=0), [batch_size]),
            org=(5, 5),
            fontscale=1.0,
            color=(255, 0, 0),
            thickness=1)
        class_act_map_list.append(x)
      tf.summary.image(
          "image",
          tf.concat([image_vis] + class_act_map_list, axis=2),
          max_outputs=1)

    predictions = {
        VOCPredictions.image_id: image_id,
        VOCPredictions.class_labels: class_labels,
        VOCPredictions.class_act_map: class_act_map,
        VOCPredictions.logits: logits,
    }

    return predictions
Example #24
def get_infer_iterator(hparams,
                       src_dataset,
                       src_vocab_table,
                       batch_size,
                       eos,
                       ctx_dataset=None,
                       annot_dataset=None,
                       trie_exclude_dataset=None,
                       src_max_len=None,
                       tgt_vocab_table=None):
    src_eos_id = tf.cast(src_vocab_table.lookup(tf.constant(eos)), tf.int32)

    # even in append mode, this will be only the source, without attached context.
    src_string_dataset = src_dataset

    # Create a fake dataset for weights.
    wgt_dataset = src_dataset.map(lambda src: tf.constant(1.0))

    # Create a fake context dataset.
    if ctx_dataset is None:
        ctx_dataset = src_dataset.map(lambda src: tf.constant("no context"))

    # Create a fake annotations dataset.
    if annot_dataset is None:
        annot_dataset = src_dataset.map(lambda src: tf.constant("1\t1"))

    # Create a fake trie exclude dataset.
    if trie_exclude_dataset is None:
        trie_exclude_dataset = src_dataset.map(lambda src: tf.constant(""))

    if tgt_vocab_table is None:
        tgt_vocab_table = src_vocab_table

    if hparams.context_feed == "append":
        src_dataset = tf.data.Dataset.zip((ctx_dataset, src_dataset)).map(
            lambda ctx, src: ctx + " " + hparams.context_delimiter + " " + src)

    src_dataset = src_dataset.map(lambda src: tf.string_split([src]).values)
    ctx_dataset = ctx_dataset.map(lambda ctx: tf.string_split([ctx]).values)
    annot_dataset = annot_dataset.map(
        # We only need the first column, which contains the doc id that will be
        # later passed to the environment.
        lambda annot: tf.string_split([annot], delimiter="\t").values[0])
    trie_exclude_dataset = trie_exclude_dataset.map(
        lambda tex: tf.string_split([tex]).values)

    if src_max_len:
        src_dataset = src_dataset.map(lambda src: src[:src_max_len])
    # Convert the word strings to ids.
    src_dataset = src_dataset.map(
        lambda src: tf.cast(src_vocab_table.lookup(src), tf.int32))
    ctx_dataset = ctx_dataset.map(
        lambda ctx: tf.cast(src_vocab_table.lookup(ctx), tf.int32))
    trie_exclude_dataset = trie_exclude_dataset.map(
        lambda tex: tf.cast(tgt_vocab_table.lookup(tex), tf.int32))

    # Append context with <eos> so the length will be at least 1.
    ctx_dataset = ctx_dataset.map(lambda ctx: tf.concat(
        (ctx, [src_eos_id]), 0))
    trie_exclude_dataset = trie_exclude_dataset.map(
        lambda tex: tf.reduce_join(tf.as_string(tex), separator=" "))

    src_dataset = tf.data.Dataset.zip(
        (src_string_dataset, src_dataset, wgt_dataset, ctx_dataset,
         annot_dataset, trie_exclude_dataset))
    # Add in the annotations and word counts.
    src_dataset = src_dataset.map(
        lambda src_string, src, wgt, ctx, annot, tex:
        (src_string, src, wgt, ctx, annot, tex, tf.size(src), tf.size(ctx)))

    def batching_func(x):
        return x.padded_batch(
            batch_size,
            # The src and ctx entries are unknown-length vectors;
            # the remaining entries (strings, weights, lengths) are scalars.
            padded_shapes=(
                tf.TensorShape([]),  # src_string
                tf.TensorShape([None]),  # src
                tf.TensorShape([]),  # wgt
                tf.TensorShape([None]),  # ctx
                tf.TensorShape([]),  # annot
                tf.TensorShape([]),  # tex
                tf.TensorShape([]),  # src_len
                tf.TensorShape([])),  # ctx_len
            # Pad the source and context sequences with eos tokens.
            # (Though we don't generally need to do this, since later on we
            # mask out calculations past the true sequence length.)
            padding_values=(
                "",  # src_string
                src_eos_id,  # src
                1.0,  # wgt
                src_eos_id,  # ctx
                "",  # annot --unused
                "",  # tex --unused
                0,  # src_len -- unused
                0))  # ctx_len -- unused

    batched_dataset = batching_func(src_dataset)
    batched_iter = batched_dataset.make_initializable_iterator()
    (src_raw, src_ids, weights, ctx_ids, annot_strs, tex_strs, src_seq_len,
     ctx_seq_len) = batched_iter.get_next()
    return BatchedInput(initializer=batched_iter.initializer,
                        source_string=src_raw,
                        source=src_ids,
                        target_input=None,
                        target_output=None,
                        weights=weights,
                        context=ctx_ids,
                        annotation=annot_strs,
                        trie_exclude=tex_strs,
                        source_sequence_length=src_seq_len,
                        target_sequence_length=None,
                        context_sequence_length=ctx_seq_len)
Example #25
accuracy = tf.reduce_mean(tf.reduce_min(accuracy_internal, 2))

accuracy_letter = tf.reduce_mean(tf.reshape(accuracy_internal, [-1]))

predict1 = tf.argmax(letter1, 1)
predict2 = tf.argmax(letter2, 1)
predict3 = tf.argmax(letter3, 1)
predict4 = tf.argmax(letter4, 1)

# Build the final prediction string
base_str = tf.constant("BCEFGHJKMPQRTVWXY2346789")
poses = tf.stack([predict1, predict2, predict3, predict4], axis=1)
length = tf.constant([1, 1, 1, 1], tf.int64)
predicts = tf.map_fn(lambda pos: tf.substr(base_str, pos, length), poses,
                     tf.string)
predict_join = tf.reduce_join(predicts, axis=1)

initer = tf.global_variables_initializer()
saver = tf.train.Saver()

sess = tf.Session()
sess.run(initer)
saver.restore(sess, checkpoint_path)  # checkpoint_path assumed to be defined earlier in the script

pickup = "BCEFGHJKMPQRTVWXY2346789"
reader = pd.read_source(souce_id=802522, iterator=True)

identity = np.identity(24)
for i in range(15000):
    # Fetch the image data
    df = reader.get_chunk(500)
Example #26
def evaluate(path_to_checkpoint, ds, val_data, val_labels, num_examples,
             global_step):

    batch_size = 128
    num_batches = num_examples // batch_size
    needs_include_length = False

    with tf.Graph().as_default():
        with tf.name_scope('test_inputs'):
            xs = tf.placeholder(shape=[None, 54, 54, 3], dtype=tf.float32)
            ys1 = tf.placeholder(shape=[None], dtype=tf.int32)
            ys2 = tf.placeholder(shape=[None, 5], dtype=tf.int32)

        length_logits, digits_logits = Model.layers(xs, drop_rate=0.0)
        length_predictions = tf.argmax(length_logits, axis=1)
        digits_predictions = tf.argmax(digits_logits, axis=2)

        if needs_include_length:
            labels = tf.concat([tf.reshape(ys1, [-1, 1]), ys2], axis=1)
            predictions = tf.concat(
                [tf.reshape(length_predictions, [-1, 1]), digits_predictions],
                axis=1)
        else:
            labels = ys2
            predictions = digits_predictions

        labels_string = tf.reduce_join(tf.as_string(labels), axis=1)
        predictions_string = tf.reduce_join(tf.as_string(predictions), axis=1)

        accuracy, update_accuracy = tf.metrics.accuracy(
            labels=labels_string, predictions=predictions_string)

        tf.summary.image('image', xs)
        tf.summary.scalar('accuracy', accuracy)
        tf.summary.histogram(
            'variables',
            tf.concat(
                [tf.reshape(var, [-1]) for var in tf.trainable_variables()],
                axis=0))
        summary = tf.summary.merge_all()

        with tf.Session() as sess:
            sess.run([
                tf.global_variables_initializer(),
                tf.local_variables_initializer()
            ])
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            restorer = tf.train.Saver()
            restorer.restore(sess, path_to_checkpoint)

            for _ in range(num_batches):
                image_batch, label = ds.build_batch(val_data,
                                                    val_labels,
                                                    batch_size,
                                                    is_train=False,
                                                    shuffle=False)
                length_batch = label[:, 0]
                digits_batch = label[:, 1:6]

                acc, update = sess.run([accuracy, update_accuracy],
                                       feed_dict={
                                           xs: image_batch,
                                           ys1: length_batch,
                                           ys2: digits_batch
                                       })
            coord.request_stop()
            coord.join(threads)

    return acc
Example #27
def build_graph(checkpoint_dir,
                log_dir,
                batch_size,
                steps,
                dropout_rate=0,
                mode=tf.estimator.ModeKeys.TRAIN,
                learning_rate=None,
                decay_rate=None,
                decay_steps=None):
    g = tf.Graph()
    with g.as_default():
        y_usenow3 = tf.placeholder(tf.int32,
                                   shape=[batch_size, 1],
                                   name='usenow3')
        y_ecignow = tf.placeholder(tf.int32,
                                   shape=[batch_size, 1],
                                   name='ecignow')

        global_step = tf.train.get_or_create_global_step()

        if mode == ModeKeys.TRAIN:
            if learning_rate is None:
                raise Exception('learning_rate must be set in train mode')

            if (decay_steps is None and decay_rate is not None) or (
                    decay_rate is None and decay_steps is not None):
                raise Exception(
                    'decay_steps and decay_rate must both be set if one is set'
                )

            if decay_steps is not None and decay_rate is not None:
                learning_rate = tf.train.exponential_decay(
                    learning_rate,
                    global_step=global_step,
                    decay_steps=decay_steps,
                    decay_rate=decay_rate)

            examples = (float(batch_size) * tf.to_float(global_step))
        else:
            examples = float(batch_size)

        def fill(val):
            return tf.fill([batch_size], val)

        features = {
            'WEIGHT2': fill(9999),
            'HEIGHT3': fill(9999),
            'SEX': fill(9),
            'EMPLOY1': fill(-1),
            'INCOME2': fill(-1),
            'MARITAL': fill(-1),
            'EDUCA': fill(-1),
            'CHILDREN': fill(-1),
            '_AGEG5YR': fill(-1)
        }
        input_layer = tf.feature_column.input_layer(features, columns)
        model = MultitaskDNN(input_layer=input_layer,
                             labels_ndims=2,
                             hidden_units=[64, 32],
                             dropout_rate=dropout_rate)

        loss = model.loss([y_usenow3, y_ecignow])

        with tf.variable_scope('predictions_usenow3'):
            logistic_usenow3, probabilities_usenow3, class_ids_usenow3 = _prediction_variables(
                model.logits_layers[0])

        with tf.variable_scope('predictions_ecignow'):
            logistic_ecignow, probabilities_ecignow, class_ids_ecignow = _prediction_variables(
                model.logits_layers[1])

        predictions_usenow3_str = tf.reduce_join(
            tf.as_string(class_ids_usenow3))
        predictions_ecignow_str = tf.reduce_join(
            tf.as_string(class_ids_ecignow))

        labels_usenow3 = tf.reduce_join(tf.as_string(y_usenow3), axis=0)

        (usenow3_accuracy, usenow3_accuracy_update_op) = tf.metrics.accuracy(
            labels=labels_usenow3, predictions=predictions_usenow3_str)
        (ecignow_accuracy, ecignow_accuracy_update_op) = tf.metrics.accuracy(
            labels=tf.reduce_join(tf.as_string(y_ecignow), axis=0),
            predictions=predictions_ecignow_str)

        summary_ops = []

        summary_ops += _add_summaries(y_usenow3,
                                      class_ids_usenow3,
                                      family='usenow3',
                                      n_examples=examples)
        summary_ops += _add_summaries(y_ecignow,
                                      class_ids_ecignow,
                                      family='ecignow',
                                      n_examples=examples)

        (usenow3_precisions, usenow3_precisions_update_op) = \
            tf.metrics.precision_at_thresholds(labels=y_usenow3,
                                               predictions=logistic_usenow3,
                                               thresholds=[0.1, 0.5, 0.75])
        tf.summary.scalar('precision_at_0.1',
                          usenow3_precisions[0],
                          family='usenow3')
        tf.summary.scalar('precision_at_0.5',
                          usenow3_precisions[1],
                          family='usenow3')
        tf.summary.scalar('precision_at_0.75',
                          usenow3_precisions[2],
                          family='usenow3')
        tf.summary.scalar('loss', loss)
        tf.summary.scalar('usenow3_accuracy',
                          usenow3_accuracy,
                          family='usenow3')
        tf.summary.scalar('ecignow_accuracy',
                          ecignow_accuracy,
                          family='ecignow')
        tf.summary.histogram('probabilities',
                             probabilities_usenow3,
                             family='usenow3')
        tf.summary.histogram('probabilities',
                             probabilities_ecignow,
                             family='ecignow')

        if mode == ModeKeys.TRAIN:
            tf.summary.scalar('learning_rate', learning_rate)

        summary = tf.summary.merge_all()
        saver = tf.train.Saver()

        if mode == ModeKeys.TRAIN:
            save_increment = 1000
            optimizer = tf.train.GradientDescentOptimizer(learning_rate)
            train_op = optimizer.minimize(loss, global_step=global_step)
            checkpoint_saver_hook = tf.train.CheckpointSaverHook(
                checkpoint_dir=checkpoint_dir,
                save_steps=save_increment,
                saver=saver)
            summary_saver_hook = tf.train.SummarySaverHook(
                save_steps=save_increment,
                output_dir=log_dir,
                summary_op=summary)
            profiler_hook = tf.train.ProfilerHook(save_steps=save_increment,
                                                  output_dir=log_dir)
            stop_at_step_hook = tf.train.StopAtStepHook(num_steps=steps)
            logging_hook = tf.train.LoggingTensorHook(
                {
                    'loss': loss,
                    'usenow3_accuracy': usenow3_accuracy,
                    'ecignow_accuracy': ecignow_accuracy,
                    'usenow3_precision_at_thresholds': usenow3_precisions
                },
                every_n_iter=save_increment)

            hooks = [
                checkpoint_saver_hook, summary_saver_hook, profiler_hook,
                stop_at_step_hook, logging_hook
            ]

            ops = [
                global_step, train_op, loss, usenow3_accuracy_update_op,
                ecignow_accuracy_update_op, usenow3_precisions_update_op
            ] + summary_ops
        else:
            summary_saver_hook = tf.train.SummarySaverHook(save_steps=1,
                                                           output_dir=log_dir,
                                                           summary_op=summary)
            hooks = [summary_saver_hook]
            ops = [
                summary, usenow3_accuracy, ecignow_accuracy,
                usenow3_accuracy_update_op, ecignow_accuracy_update_op
            ] + summary_ops

        return g, saver, (features, y_usenow3, y_ecignow), hooks, ops
Example #28
    def evaluate(self, path_to_checkpoint, path_to_tfrecords_file,
                 num_examples, global_step):
        batch_size = 128
        num_batches = num_examples // batch_size
        needs_include_length = False

        with tf.Graph().as_default():
            image_batch, length_batch, digits_batch = build_batch(
                path_to_tfrecords_file,
                num_examples=num_examples,
                batch_size=batch_size,
                shuffled=False)
            length_logits, digits_logits = Model.inference(image_batch,
                                                           drop_rate=0.0)
            length_predictions = tf.argmax(length_logits, axis=1)
            digits_predictions = tf.argmax(digits_logits, axis=2)

            if needs_include_length:
                labels = tf.concat(
                    [tf.reshape(length_batch, [-1, 1]), digits_batch], axis=1)
                predictions = tf.concat([
                    tf.reshape(length_predictions, [-1, 1]), digits_predictions
                ],
                                        axis=1)
            else:
                labels = digits_batch
                predictions = digits_predictions

            labels_string = tf.reduce_join(tf.as_string(labels), axis=1)
            predictions_string = tf.reduce_join(tf.as_string(predictions),
                                                axis=1)

            accuracy, update_accuracy = tf.metrics.accuracy(
                labels=labels_string, predictions=predictions_string)

            tf.summary.image('image', image_batch)
            tf.summary.scalar('accuracy', accuracy)
            tf.summary.histogram(
                'variables',
                tf.concat([
                    tf.reshape(var, [-1]) for var in tf.trainable_variables()
                ],
                          axis=0))
            summary = tf.summary.merge_all()

            with tf.Session() as sess:
                sess.run([
                    tf.global_variables_initializer(),
                    tf.local_variables_initializer()
                ])
                coord = tf.train.Coordinator()
                threads = tf.train.start_queue_runners(sess=sess, coord=coord)

                restorer = tf.train.Saver()
                restorer.restore(sess, path_to_checkpoint)

                for _ in range(num_batches):
                    sess.run(update_accuracy)

                accuracy_val, summary_val = sess.run([accuracy, summary])
                self.summary_writer.add_summary(summary_val,
                                                global_step=global_step)

                coord.request_stop()
                coord.join(threads)

        return accuracy_val
Example #29
def cnn_model_fn():
    x = tf.placeholder(tf.string, name='x')

    input_layer = _parse_function(x)
    input_layer = tf.reshape(input_layer, [-1, 224, 224, 1])
    net, _ = resnet_v2.resnet_v2_50(input_layer)

    y1 = tf.placeholder(tf.float32, shape=[None, RESNET_CLASSES])
    y2 = tf.placeholder(tf.float32, shape=[None, RESNET_CLASSES])
    y3 = tf.placeholder(tf.float32, shape=[None, RESNET_CLASSES])
    y4 = tf.placeholder(tf.float32, shape=[None, RESNET_CLASSES])
    y5 = tf.placeholder(tf.float32, shape=[None, RESNET_CLASSES])
    y6 = tf.placeholder(tf.float32, shape=[None, RESNET_CLASSES])

    net = tf.squeeze(net, axis=[1, 2])
    letter1 = slim.fully_connected(net, num_outputs=33, activation_fn=None, scope='train')
    letter2 = slim.fully_connected(net, num_outputs=33, activation_fn=None, scope='train1')
    letter3 = slim.fully_connected(net, num_outputs=33, activation_fn=None, scope='train2')
    letter4 = slim.fully_connected(net, num_outputs=33, activation_fn=None, scope='train3')
    letter5 = slim.fully_connected(net, num_outputs=33, activation_fn=None, scope='train4')
    letter6 = slim.fully_connected(net, num_outputs=33, activation_fn=None, scope='train5')

    letter1_cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y1, logits=letter1))
    letter2_cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y2, logits=letter2))
    letter3_cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y3, logits=letter3))
    letter4_cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y4, logits=letter4))
    letter5_cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y5, logits=letter5))
    letter6_cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y6, logits=letter6))
    loss = letter1_cross_entropy + letter2_cross_entropy + letter3_cross_entropy + letter4_cross_entropy + letter5_cross_entropy + letter6_cross_entropy

    optimizer = tf.train.AdamOptimizer(learning_rate=0.0001)
    train_op = slim.learning.create_train_op(loss, optimizer,
                                             summarize_gradients=True)
    tf.summary.scalar('loss', loss)

    predict_concat = tf.stack([tf.argmax(letter1, 1),
                               tf.argmax(letter2, 1),
                               tf.argmax(letter3, 1),
                               tf.argmax(letter4, 1),
                               tf.argmax(letter5, 1),
                               tf.argmax(letter6, 1)],
                              1)
    y_concat = tf.stack([tf.argmax(y1, 1),
                         tf.argmax(y2, 1),
                         tf.argmax(y3, 1),
                         tf.argmax(y4, 1),
                         tf.argmax(y5, 1),
                         tf.argmax(y6, 1)],
                        1)

    # Note: no trailing comma here; accuracy_internal is a [batch, 6] tensor,
    # so the per-sample minimum is taken over axis 1.
    accuracy_internal = tf.cast(tf.equal(predict_concat, y_concat), tf.float32)
    accuracy = tf.reduce_mean(tf.reduce_min(accuracy_internal, 1))
    tf.summary.scalar("accuracy", accuracy)
    merged = tf.summary.merge_all()
    accuracy_letter = tf.reduce_mean(tf.reshape(accuracy_internal, [-1]))

    length = tf.constant([1, 1, 1, 1, 1, 1], tf.int64)
    predicts = tf.map_fn(lambda pos: tf.substr(pickup, pos, length), predict_concat, tf.string)
    predict_join = tf.reduce_join(predicts, axis=1)
    # tf.print(predict_join)

    initer = tf.global_variables_initializer()
    saver = tf.train.Saver()

    sess = tf.Session()
    sess.run(initer)
    ckpt = tf.train.get_checkpoint_state(model_path_path)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)


    train_writer = tf.summary.FileWriter(user_home + '/50logs')
    # saver.restore(sess, model_path)

    images = datasets.images #get_train_dataset(dataset_path)
    labels = datasets.labels
    test_images = datasets.test_images
    test_labels = datasets.test_labels
    n = 1
    ret_status = False
    for j in range(15000):
        if ret_status:
            break
        for i in range(len(images)):
            if ret_status:
                break
            image = images[i]
            label = labels[i]
            batch_y_1 = [identity[pickup.find(label[0])]]
            batch_y_2 = [identity[pickup.find(label[1])]]
            batch_y_3 = [identity[pickup.find(label[2])]]
            batch_y_4 = [identity[pickup.find(label[3])]]
            batch_y_5 = [identity[pickup.find(label[4])]]
            batch_y_6 = [identity[pickup.find(label[5])]]

            sess.run(train_op,
                     feed_dict={x: image, y1: batch_y_1, y2: batch_y_2, y3: batch_y_3, y4: batch_y_4, y5: batch_y_5,
                                y6: batch_y_6})

            n = n + 1

            if n % 100 == 0:
                saver.save(sess, model_path, global_step=n)
            # if i % 10 == 0:
                summary = sess.run(merged,feed_dict={x: image, y1: batch_y_1, y2: batch_y_2, y3: batch_y_3, y4: batch_y_4, y5: batch_y_5, y6: batch_y_6})
                train_writer.add_summary(summary, n)
                count = 0
                for k in range(len(test_images)):
                    image = test_images[k]
                    label = test_labels[k]
                    print(image)
                    print(label)
                    batch_y_1 = [identity[pickup.find(label[0])]]
                    batch_y_2 = [identity[pickup.find(label[1])]]
                    batch_y_3 = [identity[pickup.find(label[2])]]
                    batch_y_4 = [identity[pickup.find(label[3])]]
                    batch_y_5 = [identity[pickup.find(label[4])]]
                    batch_y_6 = [identity[pickup.find(label[5])]]

                    accuracy_letter_, accuracy_, predict_ = sess.run([accuracy_letter, accuracy, predict_join],
                                                                     feed_dict={x: image, y1: batch_y_1, y2: batch_y_2,
                                                                                y3: batch_y_3,
                                                                                y4: batch_y_4, y5: batch_y_5,
                                                                                y6: batch_y_6})
                    print(accuracy_letter_)
                    print("accuracy is ====>%f" % accuracy_)
                    print(predict_)
                    if accuracy_ == 1.00:
                        count += 1

                accuracy_ = count / len(test_labels)
                print(accuracy_)
                if accuracy_ > 0.9:
                    ret_status = True

                print(j, n)
                print("-" * 20)

    prediction_signature = tf.saved_model.signature_def_utils.predict_signature_def({'image_path': x},
                                                                                    {'label': predict_join})

    legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')

    builder = tf.saved_model.builder.SavedModelBuilder(user_home + '/model')
    builder.add_meta_graph_and_variables(
        sess, [tf.saved_model.tag_constants.SERVING],
        signature_def_map={
            tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                prediction_signature,
        },
        legacy_init_op=legacy_init_op)
    builder.save()
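For reference, a minimal sketch of loading the SavedModel exported above back into a fresh session; this uses the standard TF1 loader API, the same `user_home + '/model'` export directory and `image_path`/`label` signature keys as the export, and a hypothetical test image path:

import tensorflow as tf

# A minimal sketch, assuming the export directory written by builder.save() above.
with tf.Session(graph=tf.Graph()) as sess:
    meta_graph = tf.saved_model.loader.load(
        sess, [tf.saved_model.tag_constants.SERVING], user_home + '/model')
    sig = meta_graph.signature_def[
        tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
    # '/tmp/captcha.jpeg' is a hypothetical input image path
    label = sess.run(sig.outputs['label'].name,
                     feed_dict={sig.inputs['image_path'].name: '/tmp/captcha.jpeg'})
    print(label)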
Example #30
0
    def get_accuracy(self, path_to_checkpoint, path_to_tfrecords_file, num_examples, global_step):
        """
        input: path_to_checkpoint => model checkpoint path
               path_to_tfrecords_file => tfrecords path
               num_examples => number of examples to be measured

        funct: evaluates the accuracy of the predicted values of the samples

        output: returns the total accuracy of the model on the given sample
                data, along with the masked accuracy and the coverage ratio
        """
        batch_size = 128
        num_batches = num_examples // batch_size
        needs_include_length = False

        with tf.Graph().as_default():

            # gets the batches of the evaluating data

            image_batch, length_batch, digits_batch = DataPreprocessor.build_batch(path_to_tfrecords_file,
                                                                         num_examples=num_examples,
                                                                         batch_size=batch_size,
                                                                         shuffled=False)

            length_logits, digits_logits = CNNModel.get_inference(image_batch, drop_rate=0.0)
            length_predictions = tf.argmax(length_logits, axis=1)
            digits_predictions = tf.argmax(digits_logits, axis=2)

            soft = tf.nn.softmax(digits_logits)
            coverage = tf.reduce_max(soft, reduction_indices=2)
            proba = tf.reduce_mean(coverage, axis=1)
            ones = 0.8*tf.ones_like(proba) 
            mask = tf.greater(proba, ones)

            # if the length prediction should be included, concatenate it with the digit predictions

            if needs_include_length:
                labels = tf.concat([tf.reshape(length_batch, [-1, 1]), digits_batch], axis=1)
                predictions = tf.concat([tf.reshape(length_predictions, [-1, 1]), digits_predictions], axis=1)
            else:
                labels = digits_batch
                predictions = digits_predictions

            labels_string = tf.reduce_join(tf.as_string(labels), axis=1)
            predictions_string = tf.reduce_join(tf.as_string(predictions), axis=1)
            labels_mask_string = tf.boolean_mask(labels_string, mask)
            predictions_mask_string = tf.boolean_mask(predictions_string, mask)
            
            coverage_size = tf.size(predictions_mask_string)/tf.size(predictions_string)


            # determining the accuracy of the evaluating data

            accuracy, update_accuracy = tf.metrics.accuracy(
                labels=labels_string,
                predictions=predictions_string
            )

            # determining the accuracy mask of the evaluating data

            accuracy_mask, update_accuracy_mask = tf.metrics.accuracy(
                labels=labels_mask_string,
                predictions=predictions_mask_string
            )

            tf.summary.image('image', image_batch)
            tf.summary.scalar('accuracy', accuracy)
            tf.summary.histogram('variables',
                                 tf.concat([tf.reshape(var, [-1]) for var in tf.trainable_variables()], axis=0))
            summary = tf.summary.merge_all()

            with tf.Session() as sess:
                sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
                coord = tf.train.Coordinator()
                threads = tf.train.start_queue_runners(sess=sess, coord=coord)

                restorer = tf.train.Saver()
                restorer.restore(sess, path_to_checkpoint)

                for _ in xrange(num_batches):
                    sess.run([update_accuracy, update_accuracy_mask])

                accuracy_val, summary_val = sess.run([accuracy, summary])

                accuracy_mask_val, coverage_size_val = sess.run([accuracy_mask, coverage_size])

                self.summary_writer.add_summary(summary_val, global_step=global_step)

                coord.request_stop()
                coord.join(threads)

        return accuracy_val, accuracy_mask_val, coverage_size_val
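The mask built above keeps only batch entries whose mean per-digit softmax confidence exceeds 0.8; a toy sketch of the same masking and coverage computation, with all values made up:

import tensorflow as tf

proba = tf.constant([0.95, 0.60, 0.85])           # mean per-digit confidence
predictions = tf.constant(["123", "456", "789"])  # joined digit strings
mask = tf.greater(proba, 0.8 * tf.ones_like(proba))
kept = tf.boolean_mask(predictions, mask)
coverage = tf.size(kept) / tf.size(predictions)
with tf.Session() as sess:
    print(sess.run([kept, coverage]))  # [b'123', b'789'] and ~0.67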
Example #31
0
def cnn_model_fn():
    x = tf.placeholder(tf.string, name='x')

    input_layer = _parse_function(x)
    input_layer = tf.reshape(input_layer, [-1, 224, 224, 1])
    net, _ = resnet_v2.resnet_v2_50(input_layer)

    y1 = tf.placeholder(tf.float32, shape=[None, RESNET_CLASSES])
    y2 = tf.placeholder(tf.float32, shape=[None, RESNET_CLASSES])
    y3 = tf.placeholder(tf.float32, shape=[None, RESNET_CLASSES])
    y4 = tf.placeholder(tf.float32, shape=[None, RESNET_CLASSES])
    y5 = tf.placeholder(tf.float32, shape=[None, RESNET_CLASSES])
    y6 = tf.placeholder(tf.float32, shape=[None, RESNET_CLASSES])

    net = tf.squeeze(net, axis=[1, 2])
    letter1 = slim.fully_connected(net,
                                   num_outputs=33,
                                   activation_fn=None,
                                   scope='train')
    letter2 = slim.fully_connected(net,
                                   num_outputs=33,
                                   activation_fn=None,
                                   scope='train1')
    letter3 = slim.fully_connected(net,
                                   num_outputs=33,
                                   activation_fn=None,
                                   scope='train2')
    letter4 = slim.fully_connected(net,
                                   num_outputs=33,
                                   activation_fn=None,
                                   scope='train3')
    letter5 = slim.fully_connected(net,
                                   num_outputs=33,
                                   activation_fn=None,
                                   scope='train4')
    letter6 = slim.fully_connected(net,
                                   num_outputs=33,
                                   activation_fn=None,
                                   scope='train5')

    letter1_cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y1, logits=letter1))
    letter2_cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y2, logits=letter2))
    letter3_cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y3, logits=letter3))
    letter4_cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y4, logits=letter4))
    letter5_cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y5, logits=letter5))
    letter6_cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y6, logits=letter6))
    loss = (letter1_cross_entropy + letter2_cross_entropy + letter3_cross_entropy +
            letter4_cross_entropy + letter5_cross_entropy + letter6_cross_entropy)

    optimizer = tf.train.AdamOptimizer(learning_rate=0.0001)
    train_op = slim.learning.create_train_op(loss,
                                             optimizer,
                                             summarize_gradients=True)

    predict_concat = tf.stack([
        tf.argmax(letter1, 1),
        tf.argmax(letter2, 1),
        tf.argmax(letter3, 1),
        tf.argmax(letter4, 1),
        tf.argmax(letter5, 1),
        tf.argmax(letter6, 1)
    ], 1)
    y_concat = tf.stack([
        tf.argmax(y1, 1),
        tf.argmax(y2, 1),
        tf.argmax(y3, 1),
        tf.argmax(y4, 1),
        tf.argmax(y5, 1),
        tf.argmax(y6, 1)
    ], 1)

    accuracy_internal = tf.cast(tf.equal(predict_concat, y_concat), tf.float32)
    accuracy = tf.reduce_mean(tf.reduce_min(accuracy_internal, 1))
    accuracy_letter = tf.reduce_mean(tf.reshape(accuracy_internal, [-1]))

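    # map each predicted class index back to its character in pickup, then join each row into a string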
    length = tf.constant([1, 1, 1, 1, 1, 1], tf.int64)
    predicts = tf.map_fn(lambda pos: tf.substr(pickup, pos, length),
                         predict_concat, tf.string)
    predict_join = tf.reduce_join(predicts, axis=1)
    # tf.print(predict_join)

    initer = tf.global_variables_initializer()
    saver = tf.train.Saver()

    sess = tf.Session()
    sess.run(initer)
    ckpt = tf.train.get_checkpoint_state(model_path_path)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
    # saver.restore(sess, model_path)
    # """
    image = '/Users/geu/testimg/IDO4N4.jpeg'
    label = 'IDO4N4'
    batch_y_1 = [identity[pickup.find(label[0])]]
    batch_y_2 = [identity[pickup.find(label[1])]]
    batch_y_3 = [identity[pickup.find(label[2])]]
    batch_y_4 = [identity[pickup.find(label[3])]]
    batch_y_5 = [identity[pickup.find(label[4])]]
    batch_y_6 = [identity[pickup.find(label[5])]]

    accuracy_letter_, accuracy_, predict_ = sess.run(
        [accuracy_letter, accuracy, predict_join],
        feed_dict={
            x: image,
            y1: batch_y_1,
            y2: batch_y_2,
            y3: batch_y_3,
            y4: batch_y_4,
            y5: batch_y_5,
            y6: batch_y_6
        })
    print(accuracy_letter_)
    print("accuracy is ====>%f" % accuracy_)
    print(predict_)
Example #32
0
 def join_characters_fn(coords):
     return tf.reduce_join(characters_list[coords[0]:coords[1]])
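A hypothetical usage of this helper with tf.map_fn, assuming characters_list is a 1-D string tensor and each row of coords is a made-up (start, end) index pair:

import tensorflow as tf

characters_list = tf.constant(["h", "e", "l", "l", "o"])

def join_characters_fn(coords):
    return tf.reduce_join(characters_list[coords[0]:coords[1]])

coords = tf.constant([[0, 2], [2, 5]])  # made-up (start, end) pairs
words = tf.map_fn(join_characters_fn, coords, dtype=tf.string)
with tf.Session() as sess:
    print(sess.run(words))  # [b'he' b'llo']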
Example #33
0
def main(_):
    path = FLAGS.path
    path_to_restore_checkpoint_file = FLAGS.restore_checkpoint
    files = getTestFiles(path)
    num = 0
    correct = 0
    image_paths = []
    number_predictions = []
    labels = []
    for key in files:
        print(key)
        # print files['1']
        class_num = len(files[key])
        num = num + class_num
        images = [tf.image.decode_jpeg(tf.read_file(image_dir), channels=3) for image_dir in files[key]]
        # print images[0], images[1]
        # image = tf.image.resize_image_with_crop_or_pad(image, 64, 64)
        for i in range(0, len(files[key])):
            im = Image.open(files[key][i])
            image_paths += [files[key][i]]
            width, height = im.size
            # resize method 1
            # images[i] = tf.image.crop_to_bounding_box(images[i], height / 5, width / 2 - 32, 64, 64)
            # resize method 2 (float image)
            # images[i] = tf.image.resize_image_with_crop_or_pad(images[i], int(min(width, height)/1.5), int(min(width, height)/1.5))
            # images[i] = tf.image.resize_images(images[i], [64, 64])/255.0
            # images[i] = tf.image.resize_images(images[i], [64, 64])
            # resize method 3 for cropped images
            images[i] = tf.image.resize_image_with_crop_or_pad(images[i], 54, 54)
            # print str(i) + ' ' + files[key][i]
        # stack multiple images
        images = tf.stack(images)
        # image = tf.image.resize_images(image, [64, 64])
        # images = tf.reshape(images, [-1, 64, 64, 3])
        images = tf.image.convert_image_dtype(images, dtype=tf.float32)
        images = tf.multiply(tf.subtract(images, 0.5), 2)
        #images = tf.image.resize_images(images, [54, 54])
        images = tf.reshape(images, [-1, 54, 54, 3])
        # display(images[124])
        #figures = {'img' + str(i): images[i] for i in range(len(files[key]))}
        # print figures
        #plot_figures(figures)
        # images = tf.unstack(images)
        # print images
        # for image in images:
        length_logits, digits_logits = Model.inference(images, drop_rate=0.0)
        length_predictions = tf.argmax(length_logits, axis=1)
        digits_predictions = tf.argmax(digits_logits, axis=2)
        digits_predictions_string = tf.reduce_join(tf.as_string(digits_predictions), axis=1)

        with tf.Session() as sess:
            restorer = tf.train.Saver()
            restorer.restore(sess, path_to_restore_checkpoint_file)

            length_predictions_val, digits_predictions_string_val = sess.run(
                [length_predictions, digits_predictions_string])
            # print 'length: ', length_predictions_val
            # print 'digits: ', digits_predictions_string_val
            predictions = [int(d[:l]) for l, d in zip(length_predictions_val, digits_predictions_string_val)]
            # print predictions
            number_predictions += predictions
            labels += [int(key)] * class_num
            # class_correct = sum([1.0 for p in predictions if p == key])
            # correct = correct + class_correct
            # print 'Accuracy: ', class_correct / class_num
        # break
    acc, acc_op = tf.metrics.accuracy(labels = tf.Variable(labels), predictions = tf.Variable(number_predictions))
    # auc, auc_op = tf.metrics.auc(labels = tf.Variable(labels), predictions = tf.Variable(number_predictions))
    pre, pre_op = tf.metrics.precision(labels = tf.Variable(labels), predictions = tf.Variable(number_predictions))
    recall, recall_op = tf.metrics.recall(labels = tf.Variable(labels), predictions = tf.Variable(number_predictions))
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()
    tf.local_variables_initializer().run()
    sess.run(acc_op)
    #sess.run(auc_op)
    sess.run(pre_op)
    sess.run(recall_op)
    print(labels)
    print(number_predictions)
    # sess.run(init)    
    print("Number of test images: " + str(num) + ".")
    print("Accuracy: " + str(sess.run(acc)))
    print("Precision: " + str(sess.run(pre)))
    print("Recall: " + str(sess.run(recall)))
    confusion_matrix(image_paths, labels, number_predictions)
Example #34
0
    def evaluate(self, path_to_restore_model_checkpoint_file,
                 path_to_restore_defender_checkpoint_file,
                 path_to_tfrecords_file, num_examples, global_step,
                 defend_layer, attacker_type):
        batch_size = 32
        num_batches = num_examples // batch_size
        needs_include_length = False

        with tf.Graph().as_default():
            image_batch, length_batch, digits_batch = Donkey.build_batch(
                path_to_tfrecords_file,
                num_examples=num_examples,
                batch_size=batch_size,
                shuffled=False)
            with tf.variable_scope('model'):
                length_logits, digits_logits, hidden_out = Model.inference(
                    image_batch,
                    drop_rate=0.0,
                    is_training=False,
                    defend_layer=defend_layer)
            with tf.variable_scope('defender'):
                recovered = Attacker.recover_hidden(attacker_type,
                                                    hidden_out,
                                                    is_training=False,
                                                    defend_layer=defend_layer)
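            # max_val=2 presumably because the input images are scaled to the [-1, 1] range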
            ssim = tf.reduce_mean(
                tf.abs(tf.image.ssim(image_batch, recovered, max_val=2)))
            length_predictions = tf.argmax(length_logits, axis=1)
            digits_predictions = tf.argmax(digits_logits, axis=2)

            if needs_include_length:
                labels = tf.concat(
                    [tf.reshape(length_batch, [-1, 1]), digits_batch], axis=1)
                predictions = tf.concat([
                    tf.reshape(length_predictions, [-1, 1]), digits_predictions
                ],
                                        axis=1)
            else:
                labels = digits_batch
                predictions = digits_predictions

            labels_string = tf.reduce_join(tf.as_string(labels), axis=1)
            predictions_string = tf.reduce_join(tf.as_string(predictions),
                                                axis=1)

            accuracy, update_accuracy = tf.metrics.accuracy(
                labels=labels_string, predictions=predictions_string)

            tf.summary.image('image', image_batch, max_outputs=20)
            tf.summary.image('recovered', recovered, max_outputs=20)
            tf.summary.scalar('ssim', ssim)
            tf.summary.scalar('accuracy', accuracy)
            tf.summary.histogram(
                'variables',
                tf.concat([
                    tf.reshape(var, [-1]) for var in tf.trainable_variables()
                ],
                          axis=0))
            summary = tf.summary.merge_all()

            with tf.Session() as sess:
                sess.run([
                    tf.global_variables_initializer(),
                    tf.local_variables_initializer()
                ])
                coord = tf.train.Coordinator()
                threads = tf.train.start_queue_runners(sess=sess, coord=coord)

                model_saver = tf.train.Saver(var_list=tf.get_collection(
                    tf.GraphKeys.TRAINABLE_VARIABLES, scope='model'))
                defender_saver = tf.train.Saver(var_list=tf.get_collection(
                    tf.GraphKeys.TRAINABLE_VARIABLES, scope='defender'))
                model_saver.restore(sess,
                                    path_to_restore_model_checkpoint_file)
                print("Evaluation model restored from {}".format(
                    path_to_restore_model_checkpoint_file))
                defender_saver.restore(
                    sess, path_to_restore_defender_checkpoint_file)

                for _ in range(num_batches):
                    sess.run(update_accuracy)

                accuracy_val, summary_val = sess.run([accuracy, summary])
                self.summary_writer.add_summary(summary_val,
                                                global_step=global_step)

                coord.request_stop()
                coord.join(threads)

        return accuracy_val
Example #35
0
def join_beams(tokens, tokens_lengths, separator='\n'):
    beams = join_tokens_beam(tokens, tokens_lengths)
    joined = tf.reduce_join(beams, axis=1, separator=separator)
    joined = tf.reshape(joined, [-1, 1])
    return joined
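A standalone sketch of just the final joining step; join_tokens_beam is project-specific and not shown, so the beams tensor below is made up:

import tensorflow as tf

beams = tf.constant([["a b c", "a b d"]])  # [batch=1, beam_width=2]
joined = tf.reshape(tf.reduce_join(beams, axis=1, separator='\n'), [-1, 1])
with tf.Session() as sess:
    print(sess.run(joined))  # [[b'a b c\na b d']]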
 def testInvalidReductionIndices(self):
   with self.test_session():
     with self.assertRaisesRegexp(ValueError, "scalar"):
       tf.reduce_join(inputs="", reduction_indices=0)
     with self.assertRaisesRegexp(ValueError,
                                  "Invalid reduction dimension -3"):
       tf.reduce_join(inputs=[[""]], reduction_indices=-3)
     with self.assertRaisesRegexp(ValueError, "Invalid reduction dimension 2"):
       tf.reduce_join(inputs=[[""]], reduction_indices=2)
     with self.assertRaisesRegexp(ValueError,
                                  "Invalid reduction dimension -3"):
       tf.reduce_join(inputs=[[""]], reduction_indices=[0, -3])
     with self.assertRaisesRegexp(ValueError, "Invalid reduction dimension 2"):
       tf.reduce_join(inputs=[[""]], reduction_indices=[0, 2])
     with self.assertRaisesRegexp(ValueError, "Duplicate reduction index 0"):
       tf.reduce_join(inputs=[[""]], reduction_indices=[0, 0])
Example #37
0
    def read_file(self, example):

        example_fmt = {
            "imagename":
            tf.FixedLenFeature((), tf.string, ""),
            "pts2d_68":
            tf.FixedLenFeature(
                (self.args.gt_num_lmks * 2),
                tf.float32,
                default_value=list(np.zeros(self.args.gt_num_lmks * 2)))
        }

        parsed = tf.parse_single_example(example, example_fmt)

        label = tf.reshape(parsed["pts2d_68"], [-1, 2])

        img_string = tf.read_file(
            tf.reduce_join(
                [tf.constant(self.args.data_eval_dir), parsed["imagename"]]))

        image = tf.image.decode_image(tf.reshape(img_string, shape=[]),
                                      channels=3)
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        h = tf.shape(image)[0]
        w = tf.shape(image)[1]

        label = label - 1

        xmin = tf.reduce_min(label[:, 0], axis=0)
        ymin = tf.reduce_min(label[:, 1], axis=0)
        xmax = tf.reduce_max(label[:, 0], axis=0)
        ymax = tf.reduce_max(label[:, 1], axis=0)

        bbox_h = ymax - ymin
        bbox_w = xmax - xmin

        scale_factor = 0.01
        y1 = (ymin - bbox_h * scale_factor) / tf.cast(h - 1, tf.float32)
        x1 = (xmin - bbox_w * scale_factor) / tf.cast(w - 1, tf.float32)
        y2 = (ymax + bbox_h * scale_factor) / tf.cast(h - 1, tf.float32)
        x2 = (xmax + bbox_w * scale_factor) / tf.cast(w - 1, tf.float32)

        image = tf.squeeze(tf.image.crop_and_resize(
            tf.expand_dims(image, 0),
            boxes=[[y1, x1, y2, x2]],
            box_ind=[0],
            crop_size=[self.args.IMAGE_SIZE[0], self.args.IMAGE_SIZE[1]],
            method='bilinear'),
                           axis=0)

        corner = tf.stack([
            x1 * tf.cast(w - 1, tf.float32) * tf.ones(self.args.num_lmks),
            y1 * tf.cast(h - 1, tf.float32) * tf.ones(self.args.num_lmks)
        ],
                          axis=1)

        scale = tf.stack([tf.ones(self.args.num_lmks, dtype=tf.float32) * (self.args.IMAGE_SIZE[1]-1) \
              / ((x2 - x1) * tf.cast(w - 1, tf.float32)),
               tf.ones(self.args.num_lmks, dtype=tf.float32) * (self.args.IMAGE_SIZE[0]-1) \
              / ((y2 - y1) * tf.cast(h - 1, tf.float32))], axis=1)

        image.set_shape([self.args.IMAGE_SIZE[0], self.args.IMAGE_SIZE[1], 3])

        return image, label + 1, corner, scale
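A hedged sketch of how such a parser is typically wired into a tf.data input pipeline; `reader` stands for an instance of the class above and 'eval.tfrecord' is a hypothetical file:

import tensorflow as tf

dataset = tf.data.TFRecordDataset(['eval.tfrecord'])  # hypothetical TFRecord file
dataset = dataset.map(reader.read_file).batch(8)
image, label, corner, scale = dataset.make_one_shot_iterator().get_next()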
Example #39
0
    for grad, var in grads:
        if grad is not None:
            tf.summary.histogram(var.op.name + '/grads', grad)

    # Evaluate model accuracy
    with tf.name_scope('predict'):
        pred_correct = tf.equal(tf.argmax(pred, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(pred_correct, tf.float32))
        accuracy_summary = tf.summary.scalar("accuracy_summary", accuracy)

    # Cloud ML online prediction
    with tf.name_scope('serving'):
        # Input is a base64 string
        image_bytes = tf.placeholder(tf.string)
        rgb_image = tf.image.decode_jpeg(tf.reduce_join(image_bytes, []),
                                         channels=FLAGS.image_channels)
        rgb_image = tf.image.convert_image_dtype(rgb_image, dtype=tf.float32)
        rgb_image = tf.image.resize_images(
            rgb_image, [FLAGS.image_size, FLAGS.image_size])
        image_batch = tf.expand_dims(rgb_image, 0)

        # Define tensor inputs to use during prediction
        inputs = {'image_bytes': image_bytes.name}
        tf.add_to_collection('inputs', json.dumps(inputs))

        softmax_pred = tf.nn.softmax(
            conv_net(image_batch, weights, biases, FLAGS.image_size, 1.0))

        # Define tensor outputs for prediction
        outputs = {'scores': softmax_pred.name}
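Presumably the outputs dict is then registered the same way as the inputs above; a one-line sketch following the same Cloud ML convention (an assumption, not shown in the source):

        # mirrors the inputs registration above
        tf.add_to_collection('outputs', json.dumps(outputs))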
Example #40
0
def main(_):
    # Data File
    train_file_dir = FLAGS.train_file_dir
    test_file_dir = FLAGS.test_file_dir
    item_tower_file = FLAGS.item_tower_file

    # Hyper Parameters
    learning_rate = FLAGS.learning_rate
    batch_size = FLAGS.batch_size
    item_embedding_size = FLAGS.item_embedding_size
    cate_embedding_size = FLAGS.cate_embedding_size
    tag_embedding_size = FLAGS.tag_embedding_size
    is_train = FLAGS.is_train
    output_table = FLAGS.output_table
    saved_model_dir = FLAGS.saved_model_dir
    checkpoint_dir = FLAGS.checkpointDir
    oss_bucket_dir = FLAGS.buckets
    local = FLAGS.local
    recall_cnt_file = FLAGS.recall_cnt_file
    top_k_num = int(FLAGS.top_k_num)
    neg_sample_num = int(FLAGS.neg_sample_num)
    print("train_file_dir: %s" % train_file_dir)
    print("test_file_dir: %s" % test_file_dir)
    print("is_train: %d" % is_train)
    print("learning_rate: %f" % learning_rate)
    print("item_embedding_size: %d" % item_embedding_size)
    print("cate_embedding_size: %d" % cate_embedding_size)
    print("tag_embedding_size: %d" % tag_embedding_size)
    print("batch_size: %d" % batch_size)
    print("output table name: %s " % output_table)
    print("checkpoint_dir: %s " % checkpoint_dir)
    print("oss bucket dir: %s" % oss_bucket_dir)
    print("recall_cnt_file: %s" % recall_cnt_file)

    if local:
        # summary_dir = "../summary/"
        # recall_cnt_file = "../data/youtube_recall_item_cnt*"
        pass
    else:
        # oss_bucket_dir = "oss://ivwen-recsys.oss-cn-shanghai-internal.aliyuncs.com/"
        # summary_dir = oss_bucket_dir + "experiment/summary/"
        train_file_dir = oss_bucket_dir + train_file_dir
        test_file_dir = oss_bucket_dir + test_file_dir
        recall_cnt_file = oss_bucket_dir + recall_cnt_file
        item_tower_file = oss_bucket_dir + item_tower_file
        saved_model_dir = oss_bucket_dir + saved_model_dir

    # get item cnt
    # item_count, cate_count, tag_count = utils.get_item_cnt(recall_cnt_file)
    # item_tower_file = [utils.get_file_name(item_tower_file)]
    # print("item tower file: %s" % item_tower_file)
    # print("item_count: ", item_count)
    # print("cate_count: ", cate_count)
    # print("tag_count: ", tag_count)
    print("saved_model_dir: %s " % saved_model_dir)
    # GPU config
    # gpu_config = tf.ConfigProto()
    # gpu_config.gpu_options.allow_growth = True
    #

    with tf.Session() as sess:
        train_file_name = utils.get_file_name(train_file_dir)
        test_file_dir = utils.get_file_name(test_file_dir)

        two_tower_model = TwoTowerModelFRV2(
            train_file_dir=train_file_name,
            test_file_dir=test_file_dir,
            # item_tower_file=item_tower_file,
            is_train=is_train,
            item_embedding_size=item_embedding_size,
            cate_embedding_size=cate_embedding_size,
            tag_embedding_size=tag_embedding_size,
            batch_size=batch_size,
            learning_rate=learning_rate,
            local=local,
            # item_count=item_count,
            # cate_count=cate_count,
            # tag_count=tag_count,
            output_table=output_table,
            top_k_num=top_k_num,
            neg_sample_num=neg_sample_num,
            sess=sess)
        try:
            loss_sum = 0.0
            pred_step = 0

            if is_train == 1:
                train, losses = two_tower_model.train_model()
                sess.run(tf.global_variables_initializer())
                # sess.run(tf.tables_initializer())
                sess.run(tf.local_variables_initializer())
                sess.run(two_tower_model.training_init_op)
            else:
                two_tower_model.restore_model(sess, checkpoint_dir)
                # sess.run(tf.global_variables_initializer())
                # youtube_dnn_model.load_saved_model(sess, savedmodel_dir)
                print("restore model finished!!")

                two_tower_model.predict_topk_score()
                if local == 0:
                    writer = two_tower_model.write_table()

                # # load historical parameters to keep updates incremental
                # if tf.gfile.Exists(checkpoint_dir):
                #     two_tower_model.restore_model(sess, checkpoint_dir)

            while True:
                # # first way to dump run data to the timeline
                # trace = timeline.Timeline(step_stats=run_metadata.step_stats)
                # trace_file = open('timeline.ctf.json', 'w')
                # trace_file.write(trace.generate_chrome_trace_format())

                # dump run data to the profiler
                # my_profiler.add_step(step=int(train_step), run_meta=run_metadata)
                if is_train:
                    train_step = two_tower_model.global_step.eval()

                    # #
                    # _, loss, learning_rate = sess.run([train, losses, two_tower_model.learning_rate],
                    #                                   run_metadata=run_metadata, options=run_options)

                    _, loss, learning_rate = sess.run(
                        [train, losses, two_tower_model.learning_rate])
                    loss_sum += loss
                    if train_step % two_tower_model.PRINT_STEP == 0:
                        if train_step == 0:
                            print(
                                'time: %s\tEpoch: %d\tGlobal_Train_Step: %d\tTrain_loss: %.8f\tLearning_rate:%.8f'
                                % (time.strftime("%Y-%m-%d %H:%M:%S",
                                                 time.localtime()),
                                   two_tower_model.train_epoches, train_step,
                                   loss_sum, learning_rate))

                        else:
                            print(
                                'time: %s\tEpoch: %d\tGlobal_Train_Step: %d\tTrain_loss: %.8f\tLearning_rate:%.8f'
                                % (time.strftime("%Y-%m-%d %H:%M:%S",
                                                 time.localtime()),
                                   two_tower_model.train_epoches, train_step,
                                   loss_sum / two_tower_model.PRINT_STEP,
                                   learning_rate))
                            if train_step % two_tower_model.SAVE_STEP == 0:
                                two_tower_model.save_model(sess=sess,
                                                           path=checkpoint_dir)
                        loss_sum = 0.0

                    # local test
                    # user_ids, user_click_hash, target_ids, target_hash = sess.run(
                    #     [
                    #         two_tower_model.user_item_click_avg_embed,
                    #         two_tower_model.gender_one_hot,
                    #         two_tower_model.target_item_list,
                    #         two_tower_model.target_item_idx,
                    #     ])
                    # print(user_ids)
                    # print(user_click_hash)
                    # print(target_ids)
                    # print(target_hash)
                    # print(label.shape)
                    # break
                else:
                    if local == 0:
                        sess.run(writer)
                    else:
                        user_embeddings, item_embeddings, logits = sess.run([
                            tf.reduce_join(tf.as_string(
                                two_tower_model.user_embedding_final),
                                           1,
                                           separator=","),
                            tf.reduce_join(tf.as_string(
                                two_tower_model.item_embeding_final),
                                           1,
                                           separator=","),
                            two_tower_model.user_topk_item
                        ])
                        print("users:")
                        print(user_embeddings)
                        print("items:")
                        print(item_embeddings)
                        print("logits:")
                        print(logits)
                    if pred_step % 1000 == 0:
                        print("%d finished" % pred_step)
                    pred_step += 1

        except tf.errors.OutOfRangeError:
            print('time: %s\t %d records copied' %
                  (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
                   two_tower_model.global_step.eval()))
            # if is_train == 1:
            #     two_tower_model.save_model(sess=sess, path=checkpoint_dir)

            # profile_code_builder = option_builder.ProfileOptionBuilder()
            # # profile_code_builder.with_node_names(show_name_regexes=['main.*'])
            # profile_code_builder.with_min_execution_time(min_micros=15)
            # profile_code_builder.select(['micros'])  # can be switched to 'bytes' or 'occurrence'
            # profile_code_builder.order_by('micros')
            # profile_code_builder.with_max_depth(6)
            # my_profiler.profile_python(profile_code_builder.build())
            # my_profiler.profile_operations(profile_code_builder.build())
            # my_profiler.profile_name_scope(profile_code_builder.build())
            # my_profiler.profile_graph(profile_code_builder.build())

        if is_train == 1:
            print("time: %s\tckpt model save start..." %
                  time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
            two_tower_model.save_model(sess=sess, path=checkpoint_dir)
            print("time: %s\tsave_model save start..." %
                  time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
            two_tower_model.save_model_as_savedmodel(
                sess=sess,
                dir=saved_model_dir,
                inputs=two_tower_model.saved_model_inputs,
                outputs=two_tower_model.saved_model_outputs)
Example #41
0
 def _detokenize_tensor(self, tokens):
     return tf.reduce_join(tokens, axis=0, separator=" ")
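A quick standalone check of the same join on toy tokens:

import tensorflow as tf

tokens = tf.constant(["Hello", "world", "!"])
with tf.Session() as sess:
    print(sess.run(tf.reduce_join(tokens, axis=0, separator=" ")))  # b'Hello world !'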
Example #42
0
sparse_decoded, log_prob = tf.nn.ctc_greedy_decoder(logits, seqs_len)

dense_decoded = tf.sparse_to_dense(sparse_decoded[0].indices,
                                   sparse_decoded[0].dense_shape,
                                   sparse_decoded[0].values)

sess.run(tf.global_variables_initializer())

table = tf.contrib.lookup.index_to_string_table_from_file(params.get('char2idx'), key_column_index=0,
                                                          value_column_index=1, delimiter='\t',
                                                          default_value=' ')
tf.tables_initializer().run(session=sess)

expected_chars = table.lookup(dense_decoded)
join_expected = tf.reduce_join(expected_chars, separator='', axis=1)

test = dense_decoded.eval(session=sess)

#test_seqs_len = length(test)

print(test.shape)
#print(test_seqs_len.eval(session = sess))

#logits = tf.transpose(features['logits'], (1,0,2))
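A self-contained sketch of just the lookup-and-join step, with a toy character table inlined instead of the file behind params.get('char2idx'):

import tensorflow as tf

dense_decoded = tf.constant([[0, 1, 2], [2, 1, 0]], dtype=tf.int64)  # toy decoder output
table = tf.contrib.lookup.index_to_string_table_from_tensor(
    tf.constant(['a', 'b', 'c']), default_value=' ')
expected_chars = table.lookup(dense_decoded)
join_expected = tf.reduce_join(expected_chars, separator='', axis=1)
with tf.Session() as sess:
    tf.tables_initializer().run(session=sess)
    print(sess.run(join_expected))  # [b'abc' b'cba']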
Example #43
0
def tensor_predict(words_list):
    num_classes = FLAGS.num_classes
    num_layers = FLAGS.num_layers
    num_steps = FLAGS.num_steps
    embedding_size = FLAGS.embedding_size
    hidden_size = FLAGS.hidden_size
    keep_prob = FLAGS.keep_prob
    vocab_size = FLAGS.vocab_size
    vocab_path = FLAGS.vocab_path
    prop_limit = FLAGS.prop_limit
    checkpoint_path = FLAGS.checkpoint_path

    # split the 1-D string dense Tensor into a SparseTensor of words
    sentences = tf.placeholder(dtype=tf.string,
                               shape=[None],
                               name='input_sentences')
    sparse_words = tf.string_split(sentences, delimiter=' ')

    # slice SparseTensor
    valid_indices = tf.less(sparse_words.indices,
                            tf.constant([num_steps], dtype=tf.int64))
    valid_indices = tf.reshape(
        tf.split(valid_indices, [1, 1], axis=1)[1], [-1])
    valid_sparse_words = tf.sparse_retain(sparse_words, valid_indices)

    excess_indices = tf.greater_equal(sparse_words.indices,
                                      tf.constant([num_steps], dtype=tf.int64))
    excess_indices = tf.reshape(
        tf.split(excess_indices, [1, 1], axis=1)[1], [-1])
    excess_sparse_words = tf.sparse_retain(sparse_words, excess_indices)

    # sparse to dense
    words = tf.sparse_to_dense(
        sparse_indices=valid_sparse_words.indices,
        output_shape=[valid_sparse_words.dense_shape[0], num_steps],
        sparse_values=valid_sparse_words.values,
        default_value='_PAD')

    # dict words to token ids
    # with open(os.path.join(vocab_path, 'words_vocab.txt'), 'r') as data_file:
    #   words_table_list = [line.strip() for line in data_file if line.strip()]
    # words_table_tensor = tf.constant(words_table_list, dtype=tf.string)
    # words_table = lookup.index_table_from_tensor(mapping=words_table_tensor, default_value=3)
    words_table = lookup.index_table_from_file(os.path.join(
        vocab_path, 'words_vocab.txt'),
                                               default_value=3)
    words_ids = words_table.lookup(words)

    # blstm model predict
    with tf.variable_scope('model', reuse=None):
        logits, _ = model.inference(words_ids,
                                    valid_sparse_words.dense_shape[0],
                                    num_steps,
                                    vocab_size,
                                    embedding_size,
                                    hidden_size,
                                    keep_prob,
                                    num_layers,
                                    num_classes,
                                    is_training=False)

    # using softmax
    # props = tf.nn.softmax(logits)
    # max_prop_values, max_prop_indices = tf.nn.top_k(props, k=1)
    # predict_scores = tf.reshape(max_prop_values, shape=[-1, num_steps])
    # predict_labels_ids = tf.reshape(max_prop_indices, shape=[-1, num_steps])
    # predict_labels_ids = tf.to_int64(predict_labels_ids)

    # using crf
    logits = tf.reshape(logits, shape=[-1, num_steps, num_classes])
    transition_params = tf.get_variable("transitions",
                                        [num_classes, num_classes])
    sequence_length = tf.constant(num_steps,
                                  shape=[logits.get_shape()[0]],
                                  dtype=tf.int64)
    predict_labels_ids, _ = crf_utils.crf_decode(logits, transition_params,
                                                 sequence_length)
    predict_labels_ids = tf.to_int64(predict_labels_ids)
    predict_scores = tf.constant(1.0,
                                 shape=predict_labels_ids.get_shape(),
                                 dtype=tf.float32)

    # replace untrusted prop that less than prop_limit
    trusted_prop_flag = tf.greater_equal(
        predict_scores, tf.constant(prop_limit, dtype=tf.float32))
    replace_prop_labels_ids = tf.to_int64(
        tf.fill(tf.shape(predict_labels_ids), 4))
    predict_labels_ids = tf.where(trusted_prop_flag, predict_labels_ids,
                                  replace_prop_labels_ids)

    # dict token ids to labels
    # with open(os.path.join(vocab_path, 'labels_vocab.txt'), 'r') as data_file:
    #   labels_table_list = [line.strip() for line in data_file if line.strip()]
    # labels_table_tensor = tf.constant(labels_table_list, dtype=tf.string)
    # labels_table = lookup.index_to_string_table_from_tensor(mapping=labels_table_tensor, default_value='O')
    labels_table = lookup.index_to_string_table_from_file(os.path.join(
        vocab_path, 'labels_vocab.txt'),
                                                          default_value='O')
    predict_labels = labels_table.lookup(predict_labels_ids)

    # extract real blstm predict label in dense and save to sparse
    valid_sparse_predict_labels = tf.SparseTensor(
        indices=valid_sparse_words.indices,
        values=tf.gather_nd(predict_labels, valid_sparse_words.indices),
        dense_shape=valid_sparse_words.dense_shape)

    # create excess label SparseTensor with 'O'
    excess_sparse_predict_labels = tf.SparseTensor(
        indices=excess_sparse_words.indices,
        values=tf.fill(tf.shape(excess_sparse_words.values), 'O'),
        dense_shape=excess_sparse_words.dense_shape)

    # concat SparseTensor
    sparse_predict_labels = tf.SparseTensor(
        indices=tf.concat(axis=0,
                          values=[
                              valid_sparse_predict_labels.indices,
                              excess_sparse_predict_labels.indices
                          ]),
        values=tf.concat(axis=0,
                         values=[
                             valid_sparse_predict_labels.values,
                             excess_sparse_predict_labels.values
                         ]),
        dense_shape=excess_sparse_predict_labels.dense_shape)
    sparse_predict_labels = tf.sparse_reorder(sparse_predict_labels)

    # join the SparseTensor back into a 1-D string dense Tensor
    # remaining issue: num_split should equal the real batch size, but it is limited to 1 here
    join_labels_list = []
    slice_labels_list = tf.sparse_split(sp_input=sparse_predict_labels,
                                        num_split=1,
                                        axis=0)
    for slice_labels in slice_labels_list:
        slice_labels = slice_labels.values
        join_labels = tf.reduce_join(slice_labels,
                                     reduction_indices=0,
                                     separator=' ')
        join_labels_list.append(join_labels)
    format_predict_labels = tf.stack(join_labels_list, name='predict_labels')

    saver = tf.train.Saver()
    tables_init_op = tf.tables_initializer()
    with tf.Session() as sess:
        sess.run(tables_init_op)
        ckpt = tf.train.get_checkpoint_state(checkpoint_path)
        if ckpt and ckpt.model_checkpoint_path:
            print('read model from {}'.format(ckpt.model_checkpoint_path))
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('No checkpoint file found at %s' % checkpoint_path)
            return
        # crf tensor
        predict_labels_list = sess.run(format_predict_labels,
                                       feed_dict={sentences: words_list})
        # save graph into .pb file
        graph = tf.graph_util.convert_variables_to_constants(
            sess, sess.graph_def, ["init_all_tables", "predict_labels"])
        tf.train.write_graph(graph, '.', 'ner_graph.pb', as_text=False)
        return predict_labels_list
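A hypothetical invocation, assuming the FLAGS referenced above are populated and a trained checkpoint exists; the input sentence is made up:

# one space-joined label string comes back per input sentence
predicted = tensor_predict(['I live in New York'])
print(predicted)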