Example #1
 def test(self):
     content = tf.placeholder('float',[1,304,304,3])
     style = tf.placeholder('float',[1,304,304,3])
     
     content_encode = self.encoder.encoder(content,self.target_layer)
     style_encode = self.encoder.encoder(style,self.target_layer)
     
     blended = wct_tf(content_encode,style_encode,self.alpha)
     #blended = Adain(content_encode,style_encode)
     
     stylized = self.decoder.decoder(blended,self.target_layer)
     saver = tf.train.Saver()
     
     with tf.Session() as sess:
          tf.global_variables_initializer().run()
          tf.local_variables_initializer().run()
          saver.restore(sess,self.decoder_weights)
          img_c = image.load_img(self.content_path,target_size=(304,304,3))
          img_c = image.img_to_array(img_c)
          img_c = np.expand_dims(img_c,axis=0)
          
          img_s = image.load_img(self.style_path,target_size = (304,304,3))
          img_s = image.img_to_array(img_s)
          img_s = np.expand_dims(img_s,axis=0)    
          
          feed_dict = {content : img_c , style : img_s}
          
          result,e = sess.run([stylized,content_encode],feed_dict= feed_dict)
          result = result[0]
          result = np.clip(result,0,255)/255.
          #print(e)
          imsave(self.output_path,result)
Example #2
def main():
    args = parse_arguments()

    # Dataset
    dataset = Dataset(**vars(args))

    # Reset the default graph and set a graph-level seed
    tf.reset_default_graph()
    tf.set_random_seed(9)

    # Model
    model = Model(num_classes=dataset.num_classes, **vars(args))
    model.construct_model()

    # Session
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()
    tf.local_variables_initializer().run()

    # Prune
    prune.prune(args, model, sess, dataset)

    # Train and test
    train.train(args, model, sess, dataset)
    test.test(args, model, sess, dataset)

    sess.close()
    sys.exit()
Example #3
 def config_model(self):
     self.build_hetero_model()
     self.get_latent_rep()
     self.SGNN_loss()
     self.train_step_neg = tf.train.AdamOptimizer(1e-3).minimize(self.negative_sum)
     self.sess = tf.InteractiveSession()
     tf.global_variables_initializer().run()
     tf.local_variables_initializer().run()
Example #4
    def train(self):
        # Input placeholder
        inputs = tf.placeholder('float',[None,224,224,3])
        # Output (target) placeholder
        outputs = tf.placeholder('float',[None,224,224,3])
        # encoded: image after the encoder; decoded: reconstruction from the decoder;
        # decoded_encoded: the reconstruction passed through the encoder again
        encoded,decoded,decoded_encoded = self.encoder_decoder(inputs)
        # Pixel (content) loss between the reconstruction and the original (decoded, outputs)
        pixel_loss = tf.losses.mean_squared_error(decoded,outputs)
        # Feature loss between the encoded original (encoded) and the re-encoded reconstruction (decoded_encoded)
        feature_loss = tf.losses.mean_squared_error(decoded_encoded,encoded)
        # loss = pixel_loss+ feature_loss
        loss=0.5*pixel_loss + 0.1*feature_loss
        opt= tf.train.AdamOptimizer(0.0001).minimize(loss)
        # Path to the training TFRecord file
        tfrecords_filename =  self.tfrecord_path
        filename_queue = tf.train.string_input_producer([tfrecords_filename],num_epochs=100)

        reader = tf.TFRecordReader()  
        _, serialized_example = reader.read(filename_queue)

        feature2 = {  
                    'image_raw': tf.FixedLenFeature([], tf.string)} 
        features = tf.parse_single_example(serialized_example, features=feature2)  
        image = tf.decode_raw(features['image_raw'], tf.uint8) 
        image = tf.reshape(image,[224,224,3])   
        images = tf.train.shuffle_batch([image], batch_size=self.batch_size, capacity=30, min_after_dequeue=10)
        
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
             tf.global_variables_initializer().run()
             tf.local_variables_initializer().run()
        

        
             coord = tf.train.Coordinator()  
             threads = tf.train.start_queue_runners(coord=coord)  
  
             saver = tf.train.Saver()
             

             for i in range (self.max_iterator):
                 batch_x=sess.run(images)
                 feed_dict = {inputs:batch_x, outputs : batch_x}
            
                 _,p_loss,f_loss,reconstruct_imgs=sess.run([opt,pixel_loss,feature_loss,decoded],feed_dict=feed_dict)
            
                 print('step %d |  pixel_loss is %f   | feature_loss is %f  |'%(i,p_loss,f_loss))
            
                 if i % 5 ==0:
                    result_img = np.clip(reconstruct_imgs[0],0,255).astype(np.uint8)
                    imsave('result.jpg',result_img)
                
             saver.save(sess,self.checkpoint_path)
             coord.request_stop()  
             coord.join(threads)
Example #5
def load_entity_matrices(base_dir):
  """Load entity co-occurrence and co-reference matrices."""
  cooccur_ckpt = os.path.join(base_dir, "ent2ment.npz")
  coref_ckpt = os.path.join(base_dir, "coref.npz")

  tf.reset_default_graph()
  co_data, co_indices, co_rowsplits = search_utils.load_ragged_matrix(
      "ent2ment", cooccur_ckpt)
  coref_map = search_utils.load_database(
      "coref", None, coref_ckpt, dtype=tf.int32)

  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    tf.logging.info("Loading ragged matrix...")
    np_data, np_indices, np_indptr = sess.run(
        [co_data, co_indices, co_rowsplits])
    tf.logging.info("Loading coref map...")
    np_coref = sess.run(coref_map)
    num_entities = np_indptr.shape[0] - 1
    num_mentions = np_coref.shape[0]
    tf.logging.info("Creating sparse matrix %d x %d...", num_entities,
                    num_mentions)
    sp_cooccur = sp.csr_matrix((np_data, np_indices, np_indptr),
                               shape=(num_entities, num_mentions))
    tf.logging.info("Creating sparse matrix %d x %d...", num_mentions,
                    num_entities)
    sp_coref = sp.csr_matrix((np.ones_like(np_coref, dtype=np.int32),
                              (np.arange(np_coref.shape[0]), np_coref)),
                             shape=(num_mentions, num_entities))

  metadata_file = os.path.join(base_dir, "entities.json")
  entity2id, _ = json.load(tf.gfile.Open(metadata_file))

  return sp_cooccur, sp_coref, entity2id
Example #6
def _get_scaffold(additional_initializers):
    return tf.train.Scaffold(
        init_op=control_flow_ops.group(tf.global_variables_initializer(),
                                       *additional_initializers),
        local_init_op=tf.group(tf.local_variables_initializer(),
                               tf.train.Scaffold.default_local_init_op(),
                               *additional_initializers))
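A minimal usage sketch for a Scaffold like the one above, written against the TF1-style tensorflow.compat.v1 API and not taken from the source: when the scaffold is passed to tf.train.MonitoredTrainingSession, its init_op and local_init_op run automatically before the first sess.run.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

counter = tf.Variable(0, name="counter")   # a global variable
increment = tf.assign_add(counter, 1)

scaffold = tf.train.Scaffold()             # default init/local_init ops
with tf.train.MonitoredTrainingSession(scaffold=scaffold) as sess:
    # No explicit initializer calls are needed; the scaffold already ran them.
    print(sess.run(increment))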
Example #7
def gen_poem(begin_word):


    saver = tf.train.Saver(tf.global_variables())
    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    with tf.Session() as sess:
        sess.run(init_op)

        checkpoint = tf.train.latest_checkpoint(FLAG.result_dir)
        saver.restore(sess, checkpoint)

        x = np.array([list(map(word_int_map.get, FLAG.start_token))])

        [predict, last_state] = sess.run([end_points['prediction'], end_points['last_state']],
                                         feed_dict={input_data: x})
        if begin_word:
            word = begin_word
        else:
            word = to_word(predict, vocabularies)
        poem_ = ''

        i = 0
        while word != FLAG.end_token:
            poem_ += word
            i += 1
            if i >= 3:
                break
            x = np.zeros((1, 1))
            x[0, 0] = word_int_map[word]
            [predict, last_state] = sess.run([end_points['prediction'], end_points['last_state']],
                                             feed_dict={input_data: x, end_points['initial_state']: last_state})
            word = to_word(predict, vocabularies)
        sess.close()
        return poem_
Example #8
  def test_next_production_rule_accuracy_with_length(self):
    partial_sequence_lengths = tf.placeholder(tf.int32, shape=[None])
    value, update_op = metrics.next_production_rule_accuracy(
        next_production_rules=self.next_production_rules,
        predict_next_production_rules=self.predict_next_production_rules,
        partial_sequence_lengths=partial_sequence_lengths,
        target_length=1)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(
          update_op,
          feed_dict={
              self.next_production_rules:
                  self.next_production_rules_values_0,
              self.predict_next_production_rules:
                  self.predict_next_production_rules_values_0,
              partial_sequence_lengths: [42, 42, 1, 42]})
      self.assertAlmostEqual(value.eval(), 1.)
      sess.run(
          update_op,
          feed_dict={
              self.next_production_rules:
                  self.next_production_rules_values_1,
              self.predict_next_production_rules:
                  self.predict_next_production_rules_values_1,
              partial_sequence_lengths: [42, 42, 42, 42, 42, 42]})
      self.assertAlmostEqual(value.eval(), 1.)
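The test above runs tf.local_variables_initializer() first because streaming metrics in the tf.metrics family keep their running totals in local variables; a minimal sketch of that mechanism, using assumed values that are not from the source:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

labels = tf.constant([1, 0, 1, 1])
predictions = tf.constant([1, 0, 0, 1])
accuracy, update_op = tf.metrics.accuracy(labels, predictions)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # zero the metric's counters
    sess.run(update_op)                         # accumulate one batch
    print(sess.run(accuracy))                   # 0.75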
Example #9
    def test_custom_datasets_provider(self):
        if tf.executing_eagerly():
            # dataset.make_initializable_iterator is not supported when eager
            # execution is enabled.
            return
        file_pattern = os.path.join(self.testdata_dir, '*.jpg')
        batch_size = 3
        patch_size = 8
        images_ds_list = data_provider.provide_custom_datasets(
            batch_size=batch_size,
            image_file_patterns=[file_pattern, file_pattern],
            patch_size=patch_size)
        for images_ds in images_ds_list:
            self.assertListEqual([None, patch_size, patch_size, 3],
                                 images_ds.output_shapes.as_list())
            self.assertEqual(tf.float32, images_ds.output_types)

        iterators = [
            tf.data.make_initializable_iterator(x) for x in images_ds_list
        ]
        initializers = [x.initializer for x in iterators]
        img_tensors = [x.get_next() for x in iterators]
        with self.cached_session() as sess:
            sess.run(tf.local_variables_initializer())
            sess.run(initializers)
            images_out_list = sess.run(img_tensors)
            for images_out in images_out_list:
                self.assertTupleEqual((batch_size, patch_size, patch_size, 3),
                                      images_out.shape)
                self.assertTrue(np.all(np.abs(images_out) <= 1.0))
Example #10
    def init_variables(self):
        self.sess.run(tf.global_variables_initializer())
        self.sess.run(tf.local_variables_initializer())

        print('Variables')
        for i in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
            print(i)  # i.name if you want just a name
Example #11
    def __init__(self, net, batch_size, epoches, validation_step):
        '''
        Initialise the trainer
        '''

        # Create the network
        self._net = net
        # Set batch size
        self._batch_size = batch_size
        # Set number of epochs
        self._epoches = epoches
        # Calculate and display validation error every validation_step steps
        self._validation_step = validation_step
        # Minimum validation error - required for finding the best model
        self._min_error = 99999999999.9

        self._validation_error = []
        # Create tensorflow session and initialise graph and variables

        # Set the optimiser in the neural network
        self._loss_train, self._optimiser_train = self._net.set_loss_optimiser(
        )

        self._sess = tf.Session()
        self._sess.run(tf.global_variables_initializer())
        self._sess.run(tf.local_variables_initializer())
Example #12
    def build_inference(self):
        self._is_graph_constructed = True
        with self.graph.as_default():

            self.kl_fast = -tf.reduce_mean(
                self.y_pred_fast.log_prob(self.tf_y_fast))
            self.kl_slow = -tf.reduce_mean(
                self.y_pred_slow.log_prob(self.tf_y_slow))

            self.reg_latent = tf.reduce_mean(self.nn_latent.losses)
            self.reg_bias = tf.reduce_mean(
                self.nn_feature_slow.losses) + tf.reduce_mean(
                    self.nn_targets_slow.losses)
            self.reg_loss = self.reg_latent + self.reg_bias

            self.loss_fast = self.kl_fast + self.reg_latent
            self.loss_slow = self.kl_slow + self.reg_bias

            self.optim_both = tf.train.AdamOptimizer(
                learning_rate=self.learning_rate)
            self.train_op_both = self.optim_both.minimize(self.loss_slow +
                                                          self.coeff_both *
                                                          self.loss_fast)

            self.init_op = tf.group(tf.global_variables_initializer(),
                                    tf.local_variables_initializer())
            self.sess = tf.Session(graph=self.graph)
            with self.sess.as_default():
                self.sess.run(self.init_op)
Example #13
    def __init__(self,
                 session,
                 model,
                 feature_ph_dict,
                 labels_ph,
                 initialize=True):
        """Create a Trainer object for a task.

    Args:
      session: a tf.Session, used to run the dset's iterator
      model: a Model
      feature_ph_dict: maps feature names to placeholders that will hold the
        corresponding inputs.
      labels_ph: a placeholder that will hold the target labels
      initialize: If true, run initializers that erase current model parameters.
    """
        self.session = session
        self.model = model
        self.feature_ph_dict = feature_ph_dict
        self.labels_ph = labels_ph
        if initialize:
            session.run([
                tf.global_variables_initializer(),
                tf.local_variables_initializer(),
                tf.tables_initializer()
            ])
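The extra tf.tables_initializer() is needed whenever the graph builds lookup tables; a minimal sketch, assuming a tf.lookup.StaticHashTable that is not part of the source:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

keys = tf.constant(["a", "b"])
values = tf.constant([0, 1], dtype=tf.int64)
table = tf.lookup.StaticHashTable(
    tf.lookup.KeyValueTensorInitializer(keys, values), default_value=-1)
ids = table.lookup(tf.constant(["b", "c"]))

with tf.Session() as sess:
    sess.run([tf.global_variables_initializer(),
              tf.local_variables_initializer(),
              tf.tables_initializer()])  # the table is unusable until this runs
    print(sess.run(ids))                 # [ 1 -1]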
Example #14
def create_model(sess, config, cate_list):
    """モデルを読み込む"""

    print(json.dumps(config, indent=4), flush=True)
    model = Model(config, cate_list)

    print('All global variables:')
    for v in tf.global_variables():
        if v not in tf.trainable_variables():
            print('\t', v)
        else:
            print('\t', v, 'trainable')

    ckpt = tf.train.get_checkpoint_state(FLAGS.model_dir)
    if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
        print('Reloading model parameters..', flush=True)
        model.restore(sess, ckpt.model_checkpoint_path)
    else:
        if not os.path.exists(FLAGS.model_dir):
            os.makedirs(FLAGS.model_dir)
        print('Created new model parameters..', flush=True)
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

    return model
Example #15
def train(model, config, save_path):
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    log_steps = logarithmic_int_range(0, config['num_steps'],
                                      config['log_factor'], True)
    with tf.Session(config=sess_config) as sess:
        sess.run([
            tf.global_variables_initializer(),
            tf.local_variables_initializer()
        ])
        sess.run(model.init_ops['train'])
        for step in range(config['num_steps'] + 1):
            if step in log_steps:
                tensors = sess.run(model.fetches['tensors'])
                tensors = count_real_eigen_values_fraction(tensors)
                save_tensors(tensors, os.path.join(save_path, 'tensors'))
                valid_metrics = test(sess, step, model, 'valid', save_path)
                sess.run(model.init_ops['train'])
                if step > 0:
                    save_metrics(step, res['metrics'],
                                 os.path.join(save_path, 'results/train'))
                log(step, res['metrics'] if step > 0 else None, valid_metrics)
            lr = config['lr_init'] \
                * config['lr_decay'] ** (step // config["lr_step"])
            res = sess.run(model.fetches['train'],
                           feed_dict={model.feed_dict['lr']: lr})
Example #16
    def init_object_detector_graph(self, input_h, input_w, init_weights):

        self.is_train = _tf.placeholder(
            _tf.bool)  # Set flag for training or val

        # Create placeholders for image and labels
        self.images = _tf.placeholder(_tf.float32,
                                      [self.batch_size, input_h, input_w, 3],
                                      name="images")
        self.labels = _tf.placeholder(
            _tf.float32,
            [
                self.batch_size,
                self.grid_shape[0],
                self.grid_shape[1],
                self.num_anchors,
                self.num_classes + 5,
            ],
            name="labels",
        )

        self.tf_model = self.tiny_yolo(inputs=self.images,
                                       output_size=self.output_size)
        self.global_step = _tf.Variable(0, trainable=False, name="global_step")

        self.loss = self.loss_layer(self.tf_model, self.labels)
        self.base_lr = _utils.convert_shared_float_array_to_numpy(
            self.config["learning_rate"])
        self.num_iterations = int(
            _utils.convert_shared_float_array_to_numpy(
                self.config["num_iterations"]))
        self.init_steps = [
            self.num_iterations // 2,
            3 * self.num_iterations // 4,
            self.num_iterations,
        ]
        self.lrs = [
            _np.float32(self.base_lr * 10**(-i))
            for i, step in enumerate(self.init_steps)
        ]
        self.steps_tf = self.init_steps[:-1]
        self.lr = _tf.train.piecewise_constant(self.global_step, self.steps_tf,
                                               self.lrs)
        # TODO: Evaluate method to update lr in set_learning_rate()

        self.opt = _tf.train.MomentumOptimizer(self.lr, momentum=0.9)

        self.clip_value = _utils.convert_shared_float_array_to_numpy(
            self.config.get("gradient_clipping"))

        grads_and_vars = self.opt.compute_gradients(self.loss)
        clipped_gradients = [(self.ClipIfNotNone(g, self.clip_value), v)
                             for g, v in grads_and_vars]
        self.train_op = self.opt.apply_gradients(clipped_gradients,
                                                 global_step=self.global_step)

        self.sess.run(_tf.global_variables_initializer())
        self.sess.run(_tf.local_variables_initializer())

        self.load_weights(init_weights)
Example #17
def compute_one_decoding_video_metrics(iterator, feed_dict, num_videos):
    """Computes the average of all the metric for one decoding.

  Args:
    iterator: dataset iterator.
    feed_dict: feed dict to initialize iterator.
    num_videos: number of videos.

  Returns:
    all_psnr: 2-D Numpy array, shape=(num_samples, num_frames)
    all_ssim: 2-D Numpy array, shape=(num_samples, num_frames)
  """
    output, target = iterator.get_next()
    metrics = psnr_and_ssim(output, target)

    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        initializer = iterator._initializer  # pylint: disable=protected-access
        if initializer is not None:
            sess.run(initializer, feed_dict=feed_dict)

        all_psnr, all_ssim = [], []
        for i in range(num_videos):
            print("Computing video: %d" % i)
            psnr_np, ssim_np = sess.run(metrics)
            all_psnr.append(psnr_np)
            all_ssim.append(ssim_np)
        all_psnr = np.array(all_psnr)
        all_ssim = np.array(all_ssim)
        return all_psnr, all_ssim
Example #18
 def load_entities(self, base_dir):
     """Load entity ids and masks."""
     tf.reset_default_graph()
     id_ckpt = os.path.join(base_dir, "entity_ids")
     entity_ids = search_utils.load_database("entity_ids",
                                             None,
                                             id_ckpt,
                                             dtype=tf.int32)
     mask_ckpt = os.path.join(base_dir, "entity_mask")
     entity_mask = search_utils.load_database("entity_mask", None,
                                              mask_ckpt)
     with tf.Session() as sess:
         sess.run(tf.global_variables_initializer())
         sess.run(tf.local_variables_initializer())
         tf.logging.info("Loading entity ids and masks...")
         np_ent_ids, np_ent_mask = sess.run([entity_ids, entity_mask])
     tf.logging.info("Building entity count matrix...")
     entity_count_matrix = search_utils.build_count_matrix(
         np_ent_ids, np_ent_mask)
     tf.logging.info("Computing IDFs...")
     self.idfs = search_utils.counts_to_idfs(entity_count_matrix,
                                             cutoff=1e-5)
     tf.logging.info("Computing entity Tf-IDFs...")
     ent_tfidfs = search_utils.counts_to_tfidf(entity_count_matrix,
                                               self.idfs)
     self.ent_tfidfs = normalize(ent_tfidfs, norm="l2", axis=0)
Example #19
  def test_metric_fn_finetune_binary_classification(self):
    labels = tf.constant([1, 0, 1, 1])
    predicted_class = tf.constant([0, 0, 0, 1])
    siamese_example_loss = tf.constant([0.1, 0.2, 0.3, 0.4])
    is_real_example = tf.constant([1.0, 1.0, 1.0, 1.0])
    metrics_dict = metric_fns.metric_fn_finetune(
        predicted_class=predicted_class,
        labels=labels,
        siamese_example_loss=siamese_example_loss,
        is_real_example=is_real_example)
    init_g = tf.global_variables_initializer()
    init_l = tf.local_variables_initializer()
    with tf.Session() as sess:
      sess.run([init_g, init_l])
      # Runs update_op in metrics before checking the values.
      sess.run(metrics_dict)
      metrics_dict_numpy = sess.run(metrics_dict)
      self.assertEqual(metrics_dict_numpy["accuracy"][1].shape, ())
      self.assertAllClose(metrics_dict_numpy["accuracy"][1], 0.5)
      self.assertDTypeEqual(metrics_dict_numpy["accuracy"][1], np.float32)

      self.assertEqual(metrics_dict_numpy["precision"][1].shape, ())
      self.assertAllClose(metrics_dict_numpy["precision"][1], 1)
      self.assertDTypeEqual(metrics_dict_numpy["precision"][1], np.float32)

      self.assertEqual(metrics_dict_numpy["recall"][1].shape, ())
      self.assertAllClose(metrics_dict_numpy["recall"][1], 0.333333)
      self.assertDTypeEqual(metrics_dict_numpy["recall"][1], np.float32)

      self.assertEqual(metrics_dict_numpy["siamese_loss"][1].shape, ())
      self.assertAllClose(metrics_dict_numpy["siamese_loss"][1], 0.25)
      self.assertDTypeEqual(metrics_dict_numpy["siamese_loss"][1], np.float32)
Example #20
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model metagraph and checkpoint
            print('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = get_model_filenames(
                os.path.expanduser(args.model_dir))

            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)

            model_dir_exp = os.path.expanduser(args.model_dir)
            saver = tf.train.import_meta_graph(os.path.join(
                model_dir_exp, meta_file),
                                               clear_devices=True)
            tf.get_default_session().run(tf.global_variables_initializer())
            tf.get_default_session().run(tf.local_variables_initializer())
            saver.restore(tf.get_default_session(),
                          os.path.join(model_dir_exp, ckpt_file))

            # Retrieve the protobuf graph definition and fix the batch norm nodes
            input_graph_def = sess.graph.as_graph_def()

            # Freeze the graph def
            output_graph_def = freeze_graph_def(sess, input_graph_def,
                                                'embeddings,label_batch')

        # Serialize and dump the output graph to the filesystem
        with tf.gfile.GFile(args.output_file, 'wb') as f:
            f.write(output_graph_def.SerializeToString())
        print("%d ops in the final graph: %s" %
              (len(output_graph_def.node), args.output_file))
Example #21
    def execute_cpu_tf1(self, compute_fn, inputs, graph=None):
        """Executes compute_fn on CPU with Tensorflow 1.X.

    Args:
      compute_fn: a function containing Tensorflow computation that takes a list
        of input numpy tensors, performs computation and returns output numpy
        tensors.
      inputs: a list of numpy arrays to feed input to the `compute_fn`.
      graph: (optional) If not None, provided `graph` is used for computation
        instead of a brand new tf.Graph().

    Returns:
      A list of numpy arrays or a single numpy array.
    """
        if self.is_tf2():
            raise ValueError(
                'Required version Tensorflow 1.X is not available.')
        with self.session(graph=(graph or tf.Graph())) as sess:
            placeholders = [
                tf.placeholder_with_default(v, v.shape) for v in inputs
            ]
            results = compute_fn(*placeholders)
            if (not (isinstance(results, dict)
                     or isinstance(results, tf.Tensor))
                    and hasattr(results, '__iter__')):
                results = list(results)
            sess.run([
                tf.global_variables_initializer(),
                tf.tables_initializer(),
                tf.local_variables_initializer()
            ])
            materialized_results = sess.run(results,
                                            feed_dict=dict(
                                                zip(placeholders, inputs)))
        return self.maybe_extract_single_output(materialized_results)
Example #22
def init():
    global g_tf_sess, probabilities, label_dict, input_images
    subprocess.run(["git", "clone", "https://github.com/tensorflow/models/"])
    sys.path.append("./models/research/slim")

    parser = argparse.ArgumentParser(
        description="Start a tensorflow model serving")
    parser.add_argument('--model_name', dest="model_name", required=True)
    parser.add_argument('--labels_dir', dest="labels_dir", required=True)
    args, _ = parser.parse_known_args()
    from nets import inception_v3, inception_utils
    label_dict = get_class_label_dict(args.labels_dir)
    classes_num = len(label_dict)
    tf.disable_v2_behavior()
    with slim.arg_scope(inception_utils.inception_arg_scope()):
        input_images = tf.placeholder(tf.float32,
                                      [1, image_size, image_size, num_channel])
        logits, _ = inception_v3.inception_v3(input_images,
                                              num_classes=classes_num,
                                              is_training=False)
        probabilities = tf.argmax(logits, 1)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    g_tf_sess = tf.Session(config=config)
    g_tf_sess.run(tf.global_variables_initializer())
    g_tf_sess.run(tf.local_variables_initializer())

    model_path = Model.get_model_path(args.model_name)
    saver = tf.train.Saver()
    saver.restore(g_tf_sess, model_path)
Example #23
    def test_custom_data_provider(self):
        if tf.executing_eagerly():
            # dataset.make_initializable_iterator is not supported when eager
            # execution is enabled.
            return
        file_pattern = os.path.join(self.testdata_dir, '*.jpg')
        batch_size = 3
        patch_size = 8
        images_list = data_provider.provide_custom_data(
            batch_size=batch_size,
            image_file_patterns=[file_pattern, file_pattern],
            patch_size=patch_size)
        for images in images_list:
            self.assertListEqual([batch_size, patch_size, patch_size, 3],
                                 images.shape.as_list())
            self.assertEqual(tf.float32, images.dtype)

        with self.cached_session() as sess:
            sess.run(tf.local_variables_initializer())
            sess.run(tf.tables_initializer())
            images_out_list = sess.run(images_list)
            for images_out in images_out_list:
                self.assertTupleEqual((batch_size, patch_size, patch_size, 3),
                                      images_out.shape)
                self.assertTrue(np.all(np.abs(images_out) <= 1.0))
Example #24
def main(_):
    runner_config = load_runner_config()
    model_config = runner_config["model_config"]
    rel_module_path = ""  # empty base dir
    model = importlib.import_module(rel_module_path + runner_config["name"])
    with tf.Graph().as_default() as graph:
        with tf.Session(graph=graph) as session:
            text = tf.placeholder(tf.string, shape=[1], name="Input")
            prxlayer = projection_layers.ProjectionLayer(
                model_config, base_layers.TFLITE)
            encoder = model.Encoder(model_config, base_layers.TFLITE)
            projection, seq_length = prxlayer(text)
            logits = encoder(projection, seq_length)
            if FLAGS.output == "logits":
                outputs = logits
            elif FLAGS.output == "sigmoid":
                outputs = tf.math.sigmoid(logits)
            else:
                assert FLAGS.output == "softmax", "Unexpected output"
                outputs = tf.nn.softmax(logits)

            session.run(tf.global_variables_initializer())
            session.run(tf.local_variables_initializer())
            saver = tf.train.Saver()
            saver.restore(session,
                          tf.train.latest_checkpoint(FLAGS.output_dir))
            tflite_fb = tflite_utils.generate_tflite(session, graph, [text],
                                                     [outputs])
            output_file_name = os.path.join(FLAGS.output_dir, "tflite.fb")
            with tf.gfile.Open(output_file_name, "wb") as f:
                f.write(tflite_fb)
Example #25
  def test_next_production_rule_valid_ratio_with_length(self):
    partial_sequence_lengths = tf.placeholder(tf.int32, shape=[None])
    value, update_op = metrics.next_production_rule_valid_ratio(
        unmasked_probabilities_batch=self.unmasked_probabilities_batch,
        next_production_rule_masks=self.next_production_rule_masks,
        partial_sequence_lengths=partial_sequence_lengths,
        target_length=1)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(
          update_op,
          feed_dict={
              self.unmasked_probabilities_batch:
                  self.unmasked_probabilities_batch_values_0,
              self.next_production_rule_masks:
                  self.next_production_rule_masks_values_0,
              partial_sequence_lengths: [42, 1]})
      # Only the second example has matched partial sequence length, thus will
      # be used to compute valid ratio. The next production rule in this example
      # is valid. So the mean ratio is 1.
      self.assertAlmostEqual(value.eval(), 1.)
      sess.run(
          update_op,
          feed_dict={
              self.unmasked_probabilities_batch:
                  self.unmasked_probabilities_batch_values_1,
              self.next_production_rule_masks:
                  self.next_production_rule_masks_values_1,
              partial_sequence_lengths: [1, 42]})
      # Only the first example has matched partial sequence length, thus will
      # be used to compute valid ratio. The next production rule in this example
      # is not valid. So the mean ratio is (1. + 0.) / 2 = 0.5
      self.assertAlmostEqual(value.eval(), 0.5)
Example #26
    def build_computational_graph(self):
        self.input_batch = tf.placeholder(
            tf.float32,
            shape=[None, self.frame_shape[0], self.frame_shape[1], 3])

        # to implement the learning rate decay
        self.curr_learn_rate = tf.Variable(parameters.VAE_LEARNING_RATE,
                                           trainable=False)
        global_step = tf.Variable(0, name='global_step', trainable=False)

        # calculate new learning rate value
        self.inc_global_step = tf.assign(global_step, global_step + 1)
        self.reset_global_step = tf.assign(global_step, 0)

        self.encoder()
        self.reparametrization_trick()
        self.decoder()
        self.define_optimizer()

        #Init Session
        init_vars = [
            tf.local_variables_initializer(),
            tf.global_variables_initializer()
        ]
        gpu_options = tf.GPUOptions(allow_growth=True)
        self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        self.sess.run(init_vars)
        tf.set_random_seed(1)
        self.collect_assign_ops()
Example #27
def initialize_variables(sess, saver, logdir, checkpoint=None, resume=None):
    """Initialize or restore variables from a checkpoint if available.

  Args:
    sess: Session to initialize variables in.
    saver: Saver to restore variables.
    logdir: Directory to search for checkpoints.
    checkpoint: Specify what checkpoint name to use; defaults to most recent.
    resume: Whether to expect recovering a checkpoint or starting a new run.

  Raises:
    ValueError: If resume expected but no log directory specified.
    RuntimeError: If no resume expected but a checkpoint was found.
  """
    sess.run(
        tf.group(tf.local_variables_initializer(),
                 tf.global_variables_initializer()))
    if resume and not (logdir or checkpoint):
        raise ValueError('Need to specify logdir to resume a checkpoint.')
    if logdir:
        state = tf.train.get_checkpoint_state(logdir)
        if checkpoint:
            checkpoint = os.path.join(logdir, checkpoint)
        if not checkpoint and state and state.model_checkpoint_path:
            checkpoint = state.model_checkpoint_path
        if checkpoint and resume is False:
            message = 'Found unexpected checkpoint when starting a new run.'
            raise RuntimeError(message)
        if checkpoint:
            saver.restore(sess, checkpoint)
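A hypothetical call site for the helper above; the variable, session, saver, and log directory are assumptions rather than anything from the source:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

weights = tf.Variable(tf.zeros([4]), name="weights")  # stand-in model variable
sess = tf.Session()
saver = tf.train.Saver()
# Start a fresh run: variables get initialized; if a checkpoint were already
# present in logdir, the helper above would raise RuntimeError.
initialize_variables(sess, saver, logdir='/tmp/experiment', resume=False)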
Example #28
    def test_bucket_by_quantiles(self):
        with self.test_session() as sess:
            data = tf.data.Dataset.from_tensor_slices(list(range(10))).repeat()
            data = data.apply(
                ops.bucket_by_quantiles(len_fn=lambda x: x,
                                        batch_size=4,
                                        n_buckets=2,
                                        hist_bounds=[2, 4, 6, 8]))
            it = data.make_initializable_iterator()
            sess.run(it.initializer)
            sess.run(tf.local_variables_initializer())
            next_op = it.get_next()

            # Let the model gather statistics; it sees 4*5 = 20 examples
            # (2 epochs), so each bin should have a count of 4.
            for _ in range(5):
                sess.run(next_op)

            counts = sess.run(tf.local_variables()[0])
            self.assertEqual(counts.tolist(), [4, 8, 12, 16, 20])

            # At this point the model should perfectly quantize the input
            for _ in range(4):
                out = sess.run(next_op)
                if out[0] < 5:
                    self.assertAllInRange(out, 0, 5)
                else:
                    self.assertAllInRange(out, 5, 10)
Example #29
    def callback(self):
        # Convert the camera image to a cv image
        pil_image = PIL.Image.open('picture.jpg').convert('RGB')
        open_cv_image = np.array(pil_image)
        # Convert RGB to BGR
        cv_image = open_cv_image[:, :, ::-1].copy()
        hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)

        # Mask the goal in the image
        g_mask = cv2.inRange(hsv, self.g_lower_bound, self.g_upper_bound)
        res = cv2.bitwise_and(cv_image, cv_image, mask=g_mask)
        res[g_mask == 255] = (0, 0, 255)

        obs = np.squeeze(res)
        new_img = cv2.resize(obs, TRAINING_DIMS, interpolation=cv2.INTER_AREA)

        np_img = new_img[np.newaxis, :, :, :]
        print(np_img.shape)
        np_img = np_img / 255.0

        feed_dict = {
            "visual_observation_0:0": np_img,
            "batch_size:0": 1,
            "sequence_length:0": 1
        }
        self.sess.run(tf.global_variables_initializer())
        self.sess.run(tf.local_variables_initializer())
        action_probs = self.sess.run("action_probs:0", feed_dict=feed_dict)
        action = np.argmax(action_probs)
        cv2.imwrite("C:/Users/drewb/PycharmProjects/SADrone/output.png",
                    new_img)
Example #30
    def __init__(self, model_name, model_dir, corpus_file, substr_len):
        self.model = model_name
        self.model_dir = model_dir
        self.corpus_file = corpus_file
        self.log_dir = "./log/predict/%s" % self.model
        assert substr_len
        self.substr_len = substr_len + 2

        print('## loading corpus from %s' % self.model_dir)
        # Load the corpus
        # poems_vector: 2-D ndarray, the corpus matrix; each row is one sample,
        #   with every character encoded by its index
        # word_to_int: dict mapping each character to its index
        # vocabularies: vocabulary list, ordered from highest to lowest frequency
        poems_vector, self.word_int_map, self.vocabularies = process_poems(self.corpus_file)

        # Build the RNN model
        graph = tf.Graph()
        with graph.as_default():
            self.input_data = tf.placeholder(tf.int32, [1, 2, 1], name='character')
            self.pos_mat = tf.placeholder(tf.int32, [1, 2, 1], name='position')
            rnn = RNNModel(
                self.model, num_layers=2, rnn_size=64, batch_size=64, vocabularies=self.vocabularies, 
                add_dim=add_feature_dim, substr_len=self.substr_len
            )
            self.endpoints = rnn.predict(input_data=self.input_data, add_data=self.pos_mat)
            saver = tf.train.Saver(tf.global_variables())
            init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())

        self.sess = tf.Session(graph = graph)
        self.sess.run(init_op)       # init

        # Look up the most recent checkpoint
        checkpoint = tf.train.latest_checkpoint(self.model_dir)
        # Restore from it
        saver.restore(self.sess, checkpoint)