Example #1
def main(argv):
    """Run the training experiment."""
    # Define model parameters
    if FLAGS.abs_and_rel:
        num_classes = 12
    else:
        num_classes = 6

    params = tf.contrib.training.HParams(
        learning_rate=FLAGS.learning_rate,
        n_classes=num_classes,
        train_steps=TRAIN_STEPS * FLAGS.epochs,
        min_eval_frequency=TRAIN_STEPS * 10  # evaluate every 10 epochs
    )

    # Session config
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_fraction

    # Set the run_config and the directory to save the model and stats
    run_config = tf.contrib.learn.RunConfig(session_config=config)
    run_config = run_config.replace(model_dir='./' + FLAGS.save_dir)

    learn_runner.run(
        experiment_fn=get_experiment_fn(
            FLAGS.dataset_dir),  # First-class function
        run_config=run_config,  # RunConfig
        schedule="train_and_evaluate",  # What to run
        hparams=params  # HParams
    )
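Note: get_experiment_fn is defined elsewhere in this example's repository; learn_runner.run only needs it to return a function taking (run_config, hparams) and producing a tf.contrib.learn.Experiment. A minimal sketch of that shape, assuming hypothetical build_estimator and make_input_fn helpers that are not part of the original code:

def get_experiment_fn(dataset_dir):
    """Return an experiment_fn compatible with learn_runner.run."""
    def experiment_fn(run_config, hparams):
        # build_estimator and make_input_fn are assumed placeholders.
        return tf.contrib.learn.Experiment(
            estimator=build_estimator(run_config, hparams),
            train_input_fn=make_input_fn(dataset_dir, mode='train'),
            eval_input_fn=make_input_fn(dataset_dir, mode='eval'),
            train_steps=hparams.train_steps,
            min_eval_frequency=hparams.min_eval_frequency)
    return experiment_fn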
Example #2
def train_and_eval():
  """Train and eval routine."""
  learn_runner.run(
      experiment_fn=_experiment_fn,
      schedule=FLAGS.schedule,
      run_config=_get_config(),
      hparams=_get_hparams())
def main(unused_argv):

  # Load training and eval data
  mnist = read_data_sets(FLAGS.data_dir,
      source_url=FLAGS.datasource_url)

  train_data = mnist.train.images  # Returns np.array
  train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
  eval_data = mnist.test.images  # Returns np.array
  eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)


  def serving_input_receiver_fn():
      feature_tensor = tf.placeholder(tf.float32, [None, 784])
      return tf.estimator.export.ServingInputReceiver({'x': feature_tensor}, {'x': feature_tensor})

  learn_runner.run(
      generate_experiment_fn(
          min_eval_frequency=1,
          train_steps=FLAGS.num_steps,
          eval_steps=FLAGS.eval_steps,
          export_strategies=[saved_model_export_utils.make_export_strategy(
              serving_input_receiver_fn,
              exports_to_keep=1
          )]
      ),
      run_config=tf.contrib.learn.RunConfig().replace(
          model_dir=FLAGS.job_dir, save_checkpoints_steps=1000),
      hparams=hparam.HParams(dataset=mnist.train, eval_data=eval_data, eval_labels=eval_labels),
  )
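The generate_experiment_fn factory used above (and in several later examples) typically just forwards its keyword arguments into the Experiment constructor. A hedged sketch of that pattern, assuming hypothetical build_estimator, train_input_fn and eval_input_fn names that are not part of the original code:

def generate_experiment_fn(**experiment_args):
    """Create an experiment_fn that forwards extra args to Experiment."""
    def _experiment_fn(run_config, hparams):
        # train_steps, eval_steps, min_eval_frequency, export_strategies
        # and similar keywords arrive via experiment_args.
        return tf.contrib.learn.Experiment(
            estimator=build_estimator(run_config, hparams),
            train_input_fn=train_input_fn,
            eval_input_fn=eval_input_fn,
            **experiment_args)
    return _experiment_fn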
def run_experiment(argv=None):

    run_config = tf.contrib.learn.RunConfig()
    run_config = run_config.replace(model_dir=FLAGS.experiment_dir)

    learn_runner.run(experiment_fn=experiment_fn,
                     run_config=run_config,
                     schedule='train_and_evaluate')
def train(argv=None):
    run_config = tf.contrib.learn.RunConfig(
        model_dir=hparams.model_dir,
        save_checkpoints_steps=hparams.save_checkpoints_steps,
    )

    learn_runner.run(experiment_fn=experiment_fn,
                     run_config=run_config,
                     schedule="train",
                     hparams=hparams)
Example #7
def main():
  run_config = tf.contrib.learn.RunConfig()
  run_config = run_config.replace(
      model_dir='/tmp/est_model',
      save_summary_steps=1,
      save_checkpoints_steps=10
  )

  learn_runner.run(experiment_fn=make_experiment,
                   run_config=run_config,
                   schedule='train_and_evaluate')
Example #8
def main(_):
    run_config = tf.contrib.learn.RunConfig(model_dir='/tmp/cus_model',
                                            save_summary_steps=100,
                                            save_checkpoints_steps=100)

    hparams = tf.contrib.training.HParams(learning_rate=0.003)

    learn_runner.run(experiment_fn=experiment_fn,
                     run_config=run_config,
                     hparams=hparams,
                     schedule='train_and_evaluate')
Example #9
def main(_):
    config = tf.contrib.learn.RunConfig(model_dir='/tmp/numerai/logs',
                                        save_summary_steps=100,
                                        save_checkpoints_steps=100)

    params = tf.contrib.training.HParams(learning_rate=0.00005)

    learn_runner.run(experiment_fn=_experiment_fn,
                     run_config=config,
                     hparams=params,
                     schedule='train_and_evaluate')
Example #10
    def train(self):
        params = tf.contrib.training.HParams(
            train_steps=self.max_steps, min_eval_frequency=self.snapshot_iter)

        run_config = tf.contrib.learn.RunConfig()
        run_config = run_config.replace(
            model_dir=self.snapshot_dir,
            save_checkpoints_steps=self.snapshot_iter)

        learn_runner.run(experiment_fn=self.experiment_fn,
                         run_config=run_config,
                         schedule="train_and_evaluate",
                         hparams=params)
    def run_experiment(self, experiment_fn):
        """Run the training experiment."""
        # Define model parameters
        hparams = HParams()
        for k, v in self.default_param_dict.items():
            hparams.add_hparam(k, v)

        learn_runner.run(
            experiment_fn=experiment_fn,  # First-class function
            run_config=self.run_config,  # RunConfig
            schedule="train_and_evaluate",  # What to run
            hparams=hparams  # HParams
        )
Example #12
def main(unused_argv):
    learn_runner.run(experiment_fn=_make_experiment_fn,
                     output_dir=FLAGS.output_dir,
                     schedule='train_and_evaluate')

    # Run inference on the test dataset
    feature_columns, test_input_fn = _make_input_fn('test')

    estimator = _get_tfbt(FLAGS.output_dir, feature_columns)
    results = estimator.predict(input_fn=test_input_fn)

    y_predict = np.array([r['probabilities'][1] for r in results])
    np.save(os.path.join(FLAGS.output_dir, 'pred_tf.npy'), y_predict)
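This example (like Examples #23 and #24 below) uses the older learn_runner calling convention: when output_dir is passed, the experiment function receives that directory instead of (run_config, hparams). A hedged sketch of what _make_experiment_fn could look like in that style, reusing the _get_tfbt and _make_input_fn names from the code above; the body itself is an assumption, not the original implementation:

def _make_experiment_fn(output_dir):
    feature_columns, train_input_fn = _make_input_fn('train')
    _, eval_input_fn = _make_input_fn('eval')
    return tf.contrib.learn.Experiment(
        estimator=_get_tfbt(output_dir, feature_columns),
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn)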
Example #13
    def run_experiment(self, model_dir):
        """Runs the training experiment."""
        # Set the run_config and the directory to save the model and stats
        #with self._graph.as_default():
        run_config = tf.contrib.learn.RunConfig()
        run_config = run_config.replace(model_dir=model_dir)
        run_config = run_config.replace(save_summary_steps=500)

        learn_runner.run(
            experiment_fn=self.experiment_fn,  # First-class function
            run_config=run_config,  # RunConfig
            schedule="train_and_evaluate",  # What to run
            hparams=self._params  # HParams
        )
Example #14
def main(argv):
    hparams = HParams(**HPARAMS)
    run_config = RunConfig(model_dir='./save')

    if len(argv) < 2 or argv[1] == 'train':
        learn_runner.run(
            experiment_fn=experiment_fn,
            run_config=run_config,
            schedule="train_and_evaluate",
            hparams=hparams,
        )
    elif argv[1] == 'predict':
        pass
    else:
        print('Unknown Operation.')
Example #15
def run_experiment(argv=None):
    ''' Run the training experiment '''
    # Define the model parameters
    params = tf.contrib.training.HParams(learning_rate=0.002,
                                         n_classes=10,
                                         train_steps=5000,
                                         min_eval_frequency=100,
                                         batch_size=FLAGS.batch_size)

    # Set the run_config and the directory to save the model and stats
    run_config = tf.contrib.learn.RunConfig(model_dir=FLAGS.model_dir)

    learn_runner.run(
        experiment_fn=experiment_fn,  # First-class function
        run_config=run_config,  # RunConfig
        schedule="train_and_evaluate",  # What to run
        hparams=params)
Example #16
def run_and_get_loss(params, run_config):
    # dataset = preprocess.get_dataset(data.DATA_PATH)
    # threading.Thread(target=lambda: random_sample(dataset))
    runner = learn_runner.run(experiment_fn=experiment_fn,
                              run_config=run_config,
                              schedule="train_and_evaluate",
                              hparams=params)
    return runner[0]['loss']
Example #17
def main():

    args_parser = argparse.ArgumentParser()
    args = parameters.initialise_arguments(args_parser)
    parameters.HYPER_PARAMS = hparam.HParams(**args.__dict__)

    # Set python level verbosity
    tf.logging.set_verbosity(args.verbosity)

    # Set C++ Graph Execution level verbosity
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(tf.logging.__dict__[args.verbosity] / 10)

    # Directory to store output model and checkpoints
    output_dir = args.job_dir

    # If job_dir_reuse is False then remove the job_dir if it exists
    if not args.reuse_job_dir:
        if tf.gfile.Exists(args.job_dir):
            tf.gfile.DeleteRecursively(args.job_dir)
            tf.logging.info("Deleted job_dir {} to avoid re-use".format(args.job_dir))
        else:
            tf.logging.info("No job_dir available to delete")
    else:
        tf.logging.info("Reusing job_dir {} if it exists".format(args.job_dir))

    # Run the training experiment
    learn_runner.run(
        experiment.generate_experiment_fn(
            min_eval_frequency=args.min_eval_frequency,
            eval_delay_secs=args.eval_delay_secs,
            train_steps=args.train_steps,
            eval_steps=args.eval_steps,
            export_strategies=[saved_model_export_utils.make_export_strategy(
                serving.SERVING_FUNCTIONS[args.export_format],
                exports_to_keep=1,
                default_output_alternative_key=None,
            )]
        ),
        run_config=tf.contrib.learn.RunConfig(
            model_dir=output_dir,
            log_device_placement=True
        ),
        schedule="train_and_evaluate",
        hparams=parameters.HYPER_PARAMS
    )
Example #18
def run_experiment(argv=None):
    """Run the training experiment"""
    # Define model parameters
    params = tf.contrib.training.HParams(learning_rate=0.002,
                                         n_classes=10,
                                         train_steps=5000,
                                         min_eval_frequency=100)

    # Set the run_config and the directory to save the model and stats
    run_config = tf.contrib.learn.RunConfig()
    run_config = run_config.replace(model_dir=FLAGS.model_dir)

    learn_runner.run(
        experiment_fn=experiment_fn,  # First-class function
        run_config=run_config,
        schedule="train_and_evaluate",
        hparams=params  # HParams
    )
Example #19
def run_experiment(unused_argv):
    """Run the training experiment."""
    hyperparameters_dict = FLAGS.__flags

    # Build the hyperparameters object
    params = HParams(**hyperparameters_dict)

    # Set the seeds
    np.random.seed(params.random_seed)
    tf.set_random_seed(params.random_seed)

    # Initialise the run config
    run_config = tf.contrib.learn.RunConfig()

    # Use JIT XLA
    session_config = tf.ConfigProto()
    if params.use_jit_xla:
        session_config.graph_options.optimizer_options.global_jit_level = (
            tf.OptimizerOptions.ON_1)

    # Clean the model directory
    if os.path.exists(params.model_dir) and params.clean_model_dir:
        shutil.rmtree(params.model_dir)

    # Update the run config
    run_config = run_config.replace(tf_random_seed=params.random_seed)
    run_config = run_config.replace(model_dir=params.model_dir)
    run_config = run_config.replace(session_config=session_config)
    run_config = run_config.replace(
        save_checkpoints_steps=params.min_eval_frequency)

    # Output relevant info for inference
    ex.save_dict_json(d=params.values(),
                      path=os.path.join(params.model_dir, 'params.dict'),
                      verbose=True)
    ex.save_obj(obj=params,
                path=os.path.join(params.model_dir, 'params.pkl'),
                verbose=True)

    learn_runner.run(experiment_fn=ex.experiment_fn,
                     run_config=run_config,
                     schedule='train_and_evaluate',
                     hparams=params)
Example #20
def main(argv):
    hparams = HParams(**HPARAMS)
    run_config = RunConfig(model_dir='./save')

    if len(argv) < 2 or argv[1] == 'train':
        learn_runner.run(
            experiment_fn=experiment_fn,
            run_config=run_config,
            schedule="train_and_evaluate",
            hparams=hparams,
        )
    elif argv[1] == 'predict':
        input_fn_factory = ModelInputs(hparams.vocab_paths, 1)
        predict_input_fn, predict_input_hook = input_fn_factory.get_inputs(hparams.predict_dataset_path,
                mode=estimator.ModeKeys.PREDICT, num_infer=1)
        classifier = get_estimator(run_config, hparams)
        predictions = classifier.predict(input_fn=predict_input_fn, hooks=[predict_input_hook])
        print_predictions(predictions, hparams)
    else:
        print('Unknown Operation.')
Example #21
def main(argv):
    hparams = HParams(**HPARAMS)
    run_config = RunConfig(model_dir='./save')

    if len(argv) < 2 or argv[1] == 'train':
        learn_runner.run(
            experiment_fn=experiment_fn,
            run_config=run_config,
            schedule="train_and_evaluate",
            hparams=hparams,
        )
    elif argv[1] == 'predict':
        input_fn_factory = ModelInputs(hparams.vocab_paths, 1)
        predict_input_fn, predict_input_hook = input_fn_factory.get_inputs(
            hparams.predict_dataset_path,
            mode=estimator.ModeKeys.PREDICT,
            num_infer=1)
        classifier = get_estimator(run_config, hparams)
        predictions = classifier.predict(input_fn=predict_input_fn,
                                         hooks=[predict_input_hook])
        print_predictions(predictions, hparams)
    else:
        print('Unknown Operation.')
Example #22
def run_experiment(argv=None):
    """Run the training experiment."""
    # Define model parameters

    params = tf.contrib.training.HParams(lr=FLAGS.lr,
                                         train_steps=None,
                                         out_dim=FLAGS.out_size,
                                         depth=128,
                                         latent_size=FLAGS.latent_size,
                                         batch_size=FLAGS.batch_size)
    # Set the run_config and the directory to save the model and stats
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    # config.gpu_options.per_process_gpu_memory_fraction = 0.9
    run_config = tf.contrib.learn.RunConfig(session_config=config,
                                            save_checkpoints_secs=30 * 60,
                                            keep_checkpoint_max=70)
    run_config = run_config.replace(model_dir=FLAGS.model_dir)

    learn_runner.run(experiment_fn=experiment_fn,
                     run_config=run_config,
                     schedule="train_and_evaluate",
                     hparams=params)
Example #23
def main(unused_argv):
  learn_runner.run(
      experiment_fn=_make_experiment_fn,
      output_dir=FLAGS.output_dir,
      schedule="train_and_evaluate")
Example #24
def main(unused_argv):
  learn_runner.run(
      experiment_fn=_make_experiment_fn,
      output_dir=FLAGS.output_dir,
      schedule="train_and_evaluate")
Example #25
      choices=['JSON', 'CSV', 'EXAMPLE'],
      default='JSON'
  )

  args = parser.parse_args()

  # Set python level verbosity
  tf.logging.set_verbosity(args.verbosity)
  # Set C++ Graph Execution level verbosity
  os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(
      tf.logging.__dict__[args.verbosity] / 10)

  # Run the training job
  # learn_runner pulls configuration information from environment
  # variables using tf.learn.RunConfig and uses this configuration
  # to conditionally execute Experiment, or param server code
  learn_runner.run(
      generate_experiment_fn(
          min_eval_frequency=args.min_eval_frequency,
          eval_delay_secs=args.eval_delay_secs,
          train_steps=args.train_steps,
          eval_steps=args.eval_steps,
          export_strategies=[saved_model_export_utils.make_export_strategy(
              model.SERVING_FUNCTIONS[args.export_format],
              exports_to_keep=1
          )]
      ),
      run_config=tf.contrib.learn.RunConfig(model_dir=args.job_dir),
      hparams=hparam.HParams(**args.__dict__)
  )
Example #26
    def train(self, feed_data):
        self.train_data = feed_data["inputs"]
        self.train_label = feed_data["labels"]
        learn_runner.run(experiment_fn=self._make_experiment_fn,
                         output_dir="/tmp/mnist",
                         schedule="train_and_evaluate")
Example #27
      help='The input format of the exported SavedModel binary',
      choices=['JSON', 'CSV', 'EXAMPLE'],
      default='JSON'
  )
  """

    args = parser.parse_args()

    print(args.eval_files)
    print(args.train_files)

    # Set python level verbosity
    tf.logging.set_verbosity(args.verbosity)
    # Set C++ Graph Execution level verbosity
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(
        tf.logging.__dict__[args.verbosity] / 10)

    # Run the training job
    # learn_runner pulls configuration information from environment
    # variables using tf.learn.RunConfig and uses this configuration
    # to conditionally execute Experiment, or param server code
    learn_runner.run(
        generate_experiment_fn(
            min_eval_frequency=args.min_eval_frequency,
            eval_delay_secs=args.eval_delay_secs,
            train_steps=args.train_steps,
            eval_steps=args.eval_steps,
        ),
        run_config=tf.contrib.learn.RunConfig(model_dir=args.job_dir),
        hparams=hparam.HParams(**args.__dict__))
Example #28
    queue_capacity=3 * BATCH_SIZE + 10,
    num_threads=1,
)
val_input_fn = generator_input_fn(
    x=data_generator_fn(val_meta_list),
    target_key=TARGET_KEY,
    batch_size=BATCH_SIZE,
    shuffle=True,
    num_epochs=None,
    queue_capacity=3 * BATCH_SIZE + 10,
    num_threads=1,
)


def experiment_fn(run_config, hparams):
    return Experiment(
        estimator=create_model(config=run_config, hparams=hparams),
        train_input_fn=train_input_fn,
        eval_input_fn=val_input_fn,
        train_steps=10000,
        eval_steps=200,
        train_steps_per_iteration=1000,
    )


# os.makedirs(os.path.join(MODEL_DIR, 'eval'), exist_ok=True)
learn_runner.run(experiment_fn=experiment_fn,
                 run_config=RunConfig(model_dir=model_dir),
                 schedule="continuous_train_and_eval",
                 hparams=HParams(**params))
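train_input_fn (whose tail opens this snippet) and val_input_fn are built with the contrib generator_input_fn, which expects x to be a generator function yielding dicts of numpy values that include the target key. A hedged sketch of a matching data_generator_fn, with the per-sample loading left as a hypothetical load_sample placeholder:

def data_generator_fn(meta_list):
    def _generator():
        for meta in meta_list:
            features, label = load_sample(meta)  # hypothetical loader
            yield {'features': features, TARGET_KEY: label}
    return _generator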
Example #29
        tf.logging.__dict__[args.verbosity] / 10)

    # Run the training job
    # learn_runner pulls configuration information from environment
    # variables using tf.learn.RunConfig and uses this configuration
    # to conditionally execute Experiment, or param server code
    learn_runner.run(
        generate_experiment_fn(
            min_eval_frequency=args.min_eval_frequency,
            eval_delay_secs=args.eval_delay_secs,
            train_steps=args.train_steps,
            eval_steps=args.eval_steps,
            export_strategies=[
                saved_model_export_utils.make_export_strategy(
                    model.SERVING_FUNCTIONS[args.export_format],
                    exports_to_keep=1)
            ]),
        # Note:  For now, we are calculating the version number from the current timestamp
        # TODO:  Check out get_timestamped_export_dir() from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/learn/python/learn/utils/saved_model_export_utils.py
        run_config=tf.contrib.learn.RunConfig(
            model_dir=os.path.join(os.environ['PIPELINE_OUTPUT_PATH'],
                                   datetime.now().strftime("%s"))),
        # Note:  The following is throwing this error:
        #    return model_dir and model_dir.startswith("gs://")
        #TypeError: startswith first arg must be bytes or a tuple of bytes, not str
        #      run_config=tf.contrib.learn.RunConfig(model_dir=saved_model_export_utils.get_timestamped_export_dir(os.environ['PIPELINE_OUTPUT_PATH'])),
        schedule="train",  #local_run
        hparams=hparam.HParams(**args.__dict__))

# TODO:  Used Estimator.export_savedmodel or saved_model_export_utils.export_fn(estimator, export_dir_base, checkpoint_path)
Example #30
        '--export-format',
        help='The input format of the exported SavedModel binary',
        choices=['JSON', 'CSV', 'EXAMPLE'],
        default='JSON')

    args = parser.parse_args()

    # Set python level verbosity
    tf.logging.set_verbosity(args.verbosity)
    # Set C++ Graph Execution level verbosity
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(
        tf.logging.__dict__[args.verbosity] / 10)

    # Run the training job
    # learn_runner pulls configuration information from environment
    # variables using tf.learn.RunConfig and uses this configuration
    # to conditionally execute Experiment, or param server code
    learn_runner.run(
        generate_experiment_fn(
            min_eval_frequency=args.min_eval_frequency,
            eval_delay_secs=args.eval_delay_secs,
            train_steps=args.train_steps,
            eval_steps=args.eval_steps,
            export_strategies=[
                saved_model_export_utils.make_export_strategy(
                    model.SERVING_FUNCTIONS[args.export_format],
                    exports_to_keep=1)
            ]),
        run_config=tf.contrib.learn.RunConfig(model_dir=args.job_dir),
        hparams=hparam.HParams(**args.__dict__))
Example #31
def main(_):
    """Run the training experiment."""

    scopes = {
        'net_scope': 'CifarConvNet',
        'emb_scope': 'EmbLayer',
        'metrics_scope': 'MetricsLayer',
        'emb_name': 'Embeddings',
        'label_name': 'labels'
    }

    # Define hook
    hook = SessionHook(scopes)

    # Define model parameters
    params = tf.contrib.training.HParams(
        learning_rate=FLAGS.learning_rate,
        n_classes=FLAGS.n_classes,
        batch_size=FLAGS.batch_size,
        train_steps=FLAGS.steps,
        # min_eval_frequency=100,
        model=FLAGS.model,
        dataset_dir=FLAGS.dataset_dir,
        checkpoint_dir=FLAGS.checkpoint_dir,
        hooks={
            # 'iterator_init_hook': iterator_init_hook,
            # 'embeddings_hook': embeddings_hook,
            'hook': hook
        },
        scopes=scopes
    )

    if FLAGS.train and FLAGS.evaluate:
        what_to_run = "train_and_evaluate"
    elif FLAGS.train:
        what_to_run = "train"
    elif FLAGS.evaluate:
        what_to_run = "evaluate"
    else:
        raise EnvironmentError("You must set the --train or --evaluate flag.")

    # Set the run_config and the directory to save the model and stats
    run_config = tf.contrib.learn.RunConfig()
    run_config = run_config.replace(model_dir=FLAGS.checkpoint_dir)

    learn_runner.run(
        experiment_fn=Experiment(params).get_experiment_fn,  # First-class function
        run_config=run_config,  # RunConfig
        schedule=what_to_run,  # What to run
        hparams=params  # HParams
    )

    """ ------------------------------------------ Embeddings saving ------------------------------------------ """

    classes = Cifar10(params.dataset_dir).get_classnames()
    embeddings = hook.get_embeddings()

    values = embeddings['values']
    labels = embeddings['labels']
    captions = [classes[x] for x in labels]

    if not os.path.isdir(os.path.join(FLAGS.checkpoint_dir, 'projector')):
        os.makedirs(os.path.join(FLAGS.checkpoint_dir, 'projector'))

    with open(os.path.join(FLAGS.checkpoint_dir, 'projector/metadata.tsv'), 'w+') as f:
        f.write('Index\tCaption\tLabel\n')
        for idx in range(len(labels)):
            f.write('{:05d}\t{}\t{}\n'
                    .format(idx, captions[idx], labels[idx]))

    with tf.Session() as sess:
        # The embedding variable to be stored
        embedding_var = tf.Variable(np.array(values), name='emb_values')
        sess.run(embedding_var.initializer)

        config = projector.ProjectorConfig()
        embedding = config.embeddings.add()
        embedding.tensor_name = embedding_var.name

        # Add metadata to the log
        embedding.metadata_path = os.path.join(FLAGS.checkpoint_dir, "projector/metadata.tsv")

        writer = tf.summary.FileWriter(os.path.join(FLAGS.checkpoint_dir, 'projector/'), sess.graph)
        projector.visualize_embeddings(writer, config)

        saver = tf.train.Saver([embedding_var])
        saver.save(sess, os.path.join(FLAGS.checkpoint_dir, "projector/model_emb.ckpt"), 1)