Example no. 1
def main(argv):
    del argv  # Unused
    if FLAGS.enable_eager:
        tf.enable_eager_execution()
    tf.config.set_soft_device_placement(True)

    config_params = FLAGS.config_param or []
    config_params += train_eval.get_gin_override_params(FLAGS.output_dir)
    base_config_path = os.path.dirname(FLAGS.config_path[0])
    gin.add_config_file_search_path(base_config_path)
    gin.parse_config_files_and_bindings(FLAGS.config_path, config_params)

    if FLAGS.num_virtual_gpus > -1:
        gpus = tf.config.experimental.list_physical_devices("GPU")

        total_gpu_mem_limit = 8192
        # memory_limit is in megabytes; integer division keeps the
        # per-device limit integral.
        per_gpu_mem_limit = total_gpu_mem_limit // FLAGS.num_virtual_gpus
        virtual_gpus = [
            tf.config.experimental.VirtualDeviceConfiguration(
                memory_limit=per_gpu_mem_limit)
        ] * FLAGS.num_virtual_gpus
        tf.config.experimental.set_virtual_device_configuration(
            gpus[0], virtual_gpus)
        logical_gpus = tf.config.experimental.list_logical_devices("GPU")
        print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")

    train_eval.train_eval_loop(
        offline_train=FLAGS.offline_train,
        offline_train_data_dir=FLAGS.offline_train_data_dir)
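
For reference, a minimal TF 2.x sketch of the same virtual-GPU split using the stable (non-experimental) API; the two 4096 MB limits are illustrative, not from the original script:

import tensorflow as tf  # TF 2.x

gpus = tf.config.list_physical_devices("GPU")
if gpus:
    # Split the first physical GPU into two logical devices of 4096 MB each.
    tf.config.set_logical_device_configuration(
        gpus[0],
        [tf.config.LogicalDeviceConfiguration(memory_limit=4096),
         tf.config.LogicalDeviceConfiguration(memory_limit=4096)])
    print(tf.config.list_logical_devices("GPU"))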
Example no. 2
def cfr_train(unused_arg):
    exploit_history = list()
    exploit_idx = list()

    tf.enable_eager_execution()
    game = pyspiel.load_game(FLAGS.game, {"players": pyspiel.GameParameter(2)})
    agent_name = "cfr"
    cfr_solver = cfr.CFRSolver(game)
    checkpoint = datetime.now()
    for ep in range(FLAGS.episodes):
        cfr_solver.evaluate_and_update_policy()
        if ep % 100 == 0:
            delta = datetime.now() - checkpoint
            conv = exploitability.exploitability(game,
                                                 cfr_solver.average_policy())
            exploit_idx.append(ep)
            exploit_history.append(conv)
            print(
                "Iteration {} exploitability {} - {} seconds since last checkpoint"
                .format(ep, conv, delta.seconds))
            checkpoint = datetime.now()

    with open(
            FLAGS.game + "_" + agent_name + "_" + str(FLAGS.episodes) +
            ".dat", "wb") as f:
        pickle.dump([exploit_idx, exploit_history], f)

    now = datetime.now()
    policy = cfr_solver.average_policy()
    for pid in [1, 2]:
        policy_to_csv(
            game, policy,
            "policies/policy_" + now.strftime("%m-%d-%Y_%H-%M") + "_" +
            agent_name + "_" + str(pid) + "_" + str(ep) + "episodes.csv")
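
A minimal sketch for reading back the curve dumped above; the file name is hypothetical, following the FLAGS.game + "_" + agent_name + "_" + episodes pattern used in cfr_train:

import pickle

import matplotlib.pyplot as plt

# e.g. produced by running cfr_train with --game=kuhn_poker --episodes=1000
with open("kuhn_poker_cfr_1000.dat", "rb") as f:
    exploit_idx, exploit_history = pickle.load(f)

plt.plot(exploit_idx, exploit_history)
plt.xlabel("iteration")
plt.ylabel("exploitability")
plt.yscale("log")
plt.show()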
Example no. 3
def main(argv):
    _ = argv
    tf.logging.set_verbosity(tf.logging.INFO)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    tf.enable_eager_execution(config=config)
    run_experiment()
Example no. 4
def make_prediction(img, MODEL_PATH):
    tf.enable_eager_execution()
    to_predict = preprocess(img)
    x_img = to_predict.shape[0]
    y_img = to_predict.shape[1]
    if len(to_predict.shape) == 3:
        z_img = to_predict.shape[2]
    else:
        z_img = 1
    PIXEL_COUNT = x_img * y_img * z_img
    LABEL_COUNT = 1
    to_predict_reshape = np.reshape(to_predict, (1, x_img * y_img * z_img))
    g = tf.Graph()
    with g.as_default():
        session = tf.InteractiveSession()
        model = build_model(PIXEL_COUNT, LABEL_COUNT)
        saver = tf.train.Saver()
        saver.restore(session, MODEL_PATH)
        predictions = model.output.eval(
            session=session,
            feed_dict={model.x_placeholder: to_predict_reshape})
        session.close()
    box_image, x, y, width, height = plot_images(
        to_predict, (predictions + 1) * (64, 32, 64, 32))
    plate = result(to_predict, x, y, height, width)
    return box_image, plate
Example no. 5
def image_callback(msg):
    print("Received an image!")

    try:
        cv2_img = bridge.imgmsg_to_cv2(msg, "bgr8")
    except CvBridgeError as e:
        print(e)
        return

    cv2_img = cv2.resize(cv2_img, (224, 224))
    X = cv2_img.reshape(1, 224, 224, 3)
    global sess
    global graph

    # tf.enable_eager_execution() was called here in the original, but it
    # must run once at program startup and conflicts with the graph/session
    # code below, so it is left out.

    with graph.as_default():
        set_session(sess)
        Y = model.predict(X)
        angularVelocity = float(Y.squeeze())  # Twist fields expect a scalar
        print(angularVelocity)
        # Ideally created once at module level; queue_size avoids a ROS warning.
        pubCommand = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
        global command
        command = Twist()
        command.linear.x = 0.05
        command.linear.y = 0.0
        command.linear.z = 0.0
        command.angular.z = angularVelocity / 2
        pubCommand.publish(command)
        cv2.waitKey(1)
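
The callback above relies on module-level bridge, sess, graph, and model objects that are not shown; a minimal sketch of the assumed node setup (topic and model path are hypothetical):

import rospy
import tensorflow as tf
from cv_bridge import CvBridge
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Image
from tensorflow.keras.backend import get_session
from tensorflow.keras.models import load_model

bridge = CvBridge()
model = load_model("model.h5")  # hypothetical path
sess = get_session()
graph = tf.get_default_graph()

rospy.init_node("image_listener")
rospy.Subscriber("/camera/image_raw", Image, image_callback)  # topic assumed
rospy.spin()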
Example no. 6
def _main() -> None:
    """スクリプトのエントリポイント
    """
    import logging

    logging.basicConfig(level=logging.INFO)

    tfv1.enable_eager_execution()

    raw_train, raw_validation, _, metadata = datasets.get_batch_dataset(shuffle_seed=0)
    base_learning_rate = 0.0001
    model = network_ft.MobileNetV2FT()
    model.compile(
        optimizer=tf.keras.optimizers.RMSprop(lr=base_learning_rate),
        loss="binary_crossentropy",
        metrics=["accuracy"],
    )

    # initial model accuracy
    loss0, accuracy0 = model.evaluate(raw_validation, steps=20)
    logger.info(f"initial loss: {loss0:.2f}, acc: {accuracy0:.2f}")

    # training
    checkpoint = utils.load_checkpoints(model, save_dir="_data/ckpt_finetuning")
    history = model.fit(
        raw_train, epochs=2, validation_data=raw_validation, callbacks=[checkpoint]
    )
    utils.plot_history(history)
Example no. 7
def degre_tf(tensor_obj):
    tf.enable_eager_execution()
    tensor_obj = tf.reshape(tensor_obj, (30, 35, 35))
    shapes = tensor_obj.get_shape().as_list()
    pivot_np = np.zeros((shapes[0], shapes[1]))
    deg = tf.Variable(pivot_np)
    for i in range(shapes[0]):  #  testing for one sample
        #G = nx.Graph()
        graph_tensor = tensor_obj[i]
        #graph_tensor = tf.Variable(graph_tensor, dtype=tf.float64)
        graph_tensor = graph_tensor.numpy()
        G = nx.from_numpy_matrix(graph_tensor)
        '''
        for x in range(shapes[1]):
            G.add_node(x) ##  adding 35 nodes in the graph
        for x in range(shapes[1]):  # or:  for x in range(shapes[1])
            for y in range(shapes[2]):
                  my_tensor=graph_tensor[x][y]
                  print('my tensor is',my_tensor)
                  #gt=tf.Variable(graph_tensor[x][y],dtype=tf.float64)
                  if tf.math.not_equal(graph_tensor[x][y], 0):
                    G.add_edge(x,y,weight=graph_tensor[x][y])
        '''
        centrality = nx.degree_centrality(G)
        #centrality = nx.closeness_centrality(G, distance='weight', wf_improved=False)
        #centrality = nx.eigenvector_centrality_numpy(G, weight='weight')
        #centrality = nx.betweenness_centrality(G, weight='weight', endpoints=False, normalized=True)  # default settings
        for z in range(shapes[1]):
            pivot_np[i][z] = centrality.get(z)

    print('degrees are', pivot_np)
    deg.assign(pivot_np)
    deg = tf.cast(deg, tf.float32)  # tf.cast returns a new tensor; keep it
    return deg
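
A quick usage sketch with random data (any input with 30 * 35 * 35 elements works, since the function reshapes it):

import numpy as np

adj_batch = np.random.rand(30 * 35 * 35)
deg = degre_tf(adj_batch)  # (30, 35): one row of degree centralities per graph
print(deg.shape)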
Example no. 8
def _main() -> None:
    """簡易動作テスト用スクリプト
    """
    import logging
    import tensorflow.compat.v1 as tfv1

    logger = logging.getLogger(__name__)
    logging.basicConfig(level=logging.INFO)

    tfv1.enable_eager_execution()

    # base model
    model = MobileNetV2FT()
    model.base.summary()

    # additional model
    base_learning_rate = 0.0001
    model.build((32, 160, 160, 3))
    model.compile(
        optimizer=tf.keras.optimizers.RMSprop(lr=base_learning_rate),
        loss="binary_crossentropy",
        metrics=["accuracy"],
    )
    model.summary()
    logger.info(f"trainable variables = {len(model.trainable_variables)}")
Example no. 9
def eager_execution_test():
    #--------------------
    # Eager execution provides an imperative interface to TensorFlow.
    # With eager execution enabled, TensorFlow functions execute operations
    # immediately (as opposed to adding them to a graph to be executed later
    # in a tf.compat.v1.Session) and return concrete values (as opposed to
    # symbolic references to a node in a computational graph).
    tf.enable_eager_execution()

    print('6 * 7 = ', tf.multiply(6, 7).numpy())
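
For contrast, the same computation without eager execution only yields a value inside a session; a TF 1.x sketch:

def graph_execution_test():
    # Build the op as a graph node; nothing runs until sess.run().
    with tf.Graph().as_default():
        x = tf.multiply(6, 7)  # symbolic tensor, no concrete value yet
        with tf.Session() as sess:
            print('6 * 7 = ', sess.run(x))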
Example no. 10
def main(_):
    # Enables eager context for TF 1.x. TF 2.x will use eager by default.
    # This is used to conveniently get a representative dataset generator using
    # TensorFlow training input helper.
    tf.enable_eager_execution()

    model_builder = get_model_builder(FLAGS.model_name)

    with tf.Graph().as_default(), tf.Session() as sess:
        images = tf.placeholder(tf.float32,
                                shape=(1, FLAGS.image_size, FLAGS.image_size,
                                       3),
                                name="images")

        logits, endpoints = model_builder.build_model(images, FLAGS.model_name,
                                                      False)
        if FLAGS.endpoint_name:
            output_tensor = endpoints[FLAGS.endpoint_name]
        else:
            output_tensor = tf.nn.softmax(logits)

        restore_model(sess, FLAGS.ckpt_dir, FLAGS.enable_ema)

        if FLAGS.output_saved_model_dir:
            signature_def_map = {
                "serving_default":
                tf.compat.v1.saved_model.signature_def_utils.
                predict_signature_def({"input": images},
                                      {"output": output_tensor})
            }

            builder = tf.compat.v1.saved_model.Builder(
                FLAGS.output_saved_model_dir)
            builder.add_meta_graph_and_variables(
                sess, ["serve"], signature_def_map=signature_def_map)
            builder.save()
            print("Saved model written to %s" % FLAGS.output_saved_model_dir)

        converter = tf.lite.TFLiteConverter.from_session(
            sess, [images], [output_tensor])
        if FLAGS.quantize:
            if not FLAGS.data_dir:
                raise ValueError(
                    "Post training quantization requires data_dir flag to point to the "
                    "calibration dataset. To export a float model, set "
                    "--quantize=False.")

            converter.representative_dataset = tf.lite.RepresentativeDataset(
                representative_dataset_gen)
            converter.optimizations = [tf.lite.Optimize.DEFAULT]
            converter.inference_input_type = tf.lite.constants.QUANTIZED_UINT8
            converter.inference_output_type = tf.lite.constants.QUANTIZED_UINT8
            converter.target_spec.supported_ops = [
                tf.lite.OpsSet.TFLITE_BUILTINS_INT8
            ]

        # convert() freezes variables through the live session, so it must
        # run inside the `with tf.Session()` block.
        tflite_buffer = converter.convert()
        tf.gfile.GFile(FLAGS.output_tflite, "wb").write(tflite_buffer)
        print("tflite model written to %s" % FLAGS.output_tflite)
Example no. 11
def setUp(self):
    super().setUp()
    self._solver_cls = deep_evolution_solver.MutationPredictorSolver
    tf.enable_eager_execution()
    self.problem = simple_ising_model.AlternatingChainIsingModel(
        length=20, vocab_size=4)
    self.vocab_size = self.problem.domain.vocab_size
    self.length = self.problem.domain.length
Example no. 12
def main(argv):
    logging.info('Running main.')

    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    # This is on by default in TF 2.0.
    tf_old.enable_eager_execution()
    train_and_validate()
Example no. 13
def _main() -> None:
    """簡易テスト用スクリプト
    """
    import tensorflow.compat.v1 as tfv1

    tfv1.enable_eager_execution()
    raw_train, _ = get_dataset()
    for idx, image in enumerate(raw_train.take(2)):
        _show_and_save_image(tf.reshape(image, (28, 28)), f"image_{idx}",
                             f"_data/image_{idx}.png")
Example no. 14
def rcfr_train(unused_arg):
    tf.enable_eager_execution()
    game = pyspiel.load_game(FLAGS.game, {"players": pyspiel.GameParameter(2)})
    models = [
        rcfr.DeepRcfrModel(
            game,
            num_hidden_layers=1,
            num_hidden_units=64 if FLAGS.game == "leduc_poker" else 13,
            num_hidden_factors=1,
            use_skip_connections=True) for _ in range(game.num_players())
    ]
    patient = rcfr.RcfrSolver(game, models, False, True)
    exploit_history = list()
    exploit_idx = list()

    def _train(model, data):
        data = data.shuffle(1000)
        data = data.batch(12)
        #data = data.repeat(1)
        optimizer = tf.keras.optimizers.Adam(lr=0.005, amsgrad=True)
        for x, y in data:
            optimizer.minimize(
                lambda: tf.losses.huber_loss(y, model(x)),  # pylint: disable=cell-var-from-loop
                model.trainable_variables)

    agent_name = "rcfr"
    checkpoint = datetime.now()
    for iteration in range(FLAGS.episodes):
        if (iteration % 100) == 0:
            delta = datetime.now() - checkpoint
            conv = pyspiel.exploitability(game, patient.average_policy())
            exploit_idx.append(iteration)
            exploit_history.append(conv)
            print(
                "[RCFR] Iteration {} exploitability {} - {} seconds since last checkpoint"
                .format(iteration, conv, delta.seconds))
            checkpoint = datetime.now()
        patient.evaluate_and_update_policy(_train)

    with open(
            FLAGS.game + "_" + agent_name + "_" + str(FLAGS.episodes) +
            ".dat", "wb") as f:
        pickle.dump([exploit_idx, exploit_history], f)

    now = datetime.now()
    policy = patient.average_policy()

    for pid in [1, 2]:
        policy_to_csv(
            game, policy, "policies/policy_" +
            now.strftime("%m-%d-%Y_%H-%M") + "_" + agent_name + "_" +
            str(pid) + "_" + str(FLAGS.episodes) + "episodes.csv")
Example no. 15
def _main() -> None:
    """簡易動作用スクリプト
    """
    import logging

    import tensorflow.compat.v1 as tfv1

    logging.basicConfig(level=logging.INFO)
    tfv1.enable_eager_execution()

    dataset_train, _ = dataset.get_batch_dataset()
    train(dataset_train, epochs=2)
    utils.save_gif("_data/", "image_at_epoch_*", "_data/dcgan.gif")
Example no. 16
def main(_):
    # tf.enable_eager_execution() was called here in the original, but the
    # tf.Session/queue-runner pipeline below cannot run with eager execution
    # enabled, so it is left out.
    tf.logging.set_verbosity(tf.logging.INFO)

    required_flags = ['input_tfrecord_paths', 'output_tfrecord_path',
                      'inference_graph']
    for flag_name in required_flags:
        if not getattr(FLAGS, flag_name):
            raise ValueError('Flag --{} is required'.format(flag_name))

    if FLAGS.gpu_device:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(FLAGS.gpu_device)

    output_folder = os.path.dirname(FLAGS.output_tfrecord_path)

    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        input_tfrecord_paths = [
            v for v in FLAGS.input_tfrecord_paths.split(',') if v]
        tf.logging.info('Reading input from %d files', len(input_tfrecord_paths))
        serialized_example_tensor, image_tensor = detection_inference.build_input(
            input_tfrecord_paths, num_additional_channels=FLAGS.num_additional_channels)
        tf.logging.info('Reading graph and building model...')
        (detected_boxes_tensor, detected_scores_tensor,
         detected_labels_tensor) = detection_inference.build_inference_graph(
            image_tensor, FLAGS.inference_graph)

        tf.logging.info('Running inference and writing output to {}'.format(
            FLAGS.output_tfrecord_path))
        sess.run(tf.local_variables_initializer())
        tf.train.start_queue_runners()
        with tf.python_io.TFRecordWriter(
                FLAGS.output_tfrecord_path) as tf_record_writer:
            try:
                for counter in itertools.count():
                    tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 10,
                                           counter)
                    tf_example = detection_inference.infer_detections_and_add_to_example(
                        serialized_example_tensor, detected_boxes_tensor,
                        detected_scores_tensor, detected_labels_tensor,
                        FLAGS.discard_image_pixels)

                    tf_record_writer.write(tf_example.SerializeToString())
            except tf.errors.OutOfRangeError:
                tf.logging.info('Finished processing records')
Example no. 17
def check_dataset(input_fn, params):
    tf.enable_eager_execution()
    dataset = input_fn(params)
    dataset_iter = dataset.make_one_shot_iterator()
    tensor, _ = next(dataset_iter)
    enc = fetch_encoder(params)

    for p in tensor[:1]:
        txt = enc.decode(p)

    print('-' * 50)
    print(txt[:500], '\n\n...\n\n', txt[-500:])
    print('-' * 50)
    exit()
Example no. 18
def main(_):
    tf.enable_eager_execution()

    envs = [
        'HalfCheetah-v1', 'Hopper-v1', 'Ant-v1', 'Walker2d-v1', 'Reacher-v1'
    ]
    for ienv, env in enumerate(envs):
        print('Processing environment %d of %d: %s' %
              (ienv + 1, len(envs), env))
        h5_filename = os.path.join(FLAGS.src_data_dir, '%s.h5' % env)
        trajectories = h5py.File(h5_filename, 'r')

        if (set(trajectories.keys()) != set(
            ['a_B_T_Da', 'len_B', 'obs_B_T_Do', 'r_B_T'])):
            raise ValueError('Unexpected key set in file %s' % h5_filename)

        replay_buffer = ReplayBuffer()

        if env.find('Reacher') > -1:
            max_len = 50
        else:
            max_len = 1000

        for i in range(50):
            print('  Processing trajectory %d of 50 (len = %d)' %
                  (i + 1, trajectories['len_B'][i]))
            for j in range(trajectories['len_B'][i]):
                # mask is 0 only on a terminal step that ended before the
                # time limit; time-limit truncations keep mask = 1.
                if (j + 1 == trajectories['len_B'][i]
                        and trajectories['len_B'][i] < max_len):
                    mask = 0
                else:
                    mask = 1
                replay_buffer.push_back(
                    trajectories['obs_B_T_Do'][i][j],
                    trajectories['a_B_T_Da'][i][j],
                    trajectories['obs_B_T_Do'][i][(j + 1) %
                                                  trajectories['len_B'][i]],
                    [trajectories['r_B_T'][i][j]], [mask],
                    j == trajectories['len_B'][i] - 1)

        replay_buffer_var = contrib_eager_python_tfe.Variable(
            '', name='expert_replay_buffer')
        saver = contrib_eager_python_tfe.Saver([replay_buffer_var])
        odir = os.path.join(FLAGS.dst_data_dir, env)
        print('Saving results to checkpoint in directory: %s' % odir)
        tf.gfile.MakeDirs(odir)
        replay_buffer_var.assign(pickle.dumps(replay_buffer))
        saver.save(os.path.join(odir, 'expert_replay_buffer'))
Example no. 19
def neurd_train(unused_arg):
    tf.enable_eager_execution()

    game = pyspiel.load_game(FLAGS.game, {"players": pyspiel.GameParameter(2)})

    models = []
    for _ in range(game.num_players()):
        models.append(
            neurd.DeepNeurdModel(game,
                                 num_hidden_layers=1,
                                 num_hidden_units=13,
                                 num_hidden_factors=8,
                                 use_skip_connections=True,
                                 autoencode=False))
    solver = neurd.CounterfactualNeurdSolver(game, models)

    def _train(model, data):
        neurd.train(model,
                    data,
                    batch_size=100,
                    step_size=1,
                    threshold=2,
                    autoencoder_loss=None)

    exploit_history = list()
    for ep in range(FLAGS.episodes):
        solver.evaluate_and_update_policy(_train)
        if ep % 100 == 0:
            conv = pyspiel.exploitability(game, solver.average_policy())
            exploit_history.append(conv)
            print("Iteration {} exploitability {}".format(ep, conv))

    now = datetime.now()
    policy = solver.average_policy()
    agent_name = "neurd"
    for pid in [1, 2]:
        policy_to_csv(
            game, policy,
            "policies/policy_" + now.strftime("%m-%d-%Y_%H-%M") + "_" +
            agent_name + "_" + str(pid) + "_" + str(ep) + "episodes.csv")

    plt.plot(range(len(exploit_history)), exploit_history)
    plt.ylim(0.01, 1)
    plt.yticks([1, 0.1, 0.01])
    plt.yscale("log")
    plt.xscale("log")
    plt.show()
Example no. 20
def main():
    tfv1.enable_eager_execution()
    logger.info(f"execute eagerly = {tf.executing_eagerly()}")
    logger.info(f"is gpu available = {tf.test.is_gpu_available()}")

    logger.info("get dataset...")
    train, _ = datasets.get_dataset()

    logger.info("learning...")
    network = model.MNISTModel()
    trainer = model.Trainer()
    checkpoint = model.Checkpoint(network=network, optimizer=trainer.optimizer)

    start_learning = time.time()
    start(network, trainer, train, 5, checkpoint)
    end_learning = time.time()
    logger.info(f"learning time: {end_learning - start_learning} sec")
Example no. 21
def main():
    tfv1.enable_eager_execution()
    logger.info(f"execute eagerly = {tf.executing_eagerly()}")
    logger.info(f"is gpu available = {tf.test.is_gpu_available()}")

    logger.info("get dataset...")
    _, test = datasets.get_dataset()

    logger.info("predicting...")
    network = model.MNISTModel()
    predictor = model.Predictor()
    model.Checkpoint(network=network)

    start(network, predictor, test)
    logger.info(
        f"loss: {predictor.predict_loss.result()}, accuracy: {predictor.predict_accuracy.result()}"
    )
Example no. 22
def _main() -> None:
    """簡易動作テスト用スクリプト
    """
    import tensorflow.compat.v1 as tfv1

    tfv1.enable_eager_execution()

    # base model
    model = MobileNetV2FE()
    model.base.summary()

    # additional model
    base_learning_rate = 0.0001
    model.compile(
        optimizer=tf.keras.optimizers.RMSprop(lr=base_learning_rate),
        loss="binary_crossentropy",
        metrics=["accuracy"],
    )
    model.build((32, 160, 160, 3))
    model.summary()
Example no. 23
def main(_):
  flags.mark_flags_as_required(["task"])

  if FLAGS.module_import:
    import_modules(FLAGS.module_import)

  gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_param)

  total_examples = 0
  tf.enable_eager_execution()
  task = t5.data.TaskRegistry.get(FLAGS.task)

  ds = task.get_dataset(sequence_length=sequence_length(),
                        split=FLAGS.split,
                        use_cached=False,
                        shuffle=False)

  keys = re.findall(r"{([\w+]+)}", FLAGS.format_string)
  def _example_to_string(ex):
    key_to_string = {}
    for k in keys:
      if k in ex:
        v = ex[k].numpy().tolist()
        if (FLAGS.detokenize
            and v and isinstance(v, list)
            and isinstance(v[0], int)):
          s = task.output_features[k].vocabulary.decode([abs(i) for i in v])
        elif isinstance(v, bytes):
          s = v.decode("utf-8")
        else:
          s = " ".join(str(i) for i in v)
        key_to_string[k] = s
      else:
        key_to_string[k] = ""
    return FLAGS.format_string.format(**key_to_string)

  for ex in ds:
    print(_example_to_string(ex))
    total_examples += 1
    if total_examples == FLAGS.max_examples:
      return
Example no. 24
def main(_):
  flags.mark_flags_as_required(["task"])

  if FLAGS.module_import:
    import_modules(FLAGS.module_import)

  gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_param)

  total_examples = 0
  tf.enable_eager_execution()
  task = t5.data.TaskRegistry.get(FLAGS.task)
  # Do a load to make sure the files exist before grabbing file path
  task.tfds_dataset.load(FLAGS.split, shuffle_files=False)
  files = task.tfds_dataset.files(FLAGS.split)
  def _example_to_string(ex):
    key_to_string = {}
    for k in ("inputs", "targets"):
      if k in ex:
        v = ex[k].numpy()
        key_to_string[k] = (
            " ".join(str(i) for i in v) if FLAGS.tokenize
            else v.decode("utf-8"))
      else:
        key_to_string[k] = ""
    return FLAGS.format_string.format(**key_to_string)

  for shard_path in files:
    logging.info("Processing shard: %s", shard_path)
    ds = task.tfds_dataset.load_shard(shard_path)
    ds = task.preprocess_text(ds)
    if FLAGS.tokenize:
      ds = t5.data.encode_string_features(
          ds, task.output_features, keys=task.output_features,
          copy_plaintext=True)
      ds = task.preprocess_tokens(ds, sequence_length())

    for ex in ds:
      print(_example_to_string(ex))
      total_examples += 1
      if total_examples == FLAGS.max_examples:
        return
Example no. 25
def main(_):
  # Enables eager context for TF 1.x. TF 2.x will use eager by default.
  # This is used to conveniently get a representative dataset generator using
  # TensorFlow training input helper.
  tf.enable_eager_execution()

  converter = tf.lite.TFLiteConverter.from_saved_model(
      FLAGS.saved_model_dir,
      input_arrays=[FLAGS.input_name],
      output_arrays=[FLAGS.output_name])
  # Chooses a tf.lite.Optimize mode:
  # https://www.tensorflow.org/api_docs/python/tf/lite/Optimize
  converter.optimizations = [tf.lite.Optimize.DEFAULT]
  converter.representative_dataset = tf.lite.RepresentativeDataset(
      representative_dataset_gen)
  if FLAGS.require_int8:
    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]

  tflite_buffer = converter.convert()
  tf.gfile.GFile(FLAGS.output_tflite, "wb").write(tflite_buffer)
  print("tflite model written to %s" % FLAGS.output_tflite)
Example no. 26
File: utils.py, Project: yyht/GPTNeo
def check_dataset(input_fn, params):  # params was missing from the signature
    tf.enable_eager_execution()
    dataset = input_fn(params)
    dataset_iter = dataset.make_one_shot_iterator()
    tensor, _ = next(dataset_iter)
    enc = fetch_encoder(params)

    for p in tensor[:1]:
        txt = enc.decode(p)
    #txt = enc.decode(tensor)
    max_id = tf.reduce_max(tensor)
    min_id = tf.reduce_min(tensor)

    print(tensor)
    print(tensor.shape)
    print('-' * 50)
    print(txt[:500], '\n...\n', txt[-500:])
    print('-' * 50)
    print('min token id: ', min_id)
    print('max token id: ', max_id)
    exit()
Example no. 27
def main(unused_argv):
    tf.enable_eager_execution()
    # WARNING: do not set tf.debugging.set_log_device_placement(True)
    # This can cause an error when using Eager Execution and SavedModels.

    examples = generate_realm_examples_with_model_refresh()

    if FLAGS.local_debug:
        featurizer = load_featurizer()

        profile.reset()
        start_time = time.time()
        for i, (query, cands, _) in enumerate(examples):
            if i % 50 == 0:
                print('Example', i)
                print('Total time elapsed: {}'.format(time.time() -
                                                      start_time))
                print(query)
                print('Originating doc UID: {}'.format(query.orig_doc_uid))
                for cand in cands:
                    print('Doc UID: {}'.format(cand.uid))
                    print(
                        featurizer.tokenizer.token_ids_to_str(
                            cand.title_token_ids))
                    print(
                        featurizer.tokenizer.token_ids_to_str(
                            cand.body_token_ids))
                print()
            if i == 1000:
                break
        profile.print_report()
    else:
        preprocessing.push_examples(
            example_generator=(tf_ex for query, cands, tf_ex in examples),
            port=FLAGS.port,
            max_queue_size=FLAGS.max_queue_size,
            queue_timeout=30.0)
Example no. 28
                    default=None,
                    choices=[None, "ZLIB", "GZIP"],
                    help='compression the dataset is compressed with')
args = parser.parse_args()

from src.createtask import create_registry

create_registry(os.path.join(args.dir, args.train),
                os.path.join(args.dir, args.val), args.taskname,
                args.compression)
if args.tpu_address is not None:
    args.tpu_address = f"grpc://{args.tpu_address}:8470"
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
        tpu=args.tpu_address)
    tf.enable_eager_execution()
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    print("All devices: ", tf.config.list_logical_devices('TPU'))
    tf.disable_v2_behavior()

tf.get_logger().propagate = False
py_logging.root.setLevel('INFO')


@contextmanager
def tf_verbosity_level(level):
    og_level = tf.logging.get_verbosity()
    tf.logging.set_verbosity(level)
    yield
    tf.logging.set_verbosity(og_level)
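
Usage sketch for the context manager above:

with tf_verbosity_level(tf.logging.ERROR):
    tf.logging.info("suppressed")   # below ERROR, not emitted
tf.logging.info("visible again")    # original verbosity restored on exit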
Example no. 29
def load_tf_graph_def(graph_file_name: str = "", is_binary: bool = True, checkpoint: str = "",
                      model_dir: str = "", saved_model_tags: list = [], meta_graph_file: str = "",
                      user_output_node_names_list: list = []):
    # As a provisional solution, use a native TF methods to load a model protobuf
    graph_def = tf_v1.GraphDef()
    if isinstance(graph_file_name, str) and (re.match(r'.*\.(ckpt|meta)$', graph_file_name)):
        print('[ WARNING ] The value for the --input_model command line parameter ends with ".ckpt" or ".meta" '
              'extension.\n'
              'It means that the model is not frozen.\n'
              'To load non frozen model to Model Optimizer run:'
              '\n\n1. For "*.ckpt" file:'
              '\n- if inference graph is in binary format'
              '\npython3 mo_tf.py --input_model "path/to/inference_graph.pb" --input_checkpoint "path/to/*.ckpt"'
              '\n- if inference graph is in text format'
              '\npython3 mo_tf.py --input_model "path/to/inference_graph.pbtxt" --input_model_is_text '
              '--input_checkpoint "path/to/*.ckpt"'
              '\n\n2. For "*.meta" file:'
              '\npython3 mo_tf.py --input_meta_graph "path/to/*.meta"')
    variables_values = {}
    try:
        if graph_file_name and not meta_graph_file and not checkpoint:
            # frozen graph
            return read_file_to_graph_def(graph_def, graph_file_name, is_binary), variables_values, 'tf'
        if graph_file_name and not meta_graph_file and checkpoint:
            # inference graph and checkpoint
            graph_def = read_file_to_graph_def(graph_def, graph_file_name, is_binary)
            outputs = get_output_node_names_list(graph_def, user_output_node_names_list)
            if os.path.isfile(checkpoint):
                graph_def = freeze_checkpoint(graph_def=graph_def, checkpoint=checkpoint, output_node_names=outputs)
            elif os.path.isdir(checkpoint):
                graph_def, variables_values = freeze_checkpoints(graph_def=graph_def, checkpoint_dir=checkpoint,
                                                                 output_node_names=outputs)
            # we are sure that checkpoint is existing file or directory due to cli_parser configuration
            return graph_def, variables_values, 'tf'
        if not graph_file_name and meta_graph_file:
            meta_graph_file = deducing_metagraph_path(meta_graph_file)
            input_meta_graph_def = read_file_to_graph_def(tf_v1.MetaGraphDef(), meta_graph_file, is_binary)
            # pylint: disable=no-member
            with tf_v1.Session() as sess:
                restorer = tf_v1.train.import_meta_graph(input_meta_graph_def)
                restorer.restore(sess, re.sub(r'\.meta$', '', meta_graph_file))
                outputs = get_output_node_names_list(input_meta_graph_def.graph_def, user_output_node_names_list)
                graph_def = tf_v1.graph_util.convert_variables_to_constants(sess, input_meta_graph_def.graph_def,
                                                                            outputs)
                return graph_def, variables_values, 'tf'
        if model_dir:
            # saved model directory
            try:
                env_setup = get_environment_setup("tf")
                # enable eager execution temporarily while TensorFlow 2 model is being loaded
                tf_v1.enable_eager_execution()
                # code to extract GraphDef for TF 2.0 SavedModel format
                # tf.saved_model.load function throws TypeError for TF 1.x SavedModel format in case TF 1.x installed
                imported = tf.saved_model.load(model_dir, saved_model_tags) # pylint: disable=E1120
                # to get a signature by key throws KeyError for TF 1.x SavedModel format in case TF 2.x installed
                concrete_func = imported.signatures[tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
                # the aggressive inlining parameter needs to freeze a table of embeddings for Keras Embedding operation
                # and a model with Embedding operation cannot properly converted to IR without this function parameter
                if "tensorflow" in env_setup and env_setup["tensorflow"] >= LooseVersion("2.2.0"):
                    frozen_func = convert_variables_to_constants_v2(concrete_func,
                                                                    lower_control_flow=False,
                                                                    aggressive_inlining=True)  # pylint: disable=E1123
                else:
                    frozen_func = convert_variables_to_constants_v2(concrete_func,
                                                                    lower_control_flow=False)  # pylint: disable=E1123
                graph_def = frozen_func.graph.as_graph_def(add_shapes=True)
                # disable eager execution since next steps are executed with a graph in non-eager mode
                tf_v1.disable_eager_execution()
                return graph_def, variables_values, 'tf2'
            except (TypeError, KeyError):
                # disable eager execution since TensorFlow 1 model is handled
                tf_v1.disable_eager_execution()
                # code to extract GraphDef for TF 1.0 SavedModel format
                tags = saved_model_tags if saved_model_tags is not None else [tf_v1.saved_model.tag_constants.SERVING]
                with tf_v1.Session() as sess:
                    meta_graph_def = tf_v1.saved_model.loader.load(sess, tags, model_dir)
                    outputs = get_output_node_names_list(meta_graph_def.graph_def, user_output_node_names_list)
                    graph_def = tf_v1.graph_util.convert_variables_to_constants(sess, meta_graph_def.graph_def, outputs)
                    return graph_def, variables_values, 'tf'
            except Exception as e:
                raise FrameworkError('SavedModel format load failure: {}', e) from e
    except Exception as e:
        raise FrameworkError('Cannot load input model: {}', e) from e
    raise Error("Unknown configuration of input model parameters")
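
A hypothetical call, freezing a TF 2.x SavedModel directory into a GraphDef (path and tags are illustrative):

graph_def, variables_values, framework = load_tf_graph_def(
    model_dir="path/to/saved_model", saved_model_tags=["serve"])
print(framework)  # 'tf2' for a TF 2.x SavedModel, 'tf' for TF 1.x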
Example no. 30
import os
import os.path as osp
import argparse
import sys

import tensorflow
import tensorflow.compat.v1 as tf

import numpy as np

CURRENT_DIR = osp.dirname(__file__)
sys.path.append(osp.join(CURRENT_DIR, '../..'))

from utils.misc_utils import load_cfgs
from siamese_datasets.dataloader import DataLoader

tf.enable_eager_execution()
print("executing eagerly:", tensorflow.executing_eagerly())

TF_MAJOR_VERSION = int(tf.__version__.split(".")[0])
if TF_MAJOR_VERSION == 1:
    import tensorflow.contrib.slim as slim
    from tensorflow.contrib import quantize as contrib_quantize
else:
    import tf_slim as slim
    tf.disable_v2_behavior()

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='')

    parser.add_argument(
        '--frozen_graph_model',