Code example #1
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    if FLAGS.tapas_verbosity:
        tf.get_logger().setLevel(FLAGS.tapas_verbosity)

    task = tasks.Task[FLAGS.task]
    output_dir = os.path.join(FLAGS.output_dir, task.name.lower())
    model_dir = FLAGS.model_dir or os.path.join(output_dir, 'model')
    mode = Mode[FLAGS.mode.upper()]
    _check_options(output_dir, task, mode)

    if mode == Mode.CREATE_DATA:
        # Retrieval interactions are model dependent and are created in advance.
        if task != tasks.Task.NQ_RETRIEVAL:
            _print('Creating interactions ...')
            token_selector = _get_token_selector()
            task_utils.create_interactions(task, FLAGS.input_dir, output_dir,
                                           token_selector)
        _print('Creating TF examples ...')
        _create_all_examples(task,
                             FLAGS.bert_vocab_file,
                             FLAGS.test_mode,
                             test_batch_size=FLAGS.test_batch_size,
                             output_dir=output_dir)

    elif mode in (Mode.TRAIN, Mode.PREDICT_AND_EVALUATE, Mode.PREDICT):
        _print('Training or predicting ...')
        tpu_options = TpuOptions(use_tpu=FLAGS.use_tpu,
                                 tpu_name=FLAGS.tpu_name,
                                 tpu_zone=FLAGS.tpu_zone,
                                 gcp_project=FLAGS.gcp_project,
                                 master=FLAGS.master,
                                 num_tpu_cores=FLAGS.num_tpu_cores,
                                 iterations_per_loop=FLAGS.iterations_per_loop)
        _train_and_predict(
            task=task,
            tpu_options=tpu_options,
            test_batch_size=FLAGS.test_batch_size,
            train_batch_size=FLAGS.train_batch_size,
            gradient_accumulation_steps=FLAGS.gradient_accumulation_steps,
            bert_config_file=FLAGS.bert_config_file,
            init_checkpoint=FLAGS.init_checkpoint,
            test_mode=FLAGS.test_mode,
            mode=mode,
            output_dir=output_dir,
            model_dir=model_dir,
            loop_predict=FLAGS.loop_predict,
        )

    elif mode == Mode.EVALUATE:
        _eval(
            task=task,
            output_dir=output_dir,
            model_dir=model_dir,
        )

    else:
        raise ValueError(f'Unknown mode: {mode}')
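For context, a minimal sketch of the `Mode` enum this script assumes; the member names are taken from the branches above, while the values are arbitrary:

import enum

class Mode(enum.Enum):
    CREATE_DATA = 1
    TRAIN = 2
    PREDICT = 3
    PREDICT_AND_EVALUATE = 4
    EVALUATE = 5

`Mode[FLAGS.mode.upper()]` then resolves a flag such as `--mode=train` by member name.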
Code example #2
    def add_sub_graph_call_output_tensors_transposes(node: Node):
        """
        Adds transpose operations to the output nodes if they are 4D to change layout from NCHW to NHWC.
        :param node: the node to add transposes to the output nodes to.
        :return: None
        """
        try:
            import tensorflow.compat.v1 as tf_v1
            # disable eager execution of TensorFlow 2 environment immediately
            tf_v1.disable_eager_execution()
        except ImportError:
            import tensorflow as tf_v1
        # in some environments, suppressing output via TF_CPP_MIN_LOG_LEVEL does not work
        tf_v1.get_logger().setLevel("ERROR")

        from openvino.tools.mo.front.tf.partial_infer.tf import get_subgraph_output_tensors, add_node_def_to_subgraph
        _, output_tensors = get_subgraph_output_tensors(node)

        # transpose permutation constant
        nhwc_to_nchw_constant = tf_v1.constant(nhwc_to_nchw_permute,
                                               dtype=tf_v1.int32,
                                               name=nhwc_to_nchw_constant_name)

        # dummy node which we can refer to as input in the transpose for the output node
        dummy_node = tf_v1.constant(value=[[[[1]]]],
                                    dtype=tf_v1.float32,
                                    name='random_dummy_name')

        new_out_tensor_names = list()
        for out_tensor_name in node['output_tensors_names']:
            out_name, out_port = out_tensor_name.split(':')
            if len(output_tensors[int(out_port)].shape) == 4:  # TODO: find a better check for whether a transpose is required
                out_transpose_name = out_name + '_port_' + out_port + '_transpose'
                transpose = tf_v1.transpose(dummy_node,
                                            nhwc_to_nchw_constant,
                                            name=out_transpose_name)

                # starting from TF 1.8 it is not possible to modify the "node_def" of the "tf.op", so we create a copy,
                # update it, and use the copy from then on
                new_input_names = transpose.op.node_def.input[:]
                new_input_names[0] = out_tensor_name
                new_node_def = copy.deepcopy(transpose.op.node_def)
                new_node_def.input[:] = new_input_names
                add_node_def_to_subgraph(node,
                                         new_node_def,
                                         position=len(node['nodes_order']))
                new_out_tensor_names.append(out_transpose_name)
            else:
                new_out_tensor_names.append(out_tensor_name)

        # update output tensor names with transposes operations
        node['output_tensors_names'] = new_out_tensor_names
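The function above depends on a few module-level names; a sketch of plausible definitions (the permutation is the standard NHWC→NCHW axis order, while the constant name is an assumption):

import copy

# NHWC -> NCHW axis permutation fed to tf_v1.transpose above
nhwc_to_nchw_permute = [0, 3, 1, 2]
nhwc_to_nchw_constant_name = 'nhwc_to_nchw_constant'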
Code example #3
File: proteinfer.py Project: iponamareva/proteinfer
def main(_):
    # TF logging is too noisy otherwise.
    tf.get_logger().setLevel(tf.logging.ERROR)

    if FLAGS.reporting_threshold == 0.:
        raise ValueError(
            'The reporting_threshold flag was 0. Please supply a '
            'value between 0 (exclusive) and 1 (inclusive). A value '
            'of zero will report every label for every protein.')
    load_assets_and_run(input_fasta_path=FLAGS.i,
                        output_path=FLAGS.o,
                        num_ensemble_elements=FLAGS.num_ensemble_elements,
                        model_cache_path=FLAGS.model_cache_path,
                        reporting_threshold=FLAGS.reporting_threshold)
Code example #4
def main(_):
    tf.compat.v1.enable_eager_execution()
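    # "0" keeps all C++-side TF logs visible; the Python-side logger below is still capped at ERROR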
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "0"
    logger = tf.get_logger()
    logger.setLevel(tf.logging.ERROR)
    np.set_printoptions(suppress=True, precision=6, linewidth=120)
    run_atari()
Code example #5
def hvd_try_init():
    global IS_HVD_INIT
    if not IS_HVD_INIT and hvd is not None:
        hvd.init()
        IS_HVD_INIT = True

        tf.get_logger().propagate = False
        if hvd.rank() == 0:
            tf.logging.set_verbosity('INFO')
        else:
            tf.logging.set_verbosity('WARN')
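A sketch of the module-level guard this helper assumes (both names are read by the function; the import path is the standard TensorFlow Horovod binding):

try:
    import horovod.tensorflow as hvd
except ImportError:
    hvd = None
IS_HVD_INIT = False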
Code example #6
    def __init__(self, model):

        export_dir = os.path.join(os.getcwd(), model)
        self.logger = logging.getLogger(ct.LOGGER)
        self.sess = tf.Session()
        tf.get_logger().setLevel(self.logger.getEffectiveLevel())
        tf.saved_model.loader.load(
            sess=self.sess,
            tags=[tf.saved_model.tag_constants.SERVING],
            export_dir=export_dir)
        graph = tf.get_default_graph()

        self.pegx_t = graph.get_tensor_by_name("pegx:0")
        self.linkx_t = graph.get_tensor_by_name("linkx:0")
        self.locx_t = graph.get_tensor_by_name("locx:0")
        self.is_training_t = graph.get_tensor_by_name("is_training:0")

        self.pwin_t = graph.get_tensor_by_name("pwin:0")
        self.movelogits_t = graph.get_tensor_by_name("movelogits:0")

        self.use_recents = (int(self.locx_t.shape[3]) == 3)
Code example #7
File: utils.py Project: sometimesiwork/gpt-neo
def setup_logging(args):
    Path("logs").mkdir(exist_ok=True)
    tf.logging.set_verbosity(logging.INFO)
    tf.get_logger().propagate = False  # Remove double log on console
    name = os.path.splitext(os.path.basename(args.model))[0]
    handlers = [
        logging.FileHandler(f"logs/{name}.log"),
        logging.StreamHandler(sys.stdout)
    ]
    logger = logging.getLogger("tensorflow")
    logger.handlers = handlers
    return logger
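Hypothetical usage of setup_logging(), assuming an argparse-style namespace whose `model` attribute is a config path (the path is an assumption):

import argparse

args = argparse.Namespace(model="configs/gpt2_small.json")
logger = setup_logging(args)
logger.info("Writing to logs/gpt2_small.log and stdout")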
Code example #8
def main(argv):
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')

  if FLAGS.tapas_verbosity:
    tf.get_logger().setLevel(FLAGS.tapas_verbosity)

  task = tasks.Task[FLAGS.task]
  output_dir = os.path.join(FLAGS.output_dir, task.name.lower())
  model_dir = FLAGS.model_dir or os.path.join(output_dir, 'model')
  mode = Mode[FLAGS.mode.upper()]
  _check_options(output_dir, task, mode)

  if mode in (Mode.PREDICT_AND_EVALUATE, Mode.PREDICT):
    _print('Predicting ...')
    tpu_options = TpuOptions(
        use_tpu=FLAGS.use_tpu,
        tpu_name=FLAGS.tpu_name,
        tpu_zone=FLAGS.tpu_zone,
        gcp_project=FLAGS.gcp_project,
        master=FLAGS.master,
        num_tpu_cores=FLAGS.num_tpu_cores,
        iterations_per_loop=FLAGS.iterations_per_loop)
    _train_and_predict(
        task=task,
        tpu_options=tpu_options,
        test_batch_size=FLAGS.test_batch_size,
        train_batch_size=FLAGS.train_batch_size,
        gradient_accumulation_steps=FLAGS.gradient_accumulation_steps,
        bert_config_file=FLAGS.bert_config_file,
        init_checkpoint=FLAGS.init_checkpoint,
        test_mode=FLAGS.test_mode,
        mode=mode,
        output_dir=output_dir,
        model_dir=model_dir,
        loop_predict=FLAGS.loop_predict,
    )

  else:
    raise ValueError(f'Unsupported mode: {mode}')
Code example #9
def logging_setup(logger_name):
    formatter = logging.Formatter(
        '%(asctime)s | from: %(name)s  [%(levelname)s]: %(message)s')
    logger = logging.getLogger(logger_name)
    stdout_handler = logging.StreamHandler(stream=sys.stdout)
    stdout_handler.setLevel(logging.INFO)
    stdout_handler.setFormatter(formatter)
    logger.addHandler(stdout_handler)
    logger.setLevel(logging.DEBUG)  # include all kinds of messages
    tf_logger = tf.get_logger()
    tf_logger.addFilter(
        NotTFDepricatedMessage())  # filter out TF deprecation messages
    return logger
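NotTFDepricatedMessage is defined elsewhere in that project; a minimal sketch of such a filter:

class NotTFDepricatedMessage(logging.Filter):
    """Pass every record except TensorFlow deprecation messages."""

    def filter(self, record):
        return 'deprecat' not in record.getMessage().lower()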
Code example #10
def tf_logging(
    level: str = "INFO",
    filters: Optional[List[Callable[[Any], bool]]] = None,
    additional_handlers: Optional[List[Any]] = None,
) -> None:
    """Initializes Tensorflow logging."""
    tf.get_logger().propagate = False  # https://stackoverflow.com/a/33664610
    tf.get_logger().setLevel(level)
    for h in additional_handlers or []:
        tf.get_logger().addHandler(h)
    for f in filters or []:
        tf.get_logger().addFilter(f)
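A hypothetical call showing the hook points; since Python 3.2 a logging filter may be a plain callable, so a lambda suffices:

import logging

tf_logging(
    level="WARNING",
    filters=[lambda record: "deprecat" not in record.getMessage().lower()],
    additional_handlers=[logging.FileHandler("tf.log")],
)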
Code example #11
File: model.py Project: ffagerholm/melody-generator
import os

import magenta
from magenta.models.melody_rnn import melody_rnn_model
from magenta.models.melody_rnn import melody_rnn_sequence_generator
from magenta.models.shared import sequence_generator_bundle
from magenta.music.protobuf import generator_pb2
from magenta.music.protobuf import music_pb2
import tensorflow.compat.v1 as tf
# set tensorflow logger level to avoid unnecessary output
tf.get_logger().setLevel('ERROR')


class MelodyGenerator():
    """RNN model for generating melodies based on a few notes."""
    def __init__(self, bundle_path: str):
        """Initialize model from bundle.

        bundle_path (str): Path to the MelodyRnnSequenceGenerator to use for generation.
        """
        bundle_file = os.path.expanduser(bundle_path)
        bundle = sequence_generator_bundle.read_bundle_file(bundle_file)

        config_id = bundle.generator_details.id
        config = melody_rnn_model.default_configs[config_id]

        self.generator = melody_rnn_sequence_generator.MelodyRnnSequenceGenerator(
            model=melody_rnn_model.MelodyRnnModel(config),
            details=config.details,
            steps_per_quarter=config.steps_per_quarter,
            checkpoint=None,
Code example #12
def main(_):
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.get_logger().propagate = False

    albert_config = modeling.AlbertConfig.from_json_file(
        FLAGS.albert_config_file)

    validate_flags_or_throw(albert_config)

    tf.gfile.MakeDirs(FLAGS.output_dir)
    print("Output:", FLAGS.output_dir)

    tokenizer = fine_tuning_utils.create_vocab(
        vocab_file=FLAGS.vocab_file,
        do_lower_case=FLAGS.do_lower_case,
        spm_model_file=FLAGS.spm_model_file,
        hub_module=FLAGS.albert_hub_module_handle)

    tpu_cluster_resolver = None
    if FLAGS.use_tpu and FLAGS.tpu_name:
        tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
            FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)

    is_per_host = contrib_tpu.InputPipelineConfig.PER_HOST_V2
    if FLAGS.do_train:
        iterations_per_loop = int(
            min(FLAGS.iterations_per_loop, FLAGS.save_checkpoints_steps))
    else:
        iterations_per_loop = FLAGS.iterations_per_loop
    run_config = contrib_tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        master=FLAGS.master,
        model_dir=FLAGS.output_dir,
        keep_checkpoint_max=0,
        save_checkpoints_steps=FLAGS.save_checkpoints_steps,
        tpu_config=contrib_tpu.TPUConfig(
            iterations_per_loop=iterations_per_loop,
            num_shards=FLAGS.num_tpu_cores,
            per_host_input_for_training=is_per_host))

    train_examples = None
    num_train_steps = None
    num_warmup_steps = None
    train_examples = squad_utils.read_squad_examples(
        input_file=FLAGS.train_file, is_training=True)
    num_train_steps = int(
        len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
    if FLAGS.do_train:
        num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)

        # Pre-shuffle the input to avoid having to make a very large shuffle
        # buffer in the `input_fn`.
        rng = random.Random(12345)
        rng.shuffle(train_examples)

    model_fn = squad_utils.v2_model_fn_builder(
        albert_config=albert_config,
        init_checkpoint=FLAGS.init_checkpoint,
        learning_rate=FLAGS.learning_rate,
        num_train_steps=num_train_steps,
        num_warmup_steps=num_warmup_steps,
        use_tpu=FLAGS.use_tpu,
        use_one_hot_embeddings=FLAGS.use_tpu,
        max_seq_length=FLAGS.max_seq_length,
        start_n_top=FLAGS.start_n_top,
        end_n_top=FLAGS.end_n_top,
        dropout_prob=FLAGS.dropout_prob,
        hub_module=FLAGS.albert_hub_module_handle)

    # If TPU is not available, this will fall back to normal Estimator on CPU
    # or GPU.
    estimator = contrib_tpu.TPUEstimator(
        use_tpu=FLAGS.use_tpu,
        model_fn=model_fn,
        config=run_config,
        train_batch_size=FLAGS.train_batch_size,
        predict_batch_size=FLAGS.predict_batch_size)

    if FLAGS.do_train:
        # We write to a temporary file to avoid storing very large constant tensors
        # in memory.

        if not tf.gfile.Exists(FLAGS.train_feature_file):
            train_writer = squad_utils.FeatureWriter(filename=os.path.join(
                FLAGS.train_feature_file),
                                                     is_training=True)
            squad_utils.convert_examples_to_features(
                examples=train_examples,
                tokenizer=tokenizer,
                max_seq_length=FLAGS.max_seq_length,
                doc_stride=FLAGS.doc_stride,
                max_query_length=FLAGS.max_query_length,
                is_training=True,
                output_fn=train_writer.process_feature,
                do_lower_case=FLAGS.do_lower_case)
            train_writer.close()

        tf.logging.info("***** Running training *****")
        tf.logging.info("  Num orig examples = %d", len(train_examples))
        # tf.logging.info("  Num split examples = %d", train_writer.num_features)
        tf.logging.info("  Batch size = %d", FLAGS.train_batch_size)
        tf.logging.info("  Num steps = %d", num_train_steps)
        del train_examples

        train_input_fn = squad_utils.input_fn_builder(
            input_file=FLAGS.train_feature_file,
            seq_length=FLAGS.max_seq_length,
            is_training=True,
            drop_remainder=True,
            use_tpu=FLAGS.use_tpu,
            bsz=FLAGS.train_batch_size,
            is_v2=True)
        estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)

    if FLAGS.do_predict:
        with tf.gfile.Open(FLAGS.predict_file) as predict_file:
            prediction_json = json.load(predict_file)["data"]
        eval_examples = squad_utils.read_squad_examples(
            input_file=FLAGS.predict_file, is_training=False)

        if (tf.gfile.Exists(FLAGS.predict_feature_file)
                and tf.gfile.Exists(FLAGS.predict_feature_left_file)):
            tf.logging.info("Loading eval features from {}".format(
                FLAGS.predict_feature_left_file))
            with tf.gfile.Open(FLAGS.predict_feature_left_file, "rb") as fin:
                eval_features = pickle.load(fin)
        else:
            eval_writer = squad_utils.FeatureWriter(
                filename=FLAGS.predict_feature_file, is_training=False)
            eval_features = []

            def append_feature(feature):
                eval_features.append(feature)
                eval_writer.process_feature(feature)

            squad_utils.convert_examples_to_features(
                examples=eval_examples,
                tokenizer=tokenizer,
                max_seq_length=FLAGS.max_seq_length,
                doc_stride=FLAGS.doc_stride,
                max_query_length=FLAGS.max_query_length,
                is_training=False,
                output_fn=append_feature,
                do_lower_case=FLAGS.do_lower_case)
            eval_writer.close()

            with tf.gfile.Open(FLAGS.predict_feature_left_file, "wb") as fout:
                pickle.dump(eval_features, fout)

        tf.logging.info("***** Running predictions *****")
        tf.logging.info("  Num orig examples = %d", len(eval_examples))
        tf.logging.info("  Num split examples = %d", len(eval_features))
        tf.logging.info("  Batch size = %d", FLAGS.predict_batch_size)

        predict_input_fn = squad_utils.input_fn_builder(
            input_file=FLAGS.predict_feature_file,
            seq_length=FLAGS.max_seq_length,
            is_training=False,
            drop_remainder=False,
            use_tpu=FLAGS.use_tpu,
            bsz=FLAGS.predict_batch_size,
            is_v2=True)

        def get_result(checkpoint):
            """Evaluate the checkpoint on SQuAD v2.0."""
            # If running eval on the TPU, you will need to specify the number of
            # steps.
            reader = tf.train.NewCheckpointReader(checkpoint)
            global_step = reader.get_tensor(tf.GraphKeys.GLOBAL_STEP)
            all_results = []
            for result in estimator.predict(predict_input_fn,
                                            yield_single_examples=True,
                                            checkpoint_path=checkpoint):
                if len(all_results) % 1000 == 0:
                    tf.logging.info("Processing example: %d" %
                                    (len(all_results)))
                unique_id = int(result["unique_ids"])
                start_top_log_probs = ([
                    float(x) for x in result["start_top_log_probs"].flat
                ])
                start_top_index = [
                    int(x) for x in result["start_top_index"].flat
                ]
                end_top_log_probs = ([
                    float(x) for x in result["end_top_log_probs"].flat
                ])
                end_top_index = [int(x) for x in result["end_top_index"].flat]

                cls_logits = float(result["cls_logits"].flat[0])
                all_results.append(
                    squad_utils.RawResultV2(
                        unique_id=unique_id,
                        start_top_log_probs=start_top_log_probs,
                        start_top_index=start_top_index,
                        end_top_log_probs=end_top_log_probs,
                        end_top_index=end_top_index,
                        cls_logits=cls_logits))

            output_prediction_file = os.path.join(FLAGS.output_dir,
                                                  "predictions.json")
            output_nbest_file = os.path.join(FLAGS.output_dir,
                                             "nbest_predictions.json")
            output_null_log_odds_file = os.path.join(FLAGS.output_dir,
                                                     "null_odds.json")

            result_dict = {}
            cls_dict = {}
            squad_utils.accumulate_predictions_v2(
                result_dict, cls_dict, eval_examples, eval_features,
                all_results, FLAGS.n_best_size, FLAGS.max_answer_length,
                FLAGS.start_n_top, FLAGS.end_n_top)

            return squad_utils.evaluate_v2(
                result_dict, cls_dict, prediction_json, eval_examples,
                eval_features, all_results, FLAGS.n_best_size,
                FLAGS.max_answer_length, output_prediction_file,
                output_nbest_file, output_null_log_odds_file), int(global_step)

        def _find_valid_cands(curr_step):
            filenames = tf.gfile.ListDirectory(FLAGS.output_dir)
            candidates = []
            for filename in filenames:
                if filename.endswith(".index"):
                    ckpt_name = filename[:-6]
                    idx = ckpt_name.split("-")[-1]
                    if idx != "best" and int(idx) > curr_step:
                        candidates.append(filename)
            return candidates

        output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
        checkpoint_path = os.path.join(FLAGS.output_dir, "model.ckpt-best")
        key_name = "f1"
        writer = tf.gfile.GFile(output_eval_file, "w")
        if tf.gfile.Exists(checkpoint_path + ".index"):
            result = get_result(checkpoint_path)
            best_perf = result[0][key_name]
            global_step = result[1]
        else:
            global_step = -1
            best_perf = -1
            checkpoint_path = None
        while global_step < num_train_steps:
            steps_and_files = {}
            filenames = tf.gfile.ListDirectory(FLAGS.output_dir)
            for filename in filenames:
                if filename.endswith(".index"):
                    ckpt_name = filename[:-6]
                    cur_filename = os.path.join(FLAGS.output_dir, ckpt_name)
                    if cur_filename.split("-")[-1] == "best":
                        continue
                    gstep = int(cur_filename.split("-")[-1])
                    if gstep not in steps_and_files:
                        tf.logging.info(
                            "Add {} to eval list.".format(cur_filename))
                        steps_and_files[gstep] = cur_filename
            tf.logging.info("found {} files.".format(len(steps_and_files)))
            if not steps_and_files:
                tf.logging.info(
                    "found 0 files, global step: {}. Sleeping.".format(
                        global_step))
                time.sleep(60)
            else:
                for ele in sorted(steps_and_files.items()):
                    step, checkpoint_path = ele
                    print("GS: ", global_step, step)
                    if global_step >= step:
                        if len(_find_valid_cands(step)) > 1:
                            for ext in [
                                    "meta", "data-00000-of-00001", "index"
                            ]:
                                src_ckpt = checkpoint_path + ".{}".format(ext)
                                tf.logging.info("removing {}".format(src_ckpt))
                                tf.gfile.Remove(src_ckpt)
                        continue
                    result, global_step = get_result(checkpoint_path)
                    print("EVAL RESULTS")
                    tf.logging.info("***** Eval results *****")
                    for key in sorted(result.keys()):
                        tf.logging.info("  %s = %s", key, str(result[key]))
                        writer.write("%s = %s\n" % (key, str(result[key])))
                    if result[key_name] > best_perf:
                        best_perf = result[key_name]
                        for ext in ["meta", "data-00000-of-00001", "index"]:
                            src_ckpt = checkpoint_path + ".{}".format(ext)
                            tgt_ckpt = checkpoint_path.rsplit(
                                "-", 1)[0] + "-best.{}".format(ext)
                            tf.logging.info("saving {} to {}".format(
                                src_ckpt, tgt_ckpt))
                            tf.gfile.Copy(src_ckpt, tgt_ckpt, overwrite=True)
                            writer.write("saved {} to {}\n".format(
                                src_ckpt, tgt_ckpt))
                    writer.write("best {} = {}\n".format(key_name, best_perf))
                    tf.logging.info("  best {} = {}\n".format(
                        key_name, best_perf))

                    if len(_find_valid_cands(global_step)) > 2:
                        for ext in ["meta", "data-00000-of-00001", "index"]:
                            src_ckpt = checkpoint_path + ".{}".format(ext)
                            tf.logging.info("removing {}".format(src_ckpt))
                            tf.gfile.Remove(src_ckpt)
                    writer.write("=" * 50 + "\n")
            print("Sleeping")
            time.sleep(10)
        checkpoint_path = os.path.join(FLAGS.output_dir, "model.ckpt-best")
        result, global_step = get_result(checkpoint_path)
        tf.logging.info("***** Final Eval results *****")
        for key in sorted(result.keys()):
            tf.logging.info("  %s = %s", key, str(result[key]))
            writer.write("%s = %s\n" % (key, str(result[key])))
        writer.write("best perf happened at step: {}".format(global_step))
Code example #13
File: example.py Project: shawwn/mtftorch
        w = z**2.0
        with torch.enable_grad():
            q1 = z * 42.0
            q2 = w * 42.0

    z.requires_grad

    channel = torch.ones("H=4 W=4")
    pixel = torch.tensor([0.1, 0.5, 0.9], "C")
    image_hwc = torch.tensor(channel * pixel, requires_grad=True)

    x2 = torch.tensor([42.0], "C=1", requires_grad=True)

    # torch.set_grad_enabled(True)

    with torch.enable_grad():
        loss = image_hwc.mean()
        loss += x2**2

    train_op = loss.backward().tf()
    init_op = zero_grad().tf()

    #gs, tensor_to_gradient = grad([2.0 * z, x + y, x - y], [z, x, y, q1, q2])

    with torch.use_session():
        init_op.eval()
        for i in range(8):
            train_op.eval()

    tf.get_logger().setLevel("WARN")
Code example #14
"""
@author: jjohnarios
"""

from flask import Flask, request
import pandas as pd
import numpy as np
from tensorflow.keras.models import load_model
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from tensorflow.python.keras.backend import set_session
import flasgger
from flasgger import Swagger

# Ignore TensorFlow info messages and warnings
tf.get_logger().setLevel('ERROR')

app = Flask(__name__)
Swagger(app)  # Generate UI using flasgger

# Needed for keras
sess = tf.Session()
graph = tf.get_default_graph()

# IMPORTANT: models have to be loaded AFTER SETTING THE SESSION for keras!
# Otherwise their weights will be unavailable in other threads once the session has been set
set_session(sess)

model = load_model("model.h5")
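A hypothetical request handler showing why `sess` and `graph` are kept global: predictions must run under the same graph and session the model was loaded into. The route, payload shape, and response format are assumptions.

@app.route("/predict", methods=["POST"])
def predict():
    payload = pd.DataFrame(request.get_json())  # hypothetical JSON payload of feature columns
    with graph.as_default():
        set_session(sess)
        preds = model.predict(payload.values)
    return {"prediction": preds.tolist()}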

Code example #15
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import os

# do not print INFO and WARNING messages from TensorFlow
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
try:
    import tensorflow.compat.v1 as tf_v1
    # disable eager execution of TensorFlow 2 environment immediately
    tf_v1.disable_eager_execution()
except ImportError:
    import tensorflow as tf_v1

# in some environments, suppressing output via TF_CPP_MIN_LOG_LEVEL does not work
tf_v1.get_logger().setLevel("ERROR")

try:
    import tensorflow.contrib  # pylint: disable=no-name-in-module,import-error
except:
    pass  # we try to import contrib for loading models that use contrib operations

import logging as log

from openvino.tools.mo.load.loader import Loader
from openvino.tools.mo.front.common.register_custom_ops import check_for_duplicates
from openvino.tools.mo.front.common.register_custom_ops import update_extractors_with_extensions
from openvino.tools.mo.front.extractor import restore_edges, extract_node_attrs, remove_control_dependency_inputs, add_outputs_identity
from openvino.tools.mo.front.tf.extractor import get_tf_edges, create_tf_edge, tf_op_extractor, tf_op_extractors
from openvino.tools.mo.front.tf.loader import load_tf_graph_def, protobuf2nx
from openvino.tools.mo.graph.graph import Graph, Node
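For reference, TF_CPP_MIN_LOG_LEVEL only takes effect when set before TensorFlow is imported, which is why the assignment above precedes the import. The accepted values:

# TF_CPP_MIN_LOG_LEVEL values (C++ backend only):
#   '0' - show everything          '1' - hide INFO
#   '2' - hide INFO and WARNING    '3' - also hide ERROR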
Code example #16
def prep_dataset(dataset_filepath, start_date, end_date):
    module_url = "https://tfhub.dev/google/universal-sentence-encoder/2"

    tf.disable_v2_behavior()
    tf.compat.v1.disable_eager_execution()

    cache = {}
    calendar = mcal.get_calendar('NYSE')

    def next_trading_day(start_day=None, SAFE_DELTA=4):
        """Returns the next/previous trading date separated by a certain number of
        trading days.
        """
        if start_day is None:
            start_day = datetime.datetime.utcnow().date()
        if start_day in cache:
            return cache[start_day]
        start = pd.to_datetime(start_day)
        end = start + np.timedelta64(SAFE_DELTA, 'D')
        business_days = calendar.valid_days(start_date=start, end_date=end)
        next_day = business_days[1].date()
        next_day = next_day.strftime("%Y-%m-%d")
        cache[start_day] = next_day
        return next_day

    raw_prices_filepath = dataset_filepath + '/price/raw'
    preprocessed_tweets_filepath = dataset_filepath + '/tweet/preprocessed'

    company_to_price_df = {}
    company_to_tweets = {}

    for filename in os.listdir(raw_prices_filepath):
        with open(raw_prices_filepath + '/' + filename) as file:
            company_name = filename.split('.')[0]

            # Not enough data for GMRE
            if company_name == 'GMRE':
                continue
            df = pd.read_csv(file)
            df.columns = [
                'date', 'open', 'high', 'low', 'close', 'adjust_close',
                'volume'
            ]
            mask = (df['date'] >= start_date) & (df['date'] <= end_date)
            df = df.loc[mask]
            company_to_price_df[company_name] = df.dropna()

    for filename in tqdm(os.listdir(preprocessed_tweets_filepath)):
        company_name = filename.split('.')[0]
        dates_to_tweets = {}
        for tweet_filename in os.listdir(preprocessed_tweets_filepath + '/' +
                                         filename):
            if tweet_filename < start_date or tweet_filename > end_date:
                continue
            with open(preprocessed_tweets_filepath + '/' + filename + '/' +
                      tweet_filename) as file:
                list_of_tweets = []
                for line in file:
                    tweet_json = json.loads(line)
                    list_of_tweets.append(tweet_json)
                date_idx = next_trading_day(tweet_filename)
                if date_idx not in dates_to_tweets:
                    dates_to_tweets[date_idx] = list_of_tweets
                else:
                    dates_to_tweets[date_idx] += list_of_tweets
        company_to_tweets[company_name] = dates_to_tweets

    # Reduce logging output.
    logging.set_verbosity(logging.ERROR)
    tf.get_logger().setLevel(logging.ERROR)
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

    # Import the Universal Sentence Encoder's TF Hub module
    def embed_useT(module):
        with tf.Graph().as_default():
            sentences = tf.placeholder(tf.string)
            embed = hub.Module(module)
            embeddings = embed(sentences)
            session = tf.train.MonitoredSession()
        return lambda x: session.run(embeddings, {sentences: x})

    embed_fn = embed_useT(module_url)

    # Generate embeddings
    for company in tqdm(company_to_tweets.keys()):
        for date in company_to_tweets[company].keys():
            messages = []
            for j in range(len(company_to_tweets[company][date])):
                messages.append(' '.join(
                    company_to_tweets[company][date][j]['text']))
            # embed all messages for this (company, date) once, not once per tweet
            if messages:
                message_embeddings = embed_fn(messages)
            for k in range(len(company_to_tweets[company][date])):
                company_to_tweets[company][date][k]['embedding'] = list(
                    message_embeddings[k])

    # Create date mapping
    date_universe = set()
    for company in company_to_price_df.keys():
        date_universe = date_universe.union(
            set(company_to_price_df[company].date))
    for company in company_to_tweets.keys():
        date_universe = date_universe.union(
            set(company_to_tweets[company].keys()))
    date_universe = sorted(list(date_universe))
    index_to_date = {i - 5: d for i, d in enumerate(date_universe)}
    date_to_index = {d: i - 5 for i, d in enumerate(date_universe)}

    # Calculate dimensions for tensor
    n_stocks = len(company_to_tweets.keys())
    n_days = len(date_universe)
    max_tweets = 0
    for c, d in itertools.product(company_to_tweets.keys(), date_universe):
        if d in company_to_tweets[c]:
            max_tweets = max(max_tweets, len(company_to_tweets[c][d]))
    # Create index mapping for stocks alphabetically
    company_to_index = {
        c: i
        for i, c in enumerate(sorted(list(company_to_tweets.keys())))
    }

    return company_to_price_df, company_to_tweets, date_universe, n_days, n_stocks, max_tweets
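A hypothetical call, assuming the StockNet-style layout the function reads (`<dataset>/price/raw` and `<dataset>/tweet/preprocessed`; the path and dates are assumptions):

(company_to_price_df, company_to_tweets, date_universe,
 n_days, n_stocks, max_tweets) = prep_dataset('stocknet-dataset',
                                              '2014-01-01', '2015-12-31')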
Code example #17
import os

from pprint import pprint
from six import BytesIO
from collections import namedtuple

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
from PIL import Image, ImageColor, ImageDraw, ImageFont, ImageOps
from tqdm import tqdm
import time

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
tf.disable_v2_behavior()
tf.get_logger().setLevel('WARNING')
tf.autograph.set_verbosity(2)

import sys
sys.path.append(r'/home/irisuser/pycode')
import od_utils

test_path = "/home/irisuser/samples/"
sample_image_path = test_path + "FruitShelf.jpg"

import od_detect
result, image_out = od_detect.detect_image(sample_image_path)

resultDF = TFtoPANDAS(result)
resultDF = resultDF.loc[resultDF.reset_index().groupby(['objectID'
                                                        ])['Score'].idxmax()]
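TFtoPANDAS is not defined in this snippet; a plausible sketch, assuming `result` holds TF Hub detector output arrays (the key names and the `objectID` column are assumptions):

def TFtoPANDAS(result):
    # one row per detection, indexed by a synthetic objectID
    return pd.DataFrame({
        "objectID": np.arange(len(result["detection_scores"])),
        "Class": result["detection_class_entities"],
        "Score": result["detection_scores"],
    })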
Code example #18
import os
import logging
import constants as ct

# Suppress Tensorflow info messages and warnings
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'
import tensorflow.compat.v1 as tf  # noqa E402
tf.get_logger().setLevel(ct.K_LOG_LEVEL[3])
tf.disable_v2_behavior()


class NNEvaluater:

    def __init__(self, model):

        export_dir = os.path.join(os.getcwd(), model)
        self.logger = logging.getLogger(ct.LOGGER)
        self.sess = tf.Session()
        tf.get_logger().setLevel(self.logger.getEffectiveLevel())
        tf.saved_model.loader.load(
            sess=self.sess,
            tags=[tf.saved_model.tag_constants.SERVING],
            export_dir=export_dir)
        graph = tf.get_default_graph()

        self.pegx_t = graph.get_tensor_by_name("pegx:0")
        self.linkx_t = graph.get_tensor_by_name("linkx:0")
        self.locx_t = graph.get_tensor_by_name("locx:0")
        self.is_training_t = graph.get_tensor_by_name("is_training:0")
Code example #19
    def update_placeholder_shape_and_add_transpose(node: Node):
        """
        The function changes placeholder shapes from NHWC to NCHW format and adds transpose operations if needed.
        :param node: node to operate on.
        :return: None
        """
        try:
            import tensorflow.compat.v1 as tf_v1
            # disable eager execution of TensorFlow 2 environment immediately
            tf_v1.disable_eager_execution()
        except ImportError:
            import tensorflow as tf_v1
        # in some environments, suppressing output via TF_CPP_MIN_LOG_LEVEL does not work
        tf_v1.get_logger().setLevel("ERROR")

        from openvino.tools.mo.front.common.layout import convert_shape, nhwc_to_nchw_permute, nchw_to_nhwc_permute
        from openvino.tools.mo.front.tf.extractors.utils import tf_tensor_shape
        from openvino.tools.mo.front.tf.partial_infer.tf import add_node_def_to_subgraph, update_input_in_pbs

        tf_v1.reset_default_graph()

        inputs_replacements = list()

        # transpose permutation constant
        nchw_to_nhwc_constant = tf_v1.constant(nchw_to_nhwc_permute,
                                               dtype=tf_v1.int32,
                                               name=nchw_to_nhwc_constant_name)
        nhwc_to_nchw_constant = tf_v1.constant(nhwc_to_nchw_permute,
                                               dtype=tf_v1.int32,
                                               name=nhwc_to_nchw_constant_name)

        for placeholder_name in node['input_nodes_names']:
            # dummy node which we can refer to as input in the transpose for the output node
            # dummy node should be unique for each placeholder
            dummy_node = tf_v1.constant(value=[[[[1]]]],
                                        dtype=tf_v1.float32,
                                        name='random_dummy_name_' +
                                        placeholder_name)

            placeholder = node['pbs'][placeholder_name]
            cur_shape = tf_tensor_shape(placeholder.attr['shape'].shape)
            if len(cur_shape) == 4:  # TODO: find a better check for whether a transpose is required
                nchw_shape = convert_shape(cur_shape, nhwc_to_nchw_permute)
                for ind in range(len(cur_shape)):
                    placeholder.attr['shape'].shape.dim[ind].size = nchw_shape[
                        ind]
                transpose_name = placeholder.name + '_transpose'
                transpose = tf_v1.transpose(dummy_node, nchw_to_nhwc_constant,
                                            transpose_name)  # NCHW -> NHWC

                # add transpose operations to GraphDef after placeholders
                add_node_def_to_subgraph(node, transpose.op.node_def,
                                         transpose_name,
                                         len(node['input_nodes_names']))
                inputs_replacements.append((placeholder.name, transpose_name))
                inputs_replacements.append((dummy_node.name, placeholder.name))
                node['real_input_dims'].append(nchw_shape)
            else:
                node['real_input_dims'].append(cur_shape)
        add_node_def_to_subgraph(node, nchw_to_nhwc_constant.op.node_def)
        add_node_def_to_subgraph(node, nhwc_to_nchw_constant.op.node_def)

        # update the initial input names to the transposed ones
        for old_input_tensor_name, new_name in inputs_replacements:
            update_input_in_pbs(node, old_input_tensor_name, new_name)
Code example #20
def main(_):
    tf.logging.set_verbosity(tf.logging.INFO)
    logger = tf.get_logger()
    logger.propagate = False

    print(FLAGS.model_name)
    arabert_prep = ArabertPreprocessor(model_name=FLAGS.model_name,
                                       remove_html_markup=False)

    with tf.gfile.Open(FLAGS.input_file, "r") as reader:
        input_data = json.load(reader)["data"]

    new_answers_count = 0
    no_answers_found_count = 0
    trunc_ans_count = 0
    for entry in tqdm(input_data):
        for paragraph in entry["paragraphs"]:

            if FLAGS.filter_tydiqa:
                # this will only apply farasa segmentation to Arabic Data
                if "arabic" not in paragraph["qas"][0]["id"]:
                    continue
            old_context = paragraph["context"]
            paragraph["context"] = clean_preprocess(paragraph["context"],
                                                    arabert_prep)
            for qas in paragraph["qas"]:
                qas["question"] = clean_preprocess(qas["question"],
                                                   arabert_prep)

                for i in range(len(qas["answers"])):
                    temp_text = clean_preprocess(qas["answers"][i]["text"],
                                                 arabert_prep)

                    if temp_text == "":
                        temp_text = qas["answers"][i]["text"]

                    answer_location = paragraph["context"].find(temp_text)
                    if answer_location == -1:

                        search_start_pos = get_start_pos(
                            old_context, qas["answers"][i]["answer_start"],
                            arabert_prep)
                        search_end_pos = min(
                            len(paragraph["context"]),
                            search_start_pos + len(temp_text) + 20,
                        )
                        answer_match = find_near_matches(
                            temp_text,
                            paragraph["context"]
                            [search_start_pos:search_end_pos],
                            max_l_dist=min(10,
                                           len(temp_text) // 2),
                        )
                        if len(answer_match) > 0:
                            tf.logging.warning(
                                "Found new answer for question '%s' :\n '%s' \nvs old.\n '%s'\norig:\n'%s'\ncontext:\n'%s'\n==================",
                                qas["id"],
                                answer_match[i].matched,
                                temp_text,
                                qas["answers"][i]["text"],
                                paragraph["context"],
                            )
                            temp_text = answer_match[i].matched
                            qas["answers"][i]["answer_start"] = answer_match[
                                i].start
                            new_answers_count += 1

                        else:
                            tf.logging.warning(
                                "Could not find answer for question '%s' :\n '%s' \nvs.\n '%s'\norig answer:\n '%s'\n==================",
                                qas["id"],
                                paragraph["context"],
                                temp_text,
                                qas["answers"][i]["text"],
                            )
                            qas["answers"][i]["answer_start"] = -1
                            no_answers_found_count += 1
                    else:
                        qas["answers"][i]["answer_start"] = answer_location

                    if len(temp_text) + qas["answers"][i]["answer_start"] < (
                            len(paragraph["context"]) + 1):
                        qas["answers"][i]["text"] = temp_text
                    else:
                        tf.logging.warning(
                            "answer truncated for question '%s' :\n context:\n'%s' \nanswer:\n '%s'\n orig_answer:\n'%s'\nanswer start: %d\nlength of answer: %d\nlength of paragraph: %d\n=================================",
                            qas["id"],
                            paragraph["context"],
                            temp_text,
                            qas["answers"][i]["text"],
                            qas["answers"][0]["answer_start"],
                            len(temp_text),
                            len(paragraph["context"]),
                        )
                        qas["answers"][0]["text"] = temp_text[
                            0:len(paragraph["context"]) -
                            (len(temp_text) +
                             qas["answers"][0]["answer_start"])]
                        trunc_ans_count += 1

    tf.logging.warning("Found %d new answers: ", new_answers_count)
    tf.logging.warning("Found %d with no answers: ", no_answers_found_count)
    tf.logging.warning("Found %d with trunc answers: ", trunc_ans_count)

    input_data = {
        "data": input_data,
        "version": "1.1",
        "preprocess": "True",
    }
    with tf.gfile.Open(FLAGS.output_file, "w") as writer:
        json.dump(input_data, writer)
Code example #21
File: coco.py Project: rfdickerson/Model-References
def run_coco(args):
    print("Command: ", args.command)
    print("Model: ", args.model)
    print("Dataset: ", args.dataset)
    print("Year: ", args.year)
    print("Logs: ", args.logs)
    print("Auto Download: ", args.download)

    ############################################################
    #  Configurations
    ############################################################
    if args.deterministic:
        tf.config.threading.set_inter_op_parallelism_threads(1)
        tf.config.threading.set_intra_op_parallelism_threads(1)
        tf.reset_default_graph()
        SEED = 0
        os.environ['PYTHONHASHSEED'] = str(SEED)
        os.environ['TF_DETERMINISTIC_OPS'] = '1'
        random.seed(SEED)
        np.random.seed(SEED)
        tf.set_random_seed(SEED)

    is_master = True
    hvd = None

    if args.gpus < 0:
        config = tf.ConfigProto(device_count={'GPU': 0})
        K.set_session(tf.Session(config=config))
        print('running on cpu')

    if args.using_horovod and args.command == "train":
        if args.device in ['HPU']:
            from TensorFlow.common.horovod_helpers import hvd_init, Framework
            hvd = hvd_init(framework=Framework.KERAS)
        else:
            import horovod.tensorflow.keras as hvd
            hvd.init()
            confighorovod = tf.ConfigProto()
            confighorovod.gpu_options.visible_device_list = str(
                hvd.local_rank())
            K.set_session(tf.Session(config=confighorovod))
        is_master = hvd.local_rank() == 0
        if not is_master:
            tf.get_logger().setLevel(tf.logging.FATAL)

    elif args.using_horovod and args.command == "evaluate":
        if args.device in ['HPU']:
            from TensorFlow.common.horovod_helpers import hvd_init, Framework
            hvd = hvd_init(framework=Framework.KERAS)
        else:
            confighorovod = tf.ConfigProto()
            confighorovod.gpu_options.visible_device_list = str(args.gpus)
            K.set_session(tf.Session(config=confighorovod))
        is_master = hvd.local_rank() == 0
        if not is_master:
            tf.get_logger().setLevel(tf.logging.FATAL)

    if args.device in ['HPU']:
        from TensorFlow.common.library_loader import load_habana_module
        load_habana_module()

    dev_str = f'/device:{args.device}:0'
    print(f'Selected device: {dev_str}')

    class CocoConfig(Config):
        """Configuration for training on MS COCO.
        Derives from the base Config class and overrides values specific
        to the COCO dataset.
        """
        # Give the configuration a recognizable name
        NAME = "coco"
        if hvd:
            _GPU_COUNT = hvd.size()
            GPU_COUNT = 1  #fix batch size as IMAGES_PER_GPU
        else:
            _GPU_COUNT = abs(args.gpus)
            GPU_COUNT = _GPU_COUNT

        if args.fchollet_fix:
            BGR = True
            ## mean pixel is in RGB format to match original settings
            MEAN_PIXEL = [123.68, 116.78, 103.94]
        elif args.BGR or 'kapp_' in args.backbone:
            ## BGR/caffe format
            BGR = True
            MEAN_PIXEL = [103.94, 116.78, 123.68]
        else:
            ## default RGB mode
            BGR = False
            MEAN_PIXEL = [123.68, 116.78, 103.94]

        GT_NOISE_STD = 0

        QUICK_TEST = args.quick_test
        ## these can be used to run with dynamic shapes
        BIN_PADDING = None  # 8
        IMAGE_RESIZE_MODE = "square"  # "pad64"
        DYNAMIC_ANCHORS = False  # True
        PRESET_LAYERS_TRAIN = args.train_layers
        if args.dynamic:
            IMAGE_RESIZE_MODE = "pad64"
            DYNAMIC_ANCHORS = True

        if BIN_PADDING or IMAGE_RESIZE_MODE in ['no_pad', 'pad64'] or QUICK_TEST:
            IMAGES_PER_GPU = 1
        else:
            IMAGES_PER_GPU = 4
        # Override if specified.
        if args.images_per_gpu is not None:
            IMAGES_PER_GPU = args.images_per_gpu
        # always evaluate using same number of samples regardless of number of gpus
        VAL_SAMPLES = 1600
        if QUICK_TEST:
            VAL_SAMPLES = 1
        _BATCH_SIZE = _GPU_COUNT * IMAGES_PER_GPU
        VALIDATION_STEPS = None  # VAL_SAMPLES//_BATCH_SIZE
        if args.validation_steps is not None:
            VALIDATION_STEPS = args.validation_steps
        # lr is scaled with respect to the actual number of gpus
        LEARNING_RATE = 0.02 * (_BATCH_SIZE / 16)**0.5
        DETERMINISTIC = args.deterministic
        if args.deterministic:
            LEARNING_RATE = 0
        STEPS_PER_EPOCH = None  # 5000
        PYRAMID_ROI_CUSTOM_OP = int(args.custom_roi)
        LEARNING_MOMENTUM_CONST = args.momentum_const == '1'
        COMBINED_NMS_OP = args.combined_nms == '1'
        USE_VALID_BOXES = args.use_valid_boxes
        if args.xl_inputs:
            TRAIN_ROIS_PER_IMAGE = 512
            ROI_POSITIVE_RATIO = 0.25
            IMAGE_MIN_DIM_TRAIN = [640, 672, 704, 736, 768, 800, 832]
            IMAGE_MIN_DIM_VAL = 832
            IMAGE_MAX_DIM = 1344
        else:
            TRAIN_ROIS_PER_IMAGE = 256
            ROI_POSITIVE_RATIO = 0.33
            IMAGE_MIN_DIM_TRAIN = [640, 672, 704, 736, 768, 800]
            IMAGE_MIN_DIM_VAL = 800
            IMAGE_MAX_DIM = 1024
        if QUICK_TEST:
            TRAIN_ROIS_PER_IMAGE = 20
            IMAGE_MAX_DIM = 512
        if args.clip_norm > 0:
            GRADIENT_CLIP_NORM = args.clip_norm
        else:
            GRADIENT_CLIP_NORM = None
        # Number of classes (including background)
        NUM_CLASSES = 1 + 80  # COCO has 80 classes
        BACKBONE = args.backbone
        RPN_ONLY = args.rpn_only
        ### schedule settings
        WARMUP = 1000
        if args.warmup_steps is not None:
            WARMUP = args.warmup_steps
        if QUICK_TEST:
            WARMUP = 1
        if RPN_ONLY:
            DROPS = [40, 60]
            TOT_EPOCHS = 70
        else:
            if args.short:  ## short regime
                DROPS = [77, 154]
                TOT_EPOCHS = 175
            else:  ## long regime
                DROPS = [210, 280]
                TOT_EPOCHS = 300

        if args.epochs is not None:
            TOT_EPOCHS = args.epochs

        if args.steps_per_epoch is not None:
            STEPS_PER_EPOCH = args.steps_per_epoch

        if STEPS_PER_EPOCH is not None:
            _SCHEDUAL_RATIO = max(STEPS_PER_EPOCH // 1000, 1)
        else:
            _SCHEDUAL_RATIO = max((117280 // _BATCH_SIZE) // 1000, 1)
        for i, v in enumerate(DROPS):
            DROPS[i] = int(v / _SCHEDUAL_RATIO + 0.5)
        del i
        del v
        if args.epochs is None:
            TOT_EPOCHS = int(TOT_EPOCHS / _SCHEDUAL_RATIO + 0.5)

    class InferenceConfig(CocoConfig):
        # Set batch size to 1 since we'll be running inference on
        # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
        GPU_COUNT = 1
        IMAGES_PER_GPU = 1
        DETECTION_MIN_CONFIDENCE = 0.001

    if args.command == "train":
        config = CocoConfig()
        mode = "training"
    else:
        config = InferenceConfig()
        mode = "inference"

    with tf.device("/device:CPU:0"):
        model = modellib.MaskRCNN(dev_str,
                                  mode=mode,
                                  config=config,
                                  model_dir=args.logs,
                                  hvd=hvd)

    exclude = None
    # Select weights file to load
    if args.model.lower() == "coco":
        model_path = COCO_MODEL_PATH
    elif args.model.lower() == "last":
        # Find last trained weights
        model_path = model.find_last()
    elif args.model.lower() == "imagenet":
        # Start from ImageNet trained weights
        with tf.device(dev_str):
            model_path = model.get_imagenet_weights()
    else:
        model_path = args.model
        if 'r101_imagenet_init.h5' in args.model:
            exclude = r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)|(anchors.*)|(mask\_.*)|"

    # Load weights
    if is_master:
        config.display()
        model.keras_model.summary()
        print("Loading weights", model_path)
    if 'keras' not in args.model:
        # keras backbone weights are automatically loaded during build
        with tf.device(dev_str):
            model.load_weights(model_path,
                               by_name=True,
                               exclude=exclude,
                               resume=args.resume,
                               verbose=is_master)
    # Train or evaluate
    if args.command == "train":
        # Training dataset. Use the training set and 35K from the
        # validation set, as in the Mask R-CNN paper.
        num_shards = 1
        shard_id = 0
        if hvd:
            num_shards = hvd.local_size()
            shard_id = hvd.local_rank()
        dataset_train = CocoDataset()
        dataset_train.load_coco(args.dataset,
                                "train",
                                year=args.year,
                                auto_download=args.download,
                                num_shards=num_shards,
                                shard_id=shard_id)

        if args.year in '2014':
            dataset_train.load_coco(args.dataset,
                                    "valminusminival",
                                    year=args.year,
                                    auto_download=args.download,
                                    num_shards=num_shards,
                                    shard_id=shard_id)

        dataset_train.prepare()
        # Validation dataset
        dataset_val = CocoDataset()
        val_type = "val" if args.year in '2017' else "minival"
        dataset_val.load_coco(args.dataset,
                              val_type,
                              year=args.year,
                              auto_download=args.download,
                              num_shards=num_shards,
                              shard_id=shard_id,
                              limit=config.VAL_SAMPLES)
        dataset_val.prepare()

        augmentation = iaa.Fliplr(0.5)
        callbacks = []

        ## add callbacks here
        schedule = COCOScheduler(config.LEARNING_RATE,
                                 warmup_steps=config.WARMUP,
                                 gamma=0.1,
                                 drops=config.DROPS,
                                 verbose=is_master)
        callbacks += [schedule]

        external_callbacks = getattr(args, 'external_callbacks', None)
        if external_callbacks is not None:
            callbacks.extend(external_callbacks)

        if is_master:
            print("Training Resnet stage 3+nobn")
        with tf.device("/device:CPU:0"):
            model.train(dev_str,
                        dataset_train,
                        dataset_val,
                        learning_rate=config.LEARNING_RATE,
                        epochs=config.TOT_EPOCHS,
                        layers=config.PRESET_LAYERS_TRAIN,
                        augmentation=augmentation,
                        custom_callbacks=callbacks,
                        dump_tf_timeline=args.dump_tf_timeline,
                        disable_validation=args.disable_validation)

    elif args.command == "evaluate":
        # Validation dataset
        dataset_val = CocoDataset()
        val_type = "val" if args.year in '2017' else "minival"
        coco = dataset_val.load_coco(
            args.dataset,
            val_type,
            year=args.year,
            return_coco=True,
            auto_download=args.download,
            limit=args.limit if args.limit > 0 else None)
        dataset_val.prepare()
        print("Running COCO evaluation on {} images.".format(
            len(dataset_val.image_info)))
        evaluate_coco(model, dataset_val, coco)
    else:
        print("'{}' is not recognized. "
              "Use 'train' or 'evaluate'".format(args.command))
Code example #22
from absl import flags
from matplotlib import cm
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp

from eim.models import base
from eim.models import his
from eim.models import lars
from eim.models import nis
from eim.models import rejection_sampling
import eim.small_problems_dists as dists

tfd = tfp.distributions

tf.get_logger().setLevel("INFO")
flags.DEFINE_enum("algo", "lars", ["lars", "nis", "his", "rejection_sampling"],
                  "The algorithm to run.")
flags.DEFINE_boolean(
    "lars_allow_eval_target", False,
    "Whether LARS is allowed to evaluate the target density.")
flags.DEFINE_enum("target", dists.NINE_GAUSSIANS_DIST, dists.TARGET_DISTS,
                  "Distribution to draw data from.")
flags.DEFINE_float("proposal_variance", 1.0, "Variance for the proposal")
flags.DEFINE_float(
    "nine_gaussians_variance", 0.01,
    "Variance for the mixture components in the nine gaussians.")
flags.DEFINE_string(
    "energy_fn_sizes", "20,20",
    "List of hidden layer sizes for the energy function, as a comma-"
    "separated list.")
Code example #23
from object_detection.dataset_tools import tf_record_creation_util
from object_detection.utils import dataset_util
from object_detection.utils import label_map_util

flags = tf.app.flags
flags.DEFINE_string('output_path', '/tmp/', 'Path to output tf.Record file.')
flags.DEFINE_string('image_dir', '', 'Directory containing the image files.')
flags.DEFINE_string('annotations_file', '', 'JSON file containing bounding box annotations.')
flags.DEFINE_boolean(
    'include_masks', False, 'Whether to include instance segmentation masks '
    '(PNG encoded) in the result.')
flags.DEFINE_integer('num_shards', 100, 'Number of output file shards.', lower_bound=1)
FLAGS = flags.FLAGS

logger = tf.get_logger()
logger.setLevel(logging.INFO)


def create_tf_example(image,
                      annotations_list,
                      image_dir,
                      category_index,
                      include_masks=False):
  """Converts image and annotations to a tf.Example proto.

  Args:
    image: dict with keys: [u'license', u'file_name', u'coco_url', u'height',
      u'width', u'date_captured', u'flickr_url', u'id']
    annotations_list:
      list of dicts with keys: [u'segmentation', u'area', u'iscrowd',
Code example #24
#from PIL import Image
import os
import glob
import cv2 as cv2
import pprint
import numpy as np
import logging
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
tf.get_logger().setLevel(logging.ERROR)
from tensorflow.python.framework import graph_util

print("\nSet up initial settings ... ")
configuration = {
    "path": "ds-vasyl-lyashkevych//",
    "height": 224,
    "width": 224,
    "channel": 3,
    "ratio": 0.8,
    "batch_size": 2,
    "num_epochs": 10,
    "pb_file_path": "parking_vgg.pb"
}
pprint.pprint(configuration)
print("\n")


def read_img_hdd(path):
    clss = [path + x for x in os.listdir(path) if os.path.isdir(path + x)]
    print("Classes", clss)
    imgs = []
Code example #25
File: train.py Project: SashankV05/Jade_T5
create_registry(os.path.join(args.dir, args.train),
                os.path.join(args.dir, args.val), args.taskname,
                args.compression)
args.tpu_address = f"grpc://{args.tpu_address}:8470"

if args.tpu_address is not None:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
        tpu=args.tpu_address)
    tf.enable_eager_execution()
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    print("All devices: ", tf.config.list_logical_devices('TPU'))
    tf.disable_v2_behavior()

tf.get_logger().propagate = False
py_logging.root.setLevel('INFO')


@contextmanager
def tf_verbosity_level(level):
    og_level = tf.logging.get_verbosity()
    tf.logging.set_verbosity(level)
    yield
    tf.logging.set_verbosity(og_level)


MODEL_SIZE = args.model_size
MODELS_DIR = os.path.join(args.dir, "models")
# Public GCS path for T5 pre-trained model checkpoints
BASE_PRETRAINED_DIR = "gs://t5-data/pretrained_models"
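Hypothetical use of the tf_verbosity_level() context manager defined above:

with tf_verbosity_level(tf.logging.ERROR):
    tf.logging.info("suppressed")  # below ERROR, so not emitted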
Code example #26
File: main.py Project: shawwn/lm
def fix_logger():
    v1.get_logger().propagate = False
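Here `v1` is presumably `tensorflow.compat.v1`, imported earlier in main.py; setting `propagate = False` keeps TensorFlow records from being duplicated by the root logger's handlers.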