Example #1
def set_logging(level):
    logging._warn_preinit_stderr = 0  # pylint: disable=protected-access
    log_level = parse_logging_level(level)

    logging.set_verbosity(log_level)

    logging.debug("Python Version: %s", sys.version)
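A minimal call-site sketch (assuming parse_logging_level maps a string such as "debug" to the corresponding absl verbosity and that this module does from absl import logging):

# Hypothetical call site: configure absl logging from a CLI flag value.
set_logging("debug")
logging.info("Verbosity configured")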
Example #2
def load_model(base_dir, master_spec_name, checkpoint_name):
    # Read the master spec
    master_spec = spec_pb2.MasterSpec()
    with open(os.path.join(base_dir, master_spec_name), "r") as f:
        text_format.Merge(f.read(), master_spec)
    spec_builder.complete_master_spec(master_spec, None, base_dir)
    logging.set_verbosity(logging.WARN)  # Turn off TensorFlow spam.

    # Initialize a graph
    graph = tf.Graph()
    with graph.as_default():
        hyperparam_config = spec_pb2.GridPoint()
        builder = graph_builder.MasterBuilder(master_spec, hyperparam_config)
        # This is the component that will annotate test sentences.
        annotator = builder.add_annotation(enable_tracing=True)
        builder.add_saver()  # "Savers" can save and load models; here, we're only going to load.

    sess = tf.Session(graph=graph)
    with graph.as_default():
        #sess.run(tf.global_variables_initializer())
        #sess.run('save/restore_all', {'save/Const:0': os.path.join(base_dir, checkpoint_name)})
        builder.saver.restore(sess, os.path.join(base_dir, checkpoint_name))

    def annotate_sentence(sentence):
        with graph.as_default():
            return sess.run([annotator['annotations'], annotator['traces']],
                            feed_dict={annotator['input_batch']: [sentence]})

    return annotate_sentence
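A hedged usage sketch; the model directory, spec file, and checkpoint names below are placeholders, and serialized_sentence is assumed to be a sentence proto serialized the way the annotator expects:

# Hypothetical paths; point these at a real DRAGNN model directory.
annotate = load_model("models/parser", "master_spec.textproto", "checkpoint")
annotations, traces = annotate(serialized_sentence)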
Example #3
    def __init__(self, path):
        self.path = path
        if ShazimService.fvmodel is None:
            logging.set_verbosity(logging.ERROR)
            logging.info("Loading TensorFlow model")
            # Cache the TF Hub feature-vector model on the class so it is loaded only once.
            ShazimService.fvmodel = tf.saved_model.load(
                "hubmodule/feature-vector.4")
        self.pil = Image.open(path)
        self.size = os.stat(path).st_size  # file size in bytes
        self.tfimg = self.load_img(self.path)
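A minimal usage sketch, assuming the surrounding ShazimService class also defines load_img and that "photo.jpg" is a placeholder path:

# Hypothetical call; the first instantiation triggers the one-time TF Hub model load.
service = ShazimService("photo.jpg")
print(service.size)  # file size in bytes from os.stat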
Example #4
def tensorflow_shutup():
    """
    Make TensorFlow less verbose.
    """
    try:
        # noinspection PyPackageRequirements
        import os
        from tensorflow import logging
        logging.set_verbosity(logging.ERROR)
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

        # Monkey-patch the deprecation utils to silence them; consider removing this patch after upgrading TensorFlow.
        # noinspection PyUnusedLocal
        def deprecated(date, instructions, warn_once=True):
            def deprecated_wrapper(func):
                return func

            return deprecated_wrapper

        from tensorflow.python.util import deprecation
        deprecation.deprecated = deprecated

    except ImportError:
        pass
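A usage sketch: calling tensorflow_shutup() as early as possible (before graphs or models are built) keeps TensorFlow's import-time and deprecation chatter out of the logs, since TF_CPP_MIN_LOG_LEVEL only affects messages emitted after it is set:

tensorflow_shutup()  # silence TensorFlow before any further TF work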
Example #5
def main(_):
    logging.set_verbosity(logging.INFO)
    tf.compat.v1.enable_v2_behavior()
    train_eval(FLAGS.root_dir)
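A minimal entry-point sketch (hypothetical, not shown in the snippet), assuming the usual absl.app pattern is used to parse flags and dispatch into main:

if __name__ == '__main__':
    from absl import app
    app.run(main)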
Example #6
def test_sanity_fetching():
    papers = fetch._get_parsed_data()
    assert len(papers) > 1


@pytest.mark.parametrize(
    ["text", "num_tokens"],
    [
        pytest.param("computer vision is great", 3),
        pytest.param("object detection", 2),
    ],
)
def test_sanity_check_processor(text, num_tokens):
    tokens = p.tokenize(text)
    assert len(tokens) == num_tokens


@pytest.mark.parametrize(
    ["text"],
    [
        pytest.param(["walk", "best", "better"]),
    ],
)
def test_sanity_check_lemmatize(text):
    tokens = p.lemmatize(text)
    assert len(tokens) == len(text)


if __name__ == "__main__":
    logging.set_verbosity(logging.WARNING)
Example #7
  def __init__(self,
               model_name: str,
               uri: str,
               hparams: str = '',
               model_dir: Optional[str] = None,
               epochs: int = 50,
               batch_size: int = 64,
               steps_per_execution: int = 1,
               moving_average_decay: int = 0,
               var_freeze_expr: str = '(efficientnet|fpn_cells|resample_p6)',
               tflite_max_detections: int = 25,
               strategy: Optional[str] = None,
               tpu: Optional[str] = None,
               gcp_project: Optional[str] = None,
               tpu_zone: Optional[str] = None,
               use_xla: bool = False,
               profile: bool = False,
               debug: bool = False,
               tf_random_seed: int = 111111) -> None:
    """Initialze an instance with model paramaters.

    Args:
      model_name: Model name.
      uri: TF-Hub path/url to EfficientDet module.
      hparams: Hyperparameters used to overwrite the default configuration. Can
        be 1) a dict of parameter names and values; 2) a string of
        comma-separated k=v hyperparameter pairs; or 3) a string naming a yaml
        file whose attributes are used as hyperparameters.
      model_dir: The location to save the model checkpoint files.
      epochs: Default training epochs.
      batch_size: Training & Evaluation batch size.
      steps_per_execution: Number of steps per training execution.
      moving_average_decay: Float. The decay to use for maintaining moving
        averages of the trained parameters.
      var_freeze_expr: Expression to freeze variables.
      tflite_max_detections: The max number of output detections in the TFLite
        model.
      strategy: A string specifying which distribution strategy to use.
        Accepted values are 'tpu', 'gpus', and None. 'tpu' means to use
        TPUStrategy, 'gpus' means to use MirroredStrategy for multiple GPUs,
        and None means to use the TF default with OneDeviceStrategy.
      tpu: The Cloud TPU to use for training. This should be either the name
        used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470
        URL.
      gcp_project: Project name for the Cloud TPU-enabled project. If not
        specified, we will attempt to automatically detect the GCE project from
        metadata.
      tpu_zone: GCE zone where the Cloud TPU is located. If not specified, we
        will attempt to automatically detect the GCE zone from metadata.
      use_xla: Use XLA even if strategy is not tpu. If strategy is tpu, always
        use XLA, and this flag has no effect.
      profile: Enable profile mode.
      debug: Enable debug mode.
      tf_random_seed: Fixed random seed for deterministic execution across runs
        for debugging.
    """
    self.model_name = model_name
    self.uri = uri
    self.batch_size = batch_size
    config = hparams_config.get_efficientdet_config(model_name)
    config.override(hparams)
    config.image_size = utils.parse_image_size(config.image_size)
    config.var_freeze_expr = var_freeze_expr
    config.moving_average_decay = moving_average_decay
    config.tflite_max_detections = tflite_max_detections
    if epochs:
      config.num_epochs = epochs

    if use_xla and strategy != 'tpu':
      tf.config.optimizer.set_jit(True)
      for gpu in tf.config.list_physical_devices('GPU'):
        tf.config.experimental.set_memory_growth(gpu, True)

    if debug:
      tf.config.experimental_run_functions_eagerly(True)
      tf.debugging.set_log_device_placement(True)
      os.environ['TF_DETERMINISTIC_OPS'] = '1'
      tf.random.set_seed(tf_random_seed)
      logging.set_verbosity(logging.DEBUG)

    if strategy == 'tpu':
      tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
          tpu, zone=tpu_zone, project=gcp_project)
      tf.config.experimental_connect_to_cluster(tpu_cluster_resolver)
      tf.tpu.experimental.initialize_tpu_system(tpu_cluster_resolver)
      ds_strategy = tf.distribute.TPUStrategy(tpu_cluster_resolver)
      logging.info('All devices: %s', tf.config.list_logical_devices('TPU'))
      tf.config.set_soft_device_placement(True)
    elif strategy == 'gpus':
      ds_strategy = tf.distribute.MirroredStrategy()
      logging.info('All devices: %s', tf.config.list_physical_devices('GPU'))
    else:
      if tf.config.list_physical_devices('GPU'):
        ds_strategy = tf.distribute.OneDeviceStrategy('device:GPU:0')
      else:
        ds_strategy = tf.distribute.OneDeviceStrategy('device:CPU:0')

    self.ds_strategy = ds_strategy

    if model_dir is None:
      model_dir = tempfile.mkdtemp()
    params = dict(
        profile=profile,
        model_name=model_name,
        steps_per_execution=steps_per_execution,
        model_dir=model_dir,
        strategy=strategy,
        batch_size=batch_size,
        tf_random_seed=tf_random_seed,
        debug=debug)
    config.override(params, True)
    self.config = config

    # set mixed precision policy by keras api.
    precision = utils.get_precision(config.strategy, config.mixed_precision)
    policy = tf.keras.mixed_precision.experimental.Policy(precision)
    tf.keras.mixed_precision.experimental.set_policy(policy)
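A hedged instantiation sketch; the class name EfficientDetModelSpec and the TF-Hub URI below are assumptions based on the parameters this constructor accepts:

# Hypothetical instantiation; model_name and uri values are placeholders.
spec = EfficientDetModelSpec(
    model_name='efficientdet-lite0',
    uri='https://tfhub.dev/tensorflow/efficientdet/lite0/feature-vector/1',
    epochs=20,
    batch_size=32)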
Example #8
def main(argv):
    del argv
    engine.init()

    if FLAGS.debug:
        # DEBUG, INFO, WARN, ERROR, FATAL
        logging.set_verbosity(logging.DEBUG)
    else:
        logging.set_verbosity(logging.INFO)

    data_file = os.path.join(FLAGS.dir, FLAGS.data_filename)

    # load config
    with open(FLAGS.config_filename, 'r') as json_file:
        config = json.load(json_file)

    if not FLAGS.fig:
        pars = parse(config)

        os.makedirs(os.path.dirname(data_file), exist_ok=True)
        prev_files = os.listdir(FLAGS.dir)
        if FLAGS.rm:
            for file in prev_files:
                os.remove(os.path.join(FLAGS.dir, file))
        else:
            if os.listdir(FLAGS.dir):
                logging.fatal(('%s/ is not empty. Make sure you have'
                               ' archived previously generated data. '
                               'Try --rm flag which will automatically'
                               ' delete previous data.') % FLAGS.dir)

        # special trial (repetition) to show (change it if you need)
        trial_ind_show = 0
        data_dir = pars['data_dir']
        rois_per_trial_num = pars['rois_per_trial_num']
        # frame rate ratio between two-photon imaging and behavior
        frame_rate_ratio = pars['frame_rate_ratio']
        # common names to be converted to ids
        event_name_list = pars['event_name_list']

        procs = pars['processors']
        verbosity = pars['verbosity']

        ds = Dataset(verbosity, procs)
        #ds.mat_to_pkl(data_dir)
        ds.load_from_pkl(data_dir)
        ds.to_pckl()

        Data_x = []
        Data_y = []
        for exp in tqdm(ds.experiment_list):
            for trial in exp.trial_list:
                if len(trial.bda_list) < 1 or len(trial.tpa_list) < 1:
                    continue

                #trial.tpa_list[-1].print_param()
                #trial.bda_list[-1].print_param()

                # extract ROI df/F data
                rois_per_trial_num = len(trial.tpa_list)
                dffData = trial.tpa_list[0].procROI
                framNum = dffData.shape[0]
                dffDataArray = np.tile(dffData, (rois_per_trial_num, 1))
                roiNames = []

                for m in range(rois_per_trial_num):
                    dffDataArray[m, :] = trial.tpa_list[m].procROI
                    roiNames.append(trial.tpa_list[m].Name)

                # extract Event Time data
                eventsPerTrialNum = len(trial.bda_list)
                timeData = trial.bda_list[0].TimeInd
                eventDataArray = np.zeros((framNum, eventsPerTrialNum))
                eventNames = []

                for m in range(eventsPerTrialNum):
                    timeInd = np.array(trial.bda_list[m].tInd).flatten()
                    # Convert behavior time indices to two-photon frame time.
                    timeInd = np.round(timeInd / frame_rate_ratio)

                    timeInd = np.concatenate(
                        ([1], [np.min(np.concatenate((timeInd, [framNum])))]))
                    timeInd = np.sort(timeInd)
                    eventDataArray[int(timeInd[0]):int(timeInd[1]), m] = 1
                    eventNames.append(
                        (trial.bda_list[m].Name, trial.bda_list[m].SeqNum))

                if len(eventNames) > 1:
                    Data_x.append(dffDataArray.T)
                    if eventNames[1][0] == 'success':
                        Data_y.append(1)
                    else:
                        Data_y.append(0)

        Data_x_subset = []
        Data_y_subset = []
        for i, x in enumerate(Data_x):
            if x.shape[-1] == 379 and x.shape[0] == 360:
                Data_x_subset.append(x)
                Data_y_subset.append(Data_y[i])
Example #9
    def setUp(self):
        super(TestCase, self).setUp()
        util.setup_logging()
        logging.set_verbosity(logging.DEBUG)
        logging.getLogger('werkzeug').setLevel(logging.INFO)
        logging.debug('=== %s ===', self._method)