Example #1
    def __init__(self, config: TrainerConfig):
        """

        Args:
            config (TrainerConfig): configuration used to construct this trainer
        """
        Trainer._trainer_progress = _TrainerProgress()
        root_dir = os.path.expanduser(config.root_dir)
        os.makedirs(root_dir, exist_ok=True)
        logging.get_absl_handler().use_absl_log_file(log_dir=root_dir)
        self._root_dir = root_dir
        self._train_dir = os.path.join(root_dir, 'train')
        self._eval_dir = os.path.join(root_dir, 'eval')

        self._algorithm_ctor = config.algorithm_ctor
        self._algorithm = None

        self._num_checkpoints = config.num_checkpoints
        self._checkpointer = None

        self._evaluate = config.evaluate
        self._eval_interval = config.eval_interval

        self._summary_interval = config.summary_interval
        self._summaries_flush_secs = config.summaries_flush_secs
        self._summary_max_queue = config.summary_max_queue
        self._debug_summaries = config.debug_summaries
        self._summarize_grads_and_vars = config.summarize_grads_and_vars
        self._config = config

        self._random_seed = common.set_random_seed(config.random_seed)
Example #2
def main(_):
    def get_run_name() -> str:
        from rl_project import _get_run_name
        return _get_run_name(
            algorithm=FLAGS.algorithm,
            scenario_number=FLAGS.scenario_number,
            number_of_steps=FLAGS.number_of_steps,
            number_of_envs=FLAGS.number_of_envs,
        )

    run_name = get_run_name()
    logger.get_absl_handler().use_absl_log_file(run_name, './')
    logger.info("STARTED")
    time_start = time.time()

    from rl_project.trainer import create_rl_algorithm_model
    trained_model = create_rl_algorithm_model(
        algorithm=FLAGS.algorithm,
        algorithm_policy=FLAGS.algorithm_policy,
        scenario_number=FLAGS.scenario_number,
        number_of_envs=FLAGS.number_of_envs,
        representation=FLAGS.representation,
        stacked=FLAGS.stacked,
    ).learn(FLAGS.number_of_steps)
    time_elapsed = time.time() - time_start
    logger.info("Time elapsed " +
                time.strftime("%H:%M:%S", time.gmtime(time_elapsed)))
    trained_model.save(save_path=run_name)
Example #3
def main(_):

    if FLAGS.log_dir:
        os.makedirs(FLAGS.log_dir, exist_ok=True)
        logging.get_absl_handler().use_absl_log_file(FLAGS.task, FLAGS.log_dir)

    logging.debug('Arguments:')
    for k, v in FLAGS.flag_values_dict().items():
        logging.debug(f'- {k}: {v}')

    # Load original data
    idir = os.path.join(FLAGS.data_npy_dir, FLAGS.input_name)
    loader = DataLoader(idir)

    # Find feature indices
    idxs = []
    for f in FLAGS.features:
        i = np.argwhere(loader.feature == f)
        assert len(i) >= 1, f'Unknown feature "{f}"!'
        assert len(i) <= 1, f'Duplicated feature "{f}"!'
        idxs.append(i[0, 0])
    idxs = np.asarray(idxs)

    # Select feature and columns of X
    loader.feature = loader.feature[idxs]
    loader.train_set.x = loader.train_set.x[:, idxs]
    loader.test_set.x = loader.test_set.x[:, idxs]

    # Dump new data
    odir = os.path.join(FLAGS.data_npy_dir, FLAGS.output_name)
    loader.dump(loader.feature,
                loader.train_set,
                loader.test_set,
                output_dir=odir)
Example #4
    def __init__(self, flags):
        super().__init__(flags)
        self.seed_everything()
        if not os.path.exists(flags.summaries_dir):
            os.makedirs(flags.summaries_dir)
        logging.get_absl_handler().use_absl_log_file(
            program_name='DeePray',
            log_dir=flags.summaries_dir
        )
        logging.info(' {} Initialize training'.format(
            time.strftime("%Y%m%d %H:%M:%S")))
        if 'use_autotuner' in flags.__dict__:
            self.flags = flags
            logging.info(self.flags)
        else:
            self.flags = FLAGS
            logging.info('\ttf.app.flags.FLAGS:')
            for key, value in sorted(self.flags.flag_values_dict().items()):
                logging.info('\t{:25}= {}'.format(key, value))

        self.next_step_to_trace = 2
        self.batch_size = self.flags.prebatch * self.flags.batch_size
        self.best_loss = float('Inf')
        self.best_checkpoint = None
        self.patient_pass = 0
        self.max_patient_passes = self.flags.patient_valid_passes
        self.prediction_signature = None
        self.last_model_path = None
        self.LABEL, self.CATEGORY_FEATURES, self.NUMERICAL_FEATURES, \
        self.VOC_SIZE, self.VARIABLE_FEATURES = self.get_summary()
        self.metrics_object = self.build_metrics()
        self.loss_object = self.build_loss()
Example #5
def main(_):
    def get_run_name() -> str:
        return "EVALUATOR_{algorithm}_scenario-{scenario}_accuracy-{accuracy}".format(
            algorithm=FLAGS.algorithm,
            scenario=FLAGS.scenario_number,
            accuracy=str(FLAGS.accuracy),
        )

    if FLAGS.path is None:
        raise ValueError(
            "path to trained model must be given. Please run script with --help option"
        )
    logger.get_absl_handler().use_absl_log_file(get_run_name(), './')
    from rl_project.algorithm import load_model
    model = load_model(FLAGS.path, FLAGS.algorithm)
    from rl_project.evaluator import evaluate_model
    total_reward = evaluate_model(
        model=model,
        scenario_number=FLAGS.scenario_number,
        accuracy=FLAGS.accuracy,
        reward=FLAGS.reward,
        representation=FLAGS.representation,
        stacked=FLAGS.stacked,
        render=FLAGS.render,
        logging=logger.info,
    )
    logger.info(
        "EVALUATOR: ended with {runs} runs and receive in total reward: {reward}. Average reward: {average}"
        .format(
            runs=FLAGS.accuracy,
            reward=total_reward,
            average=total_reward / FLAGS.accuracy,
        ))
Example #6
def setup_logging():
    """Logging related setup.

    Creates the log directory if it does not exist and sets up the output of
    logging to use absl log file.
    """
    logging.set_verbosity(logging.INFO)

    log_dir = ''
    if FLAGS.log_dir:
        log_dir = FLAGS.log_dir + LOG_FOLDER_NAME
    else:
        log_dir = logging.find_log_dir() + LOG_FOLDER_NAME

    if FLAGS.enable_mount_directory:
        log_dir = MOUNT_DIR + log_dir

    try:
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        logging.get_absl_handler().use_absl_log_file('', log_dir)
    except OSError as err:
        logging.error('Failed to create log directory %s, err %s', log_dir,
                      str(err))
    except Exception as ex:  # pylint: disable=broad-except
        logging.error('Unknown exception occurs in setup_logging, err: %s',
                      str(ex))
Example #7
    def train(self,
              train_dataset,
              valid_dataset,
              test_dataset,
              max_epochs,
              valid_gap_epochs=1,
              auto_save=True):
        """ Run the training """
        folder_path, model_path = self.make_dir()
        logging.get_absl_handler().use_absl_log_file('logs', folder_path)
        logging.info('Start training with max epochs {}'.format(max_epochs))
        logging.info('Model and logs saved in {}'.format(folder_path))
        logging.info('Number of trainable parameters - {}'.format(
            self.num_params()))
        if self.write_summary:
            self.model_wrapper.init_summary_writer(folder_path)

        best_eval_loss = float('inf')

        for i in range(1, max_epochs + 1):
            loss, prediction = self.run_one_epoch(train_dataset,
                                                  RunnerPhase.TRAIN, self.lr)

            # Record the train loss
            loss_metrics = {'loss': np.mean(loss)}

            logging.info('Epoch {0} -- Training loss {1}'.format(
                i, loss_metrics['loss']))
            self.model_wrapper.write_summary(i, loss_metrics,
                                             RunnerPhase.TRAIN)

            # Evaluate the model
            if i % valid_gap_epochs == 0:
                loss, prediction = self.run_one_epoch(valid_dataset,
                                                      RunnerPhase.VALIDATE,
                                                      self.lr)
                # Record the validation loss
                loss_metrics = {'loss': np.mean(loss)}
                logging.info('Epoch {0} -- Validation loss {1}'.format(
                    i, loss_metrics['loss']))
                self.model_wrapper.write_summary(i, loss_metrics,
                                                 RunnerPhase.VALIDATE)

                # If it is the best model on valid set
                if best_eval_loss > loss_metrics['loss']:
                    metrics = self.evaluate(test_dataset)
                    self.model_wrapper.write_summary(i, metrics,
                                                     RunnerPhase.PREDICT)

                    best_eval_loss = loss_metrics['loss']
                    if auto_save:
                        self.model_wrapper.save(model_path)

        if not auto_save:
            self.model_wrapper.save(model_path)
        else:
            # use the saved model based on valid performance
            self.restore(model_path)

        logging.info('Training finished')
Example #8
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    logging.get_absl_handler().use_absl_log_file()

    logging.info('Opening %s', FLAGS.output_sqlite)
    db = smu_sqlite.SMUSQLite(FLAGS.output_sqlite, 'c')

    if FLAGS.bond_topology_csv:
        logging.info('Starting smiles to btid inserts')
        smiles_id_dict = smu_utils_lib.smiles_id_dict_from_csv(
            open(FLAGS.bond_topology_csv))
        db.bulk_insert_smiles(smiles_id_dict.items())
        logging.info('Finished smiles to btid inserts')
    else:
        logging.info('Skipping smiles inserts')

    logging.info('Starting main inserts')
    dataset = tf.data.TFRecordDataset(gfile.glob(FLAGS.input_tfrecord))
    db.bulk_insert((raw.numpy() for raw in dataset), batch_size=10000)

    logging.info('Starting vacuuming')
    db.vacuum()
    logging.info('Vacuuming finished')
Example #9
File: main.py  Project: joaogui1/flax
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    FLAGS.log_dir = FLAGS.workdir
    FLAGS.stderrthreshold = 'info'
    logging.get_absl_handler().start_logging_to_file()

    # Hide any GPUs from TensorFlow. Otherwise TF might reserve memory and make
    # it unavailable to JAX.
    tf.config.experimental.set_visible_devices([], 'GPU')

    logging.info('JAX host: %d / %d', jax.host_id(), jax.host_count())
    logging.info('JAX local devices: %r', jax.local_devices())

    # Add a note so that we can tell which task is which JAX host.
    # (Depending on the platform task 0 is not guaranteed to be host 0)
    platform.work_unit().set_task_status(
        f'host_id: {jax.host_id()}, host_count: {jax.host_count()}')
    platform.work_unit().create_artifact(platform.ArtifactType.DIRECTORY,
                                         FLAGS.workdir, 'workdir')

    if FLAGS.sample:
        sample.save_images(sample.generate_sample(FLAGS.config, FLAGS.workdir),
                           'sample.png')
    else:
        train.train_and_evaluate(FLAGS.config, FLAGS.workdir)
Example #10
def setup_logging_and_exp_folder():
    # Random string if debug
    if FLAGS.debug:
        FLAGS.experiment = "{}_{}".format(FLAGS.mode, random_string())

    # Use time stamp or user specified if not debug
    else:
        ts = time.time()
        FLAGS.experiment = FLAGS.experiment if FLAGS.experiment is not None else \
            "{}_{}".format(FLAGS.mode,
                           datetime.datetime.fromtimestamp(ts).strftime('%Y%m%d%H%M'))
    training_folder = os.path.join(EXP_DIR, FLAGS.experiment)

    # Create train folder
    if os.path.exists(training_folder):
        print('{} exists!'.format(training_folder))
        exit(-1)
    else:
        os.makedirs(training_folder, exist_ok=False)

    # set up logging
    if FLAGS.debug:
        logging.get_absl_handler().python_handler.stream = sys.stdout
    else:
        logging.get_absl_handler().use_absl_log_file('absl_logging',
                                                     training_folder)
    return training_folder
Example #11
def main(_):

    if FLAGS.log_dir:
        os.makedirs(FLAGS.log_dir, exist_ok=True)
        logging.get_absl_handler().use_absl_log_file(FLAGS.task, FLAGS.log_dir)

    logging.debug('Arguments:')
    for k, v in FLAGS.flag_values_dict().items():
        logging.debug(f'- {k}: {v}')

    # Set paths
    FLAGS.full_name = f'{FLAGS.model_name}_{FLAGS.input_name}'
    FLAGS.input_dir = os.path.join(FLAGS.data_npy_dir, FLAGS.input_name)
    FLAGS.model_file = os.path.join(FLAGS.result_model_dir, FLAGS.full_name)
    FLAGS.figure_file = os.path.join(FLAGS.result_figure_dir, FLAGS.full_name)

    # Run trainer
    trainer = get_trainer()
    if not FLAGS.no_train:
        logging.info("==== Training ====")
        trainer.run_train()

    if not FLAGS.no_test:
        logging.info("==== Testing ====")
        trainer.run_test()
Example #12
def main(_):
    gin.parse_config_file(FLAGS.gin_file)
    # warnings.filterwarnings('ignore') #TODO: revisit if this is necessary
    logging.get_absl_handler().use_absl_log_file('mlserver_cxr')

    ae = ApplicationEntity()
    ae.start_server(evt_handlers=Helper().handlers)
Example #13
def main(argv):
    """Create directories and configure python settings"""

    # Setup Directory
    experiment_dir = os.path.join(FLAGS.dir, FLAGS.id)
    if not os.path.exists(experiment_dir):
        os.makedirs(os.path.join(experiment_dir, "logs"), exist_ok=True)

    # Setup Logging
    FLAGS.alsologtostderr = True
    logging.get_absl_handler().use_absl_log_file(
        FLAGS.logfile, os.path.join(experiment_dir, "logs"))

    # Setup seeds
    if FLAGS.random_seed:
        np.random.seed(FLAGS.random_seed)
        tf.random.set_seed(FLAGS.random_seed)

    # Log Flags
    log_flags(FLAGS)

    try:
        experiment()
    except Exception:
        exception = traceback.format_exc()
        logging.info(exception)
Example #14
def main(argv):
    del argv
    with open(FLAGS.config_path) as json_data_file:
        config = json.load(json_data_file)
    config = munch.munchify(config)
    bsde = getattr(eqn, config.eqn_config.eqn_name)(config.eqn_config)
    tf.keras.backend.set_floatx(config.net_config.dtype)

    if not os.path.exists(FLAGS.log_dir):
        os.mkdir(FLAGS.log_dir)
    path_prefix = os.path.join(FLAGS.log_dir, FLAGS.exp_name)
    with open('{}_config.json'.format(path_prefix), 'w') as outfile:
        json.dump(dict((name, getattr(config, name)) for name in dir(config)
                       if not name.startswith('__')),
                  outfile,
                  indent=2)

    absl_logging.get_absl_handler().setFormatter(
        logging.Formatter('%(levelname)-6s %(message)s'))
    absl_logging.set_verbosity('info')

    logging.info('Begin to solve %s ' % config.eqn_config.eqn_name)
    bsde_solver = BSDESolver(config, bsde)
    training_history = bsde_solver.train()
    if bsde.y_init:
        logging.info('Y0_true: %.4e' % bsde.y_init)
        logging.info(
            'relative error of Y0: %s', '{:.2%}'.format(
                abs(bsde.y_init - training_history[-1, 2]) / bsde.y_init))
    np.savetxt('{}_training_history.csv'.format(path_prefix),
               training_history,
               fmt=['%d', '%.5e', '%.5e', '%d'],
               delimiter=",",
               header='step,loss_function,target_value,elapsed_time',
               comments='')
Example #15
File: cli.py  Project: jackd/more-keras
def logging_config(to_file=True, log_dir=None, program_name='more_keras'):
    if to_file and log_dir is not None:
        log_dir = os.path.expanduser(os.path.expandvars(log_dir))
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        logging.info('Logging to {}'.format(log_dir))
        logging.get_absl_handler().use_absl_log_file(log_dir=log_dir,
                                                     program_name=program_name)
Example #16
def main(_):
    opt = FLAGS
    # logging
    logging.set_verbosity(logging.INFO)
    logging.set_stderrthreshold(logging.INFO)
    if FLAGS.log_dir:
        if not os.path.exists(FLAGS.log_dir):
            os.makedirs(FLAGS.log_dir)
        logging.get_absl_handler().use_absl_log_file(FLAGS.dataset,
                                                     log_dir=FLAGS.log_dir)
    # dataset
    if opt.dataset == 'mnist':
        data_train, data_test = tf.keras.datasets.mnist.load_data()
    elif opt.dataset == 'cifar10':
        data_train, data_test = tf.keras.datasets.cifar10.load_data()
    else:
        raise NotImplementedError('Unsupported dataset: {}'.format(opt.dataset))
    x_train, y_train = data_train
    x_test, y_test = data_test
    x_train = x_train.astype(np.float32)
    x_test = x_test.astype(np.float32)
    y_train = y_train.reshape([-1])
    y_test = y_test.reshape([-1])
    # resize to (32, 32)
    if opt.dataset == 'mnist':
        x_train = batch_resize(x_train, (32, 32))[..., None]
        x_test = batch_resize(x_test, (32, 32))[..., None]
    # normalization
    mean = x_train.mean()
    stddev = x_train.std()
    x_train = (x_train - mean) / stddev
    x_test = (x_test - mean) / stddev
    logging.info('{}, {}'.format(x_train.shape, x_test.shape))
    # define abnormal vs. normal data;
    # the training set contains only normal samples
    x_train = x_train[y_train != opt.anomaly, ...]
    y_train = y_train[y_train != opt.anomaly, ...]
    y_test = (y_test == opt.anomaly).astype(np.float32)
    # tf.data.Dataset
    train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
    train_dataset = train_dataset.shuffle(opt.shuffle_buffer_size).batch(
        opt.batch_size, drop_remainder=True)
    test_dataset = test_dataset.batch(opt.batch_size, drop_remainder=False)

    # training
    ganomaly = GANomaly(opt,
                        train_dataset,
                        valid_dataset=None,
                        test_dataset=test_dataset)
    ganomaly.fit(opt.niter)

    # evaluating
    ganomaly.evaluate_best(test_dataset)
Example #17
def setup_logging_handler(log_dir):
    """Sets up logging.

    Args:
        log_dir (str): Path to directory where logs should be saved.
    """
    if log_dir:
        os.makedirs(log_dir, exist_ok=True)
        program_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
        logging.get_absl_handler().use_absl_log_file(program_name, log_dir)
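The helper in Example #17 only configures the handler. A minimal calling sketch is shown below; it assumes an absl app entry point and a hypothetical --log_dir flag defined by the caller, neither of which is part of the example above.

from absl import app, flags, logging

flags.DEFINE_string('log_dir', None, 'Directory for absl log files (hypothetical flag).')
FLAGS = flags.FLAGS


def main(argv):
    del argv  # unused
    # Route absl logs to a file named after the running script (see Example #17).
    setup_logging_handler(FLAGS.log_dir)
    logging.info('File logging configured.')


if __name__ == '__main__':
    app.run(main)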
Example #18
File: utils.py  Project: Lambda-Rec/RS
    def set_test_logging(self):
        self.log_path = os.path.join(self.workspace, 'test_log')
        if not os.path.exists(self.log_path):
            os.mkdir(self.log_path)

        logging.flush()
        logging.get_absl_handler().use_absl_log_file(self.exp_name + '.log',
                                                     self.log_path)
Example #19
def main(argv):
    logging.get_absl_handler().start_logging_to_file(FLAGS.log_file)

    print("logging path: ", logging.get_log_file_name())
    logging.info('Running under Python {0[0]}.{0[1]}.{0[2]}'.format(
        sys.version_info))
    logging.info("logging level: %d" % logging.get_verbosity())

    for i in range(50):
        logging.log_every_n(logging.INFO, "log_every_10", 10)
Example #20
def main(_):
    model_helpers.apply_clean(flags.FLAGS)
    logdir = '/tmp/logs'
    if not os.path.exists(logdir):
        os.makedirs(logdir)
    logname = 'imagenet_strategy_{}_model_{}_node_{}_gpu_{}_patch_{}_proxy_{}'.format(
        flags.FLAGS.autodist_strategy, flags.FLAGS.cnn_model, node_num,
        gpu_num, flags.FLAGS.autodist_patch_tf, flags.FLAGS.proxy)
    logging.get_absl_handler().use_absl_log_file(logname, logdir)
    with logger.benchmark_context(flags.FLAGS):
        run(flags.FLAGS)
Example #21
def main(argv):
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')

  logging.get_absl_handler().use_absl_log_file()

  logging.info('Opening %s', FLAGS.output_sqlite)
  db = smu_sqlite.SMUSQLite(FLAGS.output_sqlite, 'c')

  dataset = tf.data.TFRecordDataset(gfile.glob(FLAGS.input_tfrecord))
  db.bulk_insert((raw.numpy() for raw in dataset), batch_size=10000)
Example #22
def main(argv):
    # argv is not used; discard it
    del argv
    # Load config file
    with open(FLAGS.config_path) as json_data_file:
        config = json.load(json_data_file)
    # A munch is a python dictionary type, subclass of dict
    config = munch.munchify(config)

    # Get eqn_name problem object from equation.py and generate object using eqn_config
    ### --> See equation.py for details
    bsde = getattr(eqn, config.eqn_config.eqn_name)(config.eqn_config)

    # Set dtype globally
    tf.keras.backend.set_floatx(config.net_config.dtype)

    # Path for logging
    if not os.path.exists(FLAGS.log_dir):
        os.mkdir(FLAGS.log_dir)
    path_prefix = os.path.join(FLAGS.log_dir, FLAGS.exp_name)

    # Copy used configuration to log directory
    with open('{}_config.json'.format(path_prefix), 'w') as outfile:
        json.dump(dict((name, getattr(config, name)) for name in dir(config)
                       if not name.startswith('__')),
                  outfile,
                  indent=2)

    # Configure the absl logging handler's format and verbosity
    absl_logging.get_absl_handler().setFormatter(
        logging.Formatter('%(levelname)-6s %(message)s'))
    absl_logging.set_verbosity('info')

    # Start logging
    logging.info('Begin to solve %s ' % config.eqn_config.eqn_name)

    # Call BSDE solver with config and problem equation
    bsde_solver = BSDESolver(config, bsde)

    training_history = bsde_solver.train()

    # If explicit solution is available, print some final statistics
    if bsde.y_init:
        logging.info('Y0_true: %.4e' % bsde.y_init)
        logging.info(
            'relative error of Y0: %s', '{:.2%}'.format(
                abs(bsde.y_init - training_history[-1, 2]) / bsde.y_init))
    # Save the training history to CSV
    np.savetxt('{}_training_history.csv'.format(path_prefix),
               training_history,
               fmt=['%d', '%.5e', '%.5e', '%d'],
               delimiter=",",
               header='step,loss_function,target_value,elapsed_time',
               comments='')
Example #23
def main(argv):
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')

  logging.get_absl_handler().use_absl_log_file()

  logging.info('Opening %s', FLAGS.input_sqlite)
  db = smu_sqlite.SMUSQLite(FLAGS.input_sqlite, 'r')
  if FLAGS.output_format == OutputFormat.pbtxt:
    outputter = PBTextOutputter(FLAGS.output_path)
  elif FLAGS.output_format == OutputFormat.sdf_init:
    outputter = SDFOutputter(
        FLAGS.output_path, init_geometry=True, opt_geometry=False)
  elif FLAGS.output_format == OutputFormat.sdf_opt:
    outputter = SDFOutputter(
        FLAGS.output_path, init_geometry=False, opt_geometry=True)
  elif FLAGS.output_format == OutputFormat.sdf_init_opt:
    outputter = SDFOutputter(
        FLAGS.output_path, init_geometry=True, opt_geometry=True)
  elif FLAGS.output_format == OutputFormat.atomic_input:
    outputter = AtomicInputOutputter(FLAGS.output_path)
  elif FLAGS.output_format == OutputFormat.tfdata:
    outputter = TfDataOutputter(FLAGS.output_path)
  else:
    raise ValueError(f'Bad output format {FLAGS.output_format}')

  if FLAGS.redetect_geometry:
    outputter = ReDetectTopologiesOutputter(outputter)

  with contextlib.closing(outputter):
    for cid in (int(x) for x in FLAGS.cids):
      conformer = db.find_by_conformer_id(cid)
      outputter.output(conformer)
    for btid in (int(x) for x in FLAGS.btids):
      conformers = db.find_by_bond_topology_id(btid)
      if not conformers:
        raise KeyError(f'Bond topology {btid} not found')
      for c in conformers:
        outputter.output(c)
    for smiles in FLAGS.smiles:
      conformers = db.find_by_smiles(smiles)
      if not conformers:
        raise KeyError(f'SMILES {smiles} not found')
      for c in conformers:
        outputter.output(c)
    for smiles in FLAGS.topology_query_smiles:
      for c in topology_query(db, smiles):
        outputter.output(c)
    if FLAGS.random_fraction:
      for conformer in db:
        if (conformer.fate == dataset_pb2.Conformer.FATE_SUCCESS
            and random.random() < FLAGS.random_fraction):
          outputter.output(conformer)
Example #24
def get_logger():
    """Retrieves tensorflow logger and changes log formatting."""
    formatting = "%(asctime)s: %(levelname)s %(filename)s:%(lineno)s] %(message)s"
    formatter = logging.Formatter(formatting)
    absl_logging.get_absl_handler().setFormatter(formatter)

    for h in tf.get_logger().handlers:
        h.setFormatter(formatter)

    logger = tf.get_logger()
    logger.setLevel(logging.INFO)
    return logger
Example #25
def main(argv):
    """driver for flask application"""
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')
    # absl logging set to file logging if specified
    if FLAGS.log_dir:
        if not os.path.exists(FLAGS.log_dir):
            os.makedirs(FLAGS.log_dir)
        logging.get_absl_handler().use_absl_log_file()
    logging.info('Techex started')
    flask_app = _create_flask_app()
    flask_app.run(host=FLAGS.flask_host, port=FLAGS.flask_port)
Example #26
def main(_):
    logging.get_absl_handler().setFormatter(CustomPythonFormatter())
    logging.set_verbosity(logging.DEBUG)

    model = IMDBSentimentClassifier(learning_rate=FLAGS.lr)
    trainer = pl.Trainer(
        default_root_dir='logs',
        gpus=(1 if th.cuda.is_available() else 0),
        max_epochs=FLAGS.epochs,
        fast_dev_run=FLAGS.debug,
        logger=pl.loggers.TensorBoardLogger('logs/', name='imdb', version=0),
    )
    trainer.fit(model)
Example #27
def main(argv):
    """Create directories and configure python settings"""

    # Setup Directory
    experiment_dir = os.path.join(FLAGS.dir, FLAGS.id)
    if not os.path.exists(experiment_dir):
        os.makedirs(os.path.join(experiment_dir, "logs"), exist_ok=True)

    # Setup Logging
    FLAGS.alsologtostderr = True
    logging.get_absl_handler().use_absl_log_file(
        FLAGS.logfile, os.path.join(experiment_dir, "logs"))

    # Setup Distributed
    if FLAGS.distributed:
        try:
            hvd.init()
            gpus = tf.config.list_physical_devices('GPU')
            logging.info("Distributed training enabled.")
            logging.info("GPUS: %s", str(gpus))
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
            if gpus:
                tf.config.experimental.set_visible_devices(
                    gpus[hvd.local_rank()], 'GPU')
            FLAGS.model_cfgs = (FLAGS.model_cfgs +
                                ",distributed=True").strip(',')
        except Exception:
            logging.info(
                "Distributed training setup failed. Disabling distributed training."
            )
        if FLAGS.random_seed:
            logging.info("Setting seed to %s", FLAGS.random_seed + hvd.rank())
            np.random.seed(FLAGS.random_seed + hvd.rank())
            tf.random.set_seed(FLAGS.random_seed + hvd.rank())
    else:
        # Setup seeds
        if FLAGS.random_seed:
            logging.info("Setting seed to %s", FLAGS.random_seed)
            np.random.seed(FLAGS.random_seed)
            tf.random.set_seed(FLAGS.random_seed)

    # Log Flags
    if (not FLAGS.distributed) or (hvd.rank() == 0):
        log_flags(FLAGS)

    try:
        experiment()
    except Exception:
        exception = traceback.format_exc()
        logging.info(exception)
Example #28
def main(argv):
  del argv  # Unused.

  test_name = os.environ.get('TEST_NAME', None)
  test_fn = globals().get('_test_%s' % test_name)
  if test_fn is None:
    raise AssertionError('TEST_NAME must be set to a valid value')
  # Flush so previous messages are written to file before we switch to a new
  # file with use_absl_log_file.
  logging.flush()
  if os.environ.get('USE_ABSL_LOG_FILE') == '1':
    logging.get_absl_handler().use_absl_log_file('absl_log_file', FLAGS.log_dir)

  test_fn()
Example #29
def main(unused_argv) -> None:
    logging.get_absl_handler().use_absl_log_file()
    es = None
    try:
        while True:
            logging.info('Starting Run')
            es = ExitSpeed()
            es.Run()
    except KeyboardInterrupt:
        logging.info('Keyboard interrupt')
    finally:
        if hasattr(es, 'point'):
            logging.info('Logging last point\n %s', es.point)
        logging.info('Done.\nExiting.')
        logging.exception('Ensure we log any exceptions')
Example #30
    def __init__(self) -> None:
        rest = gerrit.get_gerrit_rest_api(COOKIE_JAR_PATH, GERRIT_URL)
        self.gerrit = gerrit.Gerrit(rest)
        self.gerrit_git = git.GerritGit(
            git_dir='gerrit_git_dir',
            cookie_jar_path=COOKIE_JAR_PATH,
            url=GOB_URL,
            project='linux/kernel/git/torvalds/linux',
            branch='master')
        self.message_dao = message_dao.MessageDao()
        self.archive_index = ArchiveMessageIndex(self.message_dao)
        self.last_hash = self.message_dao.get_last_hash()
        archive_updater.setup_archive(GIT_PATH)
        os.makedirs(FILE_DIR, exist_ok=True)
        os.makedirs(LOG_PATH, exist_ok=True)
        logging.get_absl_handler().use_absl_log_file('server_logs', LOG_PATH)