Example #1
def get_session_conf(config):
    """Get the config for the tensorflow session."""
    tfconf = config['solver']['run_config']
    session_conf = tf.ConfigProto(
        allow_soft_placement=tfconf['allow_soft_placement'],
        log_device_placement=tfconf['log_device_placement'],
        intra_op_parallelism_threads=tfconf['intra_op_parallelism_threads'],
        inter_op_parallelism_threads=tfconf['inter_op_parallelism_threads'],
        gpu_options=tf.GPUOptions(allow_growth=tfconf['allow_growth']))
    return session_conf
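
For context, a minimal usage sketch (TF 1.x; under TF 2.x the same calls live under tf.compat.v1). The config dict below is an illustrative assumption with made-up values, not the project's actual configuration:

import tensorflow as tf

# Hypothetical config dict; the key names match what get_session_conf reads,
# but the values are invented for illustration.
config = {
    'solver': {
        'run_config': {
            'allow_soft_placement': True,
            'log_device_placement': False,
            'intra_op_parallelism_threads': 8,
            'inter_op_parallelism_threads': 8,
            'allow_growth': True,
        }
    }
}

sess = tf.Session(config=get_session_conf(config))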
Example #2
    def init_session(self, model, gpu_str):
        """Create self._graph and self._sess from a SavedModel directory,
        a checkpoint directory, or a frozen graph pb file; `gpu_str` selects
        the visible GPUs (empty/None means CPU only)."""
        # Session config: control GPU visibility and memory growth.
        config = tf.ConfigProto()
        if not gpu_str:
            # An empty visible_device_list hides all GPUs (CPU-only session).
            config.gpu_options.visible_device_list = ''  # pylint: disable=no-member
        else:
            config.gpu_options.visible_device_list = gpu_str  # pylint: disable=no-member
            config.gpu_options.allow_growth = True  # pylint: disable=no-member

        # Check whether `model` is a directory (SavedModel or checkpoint dir).
        if os.path.isdir(model):
            self._graph = tf.Graph()

            if tf.saved_model.maybe_saved_model_directory(model):
                # SavedModel directory
                logging.info('saved model dir: {}'.format(model))
                self._sess = tf.Session(graph=self._graph, config=config)
                tf.saved_model.loader.load(
                    self._sess, [tf.saved_model.tag_constants.SERVING], model)
            else:
                # Checkpoint directory: restore the graph and weights from
                # the latest checkpoint. Note this branch builds a fresh
                # ConfigProto and does not reuse the GPU config built above.
                self._sess = tf.Session(graph=self._graph,
                                        config=tf.ConfigProto(
                                            allow_soft_placement=True,
                                            log_device_placement=True))
                ckpt_path = tf.train.latest_checkpoint(model)
                meta_path = ckpt_path + '.meta'
                logging.info("meta: {}".format(meta_path))
                saver = tf.train.import_meta_graph(meta_path)
                saver.restore(self._sess, ckpt_path)

        else:
            if not os.path.exists(model):
                logging.error('frozen graph pb {} does not exist'.format(model))
                sys.exit(1)

            # Frozen graph pb file
            frozen_graph = model
            logging.info('frozen graph pb : {}'.format(frozen_graph))
            self._graph = utils.load_frozen_graph(frozen_graph)
            self._sess = tf.Session(graph=self._graph, config=config)
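
The utils.load_frozen_graph helper used above is not shown in this snippet. A common TF 1.x pattern for loading a frozen GraphDef is sketched below; this is an assumption about a typical implementation, not the project's actual code:

import tensorflow as tf

def load_frozen_graph_sketch(pb_path):
    """Load a frozen GraphDef pb into a fresh Graph (illustrative only)."""
    graph_def = tf.GraphDef()
    with tf.gfile.GFile(pb_path, 'rb') as fin:
        graph_def.ParseFromString(fin.read())
    graph = tf.Graph()
    with graph.as_default():
        tf.import_graph_def(graph_def, name='')
    return graph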
Example #3
def get_sess_config(gpu_str=None):
    """generate a session config proto"""
    config = tf.ConfigProto()

    # pylint: disable=no-member
    if gpu_str is None:
        config.gpu_options.visible_device_list = ''
    else:
        config.gpu_options.visible_device_list = gpu_str
        config.gpu_options.allow_growth = True
    return config
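
A quick usage sketch (the device indices are illustrative):

gpu_config = get_sess_config(gpu_str='0,1')  # expose GPUs 0 and 1, grow memory on demand
cpu_config = get_sess_config()               # hide all GPUs; run on CPU
sess = tf.Session(config=gpu_config)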
Example #4
    def create_estimator(self):
        # Set model params
        model_params = HParams()

        # Build the model_fn for the estimator
        model_fn = self.model_fn()

        # Multi-GPU setup
        devices, num_gpu = utils.gpu_device_names()
        distribution = utils.get_distribution_strategy(num_gpu)
        logging.info('Devices: {} (num_gpu: {})'.format(devices, num_gpu))

        # run config
        tfconf = self.config['solver']['run_config']
        saverconf = self.config['solver']['saver']
        session_config = tf.ConfigProto(
            allow_soft_placement=tfconf['allow_soft_placement'],
            log_device_placement=tfconf['log_device_placement'],
            intra_op_parallelism_threads=tfconf['intra_op_parallelism_threads'],
            inter_op_parallelism_threads=tfconf['inter_op_parallelism_threads'],
            gpu_options=tf.GPUOptions(allow_growth=tfconf['allow_growth']))

        run_config = tf.estimator.RunConfig(  #pylint: disable=no-member
            tf_random_seed=tfconf['tf_random_seed'],
            session_config=session_config,
            save_summary_steps=saverconf['save_summary_steps'],
            keep_checkpoint_max=saverconf['max_to_keep'],
            log_step_count_steps=tfconf['log_step_count_steps'],
            train_distribute=distribution,
            device_fn=None,
            protocol=None,
            eval_distribute=None,
            experimental_distribute=None,
        )

        # Instantiate Estimator
        nn = tf.estimator.Estimator(  #pylint: disable=no-member,invalid-name
            model_fn=model_fn,
            model_dir=saverconf['model_path'],
            config=run_config,
            params=model_params,
            warm_start_from=None,
        )
        return nn
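
Once built, the estimator is typically driven through its train/evaluate API. A hedged sketch, where solver stands for an instance of the class above, and train_input_fn/eval_input_fn are assumed input functions not shown in the original:

# Assumed usage; the input functions and step count are illustrative.
nn = solver.create_estimator()
nn.train(input_fn=train_input_fn, max_steps=10000)
metrics = nn.evaluate(input_fn=eval_input_fn)
logging.info('eval metrics: {}'.format(metrics))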