Example #1 (score: 0)
    def load_estimator(self):
        """Build a DNN classifier configured to use ``self.model_path``.

        Returns
        -------
        estimator
            A tf.estimator.DNNClassifier
        """
        # One numeric feature column per column of the training features.
        feature_cols = [
            tf.feature_column.numeric_column(key=name)
            for name in self.train_x.keys()
        ]

        # Point the run configuration at the model directory so an existing
        # checkpoint there is picked up.
        config = RunConfig().replace(model_dir=self.model_path)

        return tf.estimator.DNNClassifier(
            feature_columns=feature_cols,
            # Two hidden layers of 10 nodes each.
            hidden_units=[10, 10],
            # The model must choose between 3 classes.
            n_classes=3,
            # Use runconfig to load model,
            config=config,
            model_dir=self.model_path)
Example #2 (score: 0)
File: train.py  Project: tongda/ImSAT
def main():
    """Entry point: parse CLI args and launch a train/eval experiment."""
    tf.logging.set_verbosity(tf.logging.DEBUG)

    args = get_parser().parse_args()

    # Let TF fall back to CPU placement and grow GPU memory on demand
    # instead of pre-allocating the whole device.
    sess_cfg = tf.ConfigProto(allow_soft_placement=True)
    sess_cfg.gpu_options.allow_growth = True
    cfg = RunConfig(session_config=sess_cfg).replace(
        model_dir=get_model_dir(args))

    # Hyper-parameters forwarded to the experiment function.
    hparams = HParams(
        learning_rate=args.lr,
        train_steps=args.train_steps,
        steps_per_eval=args.steps_per_eval,
        batch_size=args.batch_size,
        vgg_model_path=args.vgg_model_path,
        selector=args.selector,
        dropout=args.dropout,
        ctx2out=args.ctx2out,
        prev2out=args.prev2out,
        dataset=args.dataset,
        eval_steps=args.eval_steps,
        hard_attention=args.hard_attention,
        use_sampler=args.use_sampler,
        bin_size=14)

    learn_runner.run(experiment_fn=experiment_fn_inner,
                     run_config=cfg,
                     schedule="continuous_train_and_eval",
                     hparams=hparams)
Example #3 (score: 0)
    args = parser.parse_args()

    # Input pipe settings
    # Keyword arguments consumed by the input pipeline builder.
    # NOTE(review): img_sizes is derived from TFRecord metadata under
    # data_dir — presumably per-image dimensions; confirm against
    # input_pipe.get_tf_record_image_size.
    input_param = {
        'data_dir': args.data_dir,
        'batch_size': args.batch_size,
        'buffer_size': args.buffer_size,
        'epochs': args.train_epochs,
        'num_parallel_calls': args.num_parallel_calls,
        'img_sizes': input_pipe.get_tf_record_image_size(args.data_dir),
        'padding': args.unet_padding
    }

    # Create run configuration default
    # Checkpoints and summaries go to <output_dir>/<model_type>; each
    # replace() returns a new RunConfig, so the variable is rebound.
    run_config = RunConfig()
    run_config = run_config.replace(model_dir=os.path.join(args.output_dir, args.model_type))
    run_config = run_config.replace(save_summary_steps=args.save_summary_steps)
    run_config = run_config.replace(save_checkpoints_steps=args.save_checkpoints_steps)

    # Define model and input parameters
    # model_dir is echoed into the HParams so the model function can see
    # the same directory the estimator writes to.
    hparams = HParams(
        learning_rate=args.learning_rate,
        l2_gain=args.l2_gain,
        model_type=args.model_type,
        rmsprop_momentum=args.rmsprop_momentum,
        opt_epsilon=args.opt_epsilon,
        rmsprop_decay=args.rmsprop_decay,
        padding=args.unet_padding,
        optimizer=args.optimizer,
        model_dir=run_config.model_dir
    )