    def testTrainSave(self):
        model_type = 'mockfasterrcnn'

        # Save checkpoints to a temp directory.
        tmp_job_dir = tempfile.mkdtemp()
        override_params = [
            'train.num_epochs={}'.format(self.total_epochs),
            'train.job_dir={}'.format(tmp_job_dir),
            'train.run_name=test_runname',
        ]

        config = self.get_config(model_type, override_params=override_params)

        step = run(config,
                   get_dataset_fn=self.get_dataset,
                   get_model_fn=self.get_model)
        self.assertEqual(step, 2)

        # We have to reset the graph to avoid having duplicate names.
        tf.reset_default_graph()
        step = run(config,
                   get_dataset_fn=self.get_dataset,
                   get_model_fn=self.get_model)

        # This is because of a MonitoredTrainingSession "bug":
        # when training ends, it saves a checkpoint tagged with the next step,
        # which leaves us one step ahead when loading it.
        self.assertEqual(step, 5)
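A hedged follow-up sketch, not part of the original test: assuming the checkpoints land under job_dir/run_name (as in the next example) and that the step counter is stored under the conventional name 'global_step', the latest checkpoint could be read back to confirm the "next step" behavior noted in the comment above.

        # Hypothetical check: read the step recorded in the latest checkpoint.
        latest = tf.train.latest_checkpoint(tmp_job_dir + '/test_runname')
        reader = tf.train.NewCheckpointReader(latest)
        # 'global_step' is the usual variable name; adjust if the model uses another.
        saved_step = reader.get_tensor('global_step')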
Example #2
    def testTrainSave(self):
        model_type = 'mockfasterrcnn'

        # Save checkpoints to a temp directory.
        tmp_job_dir = tempfile.mkdtemp()
        override_params = [
            'train.num_epochs={}'.format(self.total_epochs),
            'train.job_dir={}'.format(tmp_job_dir),
            'train.run_name=test_runname',
        ]

        config = self.get_config(model_type, override_params=override_params)

        run(config,
            get_dataset_fn=self.get_dataset,
            get_model_fn=self.get_model)

        # Create a new graph that will load the previously saved checkpoint.
        tf.reset_default_graph()
        new_session = tf.Session()
        new_saver = tf.train.import_meta_graph(
            tmp_job_dir + '/test_runname/model.ckpt-3.meta')
        new_saver.restore(new_session,
                          tmp_job_dir + '/test_runname/model.ckpt-3')

        # Get tensor from graph and run it in session
        w_tensor = tf.get_default_graph().get_tensor_by_name(
            "mockfasterrcnn/w:0")
        w_numpy = new_session.run(w_tensor)

        # Assert we correctly loaded the weight
        self.assertArrayNear(w_numpy, [2.5, 3.0], err=0.01)
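A hedged variation, not from the original test: the restore above hard-codes the checkpoint step (model.ckpt-3), so a sketch that locates the newest checkpoint instead could look like this, assuming the files sit directly under job_dir/run_name:

        # Find the latest checkpoint prefix instead of hard-coding 'model.ckpt-3'.
        checkpoint_path = tf.train.latest_checkpoint(tmp_job_dir + '/test_runname')
        new_saver = tf.train.import_meta_graph(checkpoint_path + '.meta')
        new_saver.restore(new_session, checkpoint_path)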
Example #3
    def testTrainSave(self):
        model_type = 'mockfasterrcnn'

        # Save checkpoints to a temp directory.
        tmp_job_dir = tempfile.mkdtemp()
        override_params = [
            'train.num_epochs={}'.format(self.total_epochs),
            'train.job_dir={}'.format(tmp_job_dir),
            'train.run_name=test_runname',
        ]

        config = self.get_config(model_type, override_params=override_params)

        step = run(
            config,
            get_dataset_fn=self.get_dataset, get_model_fn=self.get_model
        )
        self.assertEqual(step, 2)

        # We have to reset the graph to avoid having duplicate names.
        tf.reset_default_graph()
        step = run(
            config,
            get_dataset_fn=self.get_dataset, get_model_fn=self.get_model
        )

        # This is because of a MonitoredTrainingSession "bug":
        # when training ends, it saves a checkpoint tagged with the next step,
        # which leaves us one step ahead when loading it.
        self.assertEqual(step, 5)
Example #4
    def testTrain(self):
        config = self.config

        custom_config = load_config(config.config_files)
        # The string we use here is ignored.
        model_type = 'mockfasterrcnn'

        # This should not fail
        run(custom_config, model_type, config.override_params,
            get_dataset_fn=self.get_dataset, get_model_fn=self.get_model)
Example #5
    def testTrain(self):
        config = self.config

        # This should not fail
        run(config.model_type,
            config.dataset_type,
            config.config_file,
            config.override_params,
            run_name=config.run_name,
            save_summaries_secs=config.save_summaries_secs,
            get_dataset_fn=self.get_dataset,
            get_model_fn=self.get_model)
Example #6
    def testTrain(self):
        model_type = 'mockfasterrcnn'

        override_params = [
            'train.num_epochs={}'.format(self.total_epochs),
            'train.job_dir=',
        ]

        config = self.get_config(model_type, override_params=override_params)

        # This should not fail
        run(config,
            get_dataset_fn=self.get_dataset,
            get_model_fn=self.get_model)
Example #7
    def testTrain(self):
        model_type = 'mockfasterrcnn'

        override_params = [
            'train.num_epochs={}'.format(self.total_epochs),
            'train.job_dir=',
        ]

        config = self.get_config(model_type, override_params=override_params)

        # This should not fail
        run(
            config, get_dataset_fn=self.get_dataset,
            get_model_fn=self.get_model
        )
Example #8
    def testTrain(self):
        custom_config = load_config(self.config.config_files)
        # The string we use here is ignored.
        model_type = 'mockfasterrcnn'

        self.config.override_params = [
            'train.num_epochs={}'.format(self.total_epochs),
            'train.job_dir=',
        ]

        # This should not fail
        run(custom_config,
            model_type,
            self.config.override_params,
            get_dataset_fn=self.get_dataset,
            get_model_fn=self.get_model)