Example No. 1
    @classmethod
    def from_file(cls, filename, logger=None, optional_dict2=None):
        # override TrainTestModel.from_file

        with open(filename, 'rb') as file:
            info_loaded = pickle.load(file)

        train_test_model = cls(param_dict={},
                               logger=logger,
                               optional_dict2=optional_dict2)
        train_test_model.param_dict = info_loaded['param_dict']
        train_test_model.model_dict = info_loaded['model_dict']

        # == special handling of tensorflow: load .model differently ==

        input_image_batch, logits, y_, y_p, W_conv0, W_conv1, loss, train_step \
            = cls.create_tf_variables(train_test_model.param_dict)

        saver = tf.train.Saver()
        sess = tf.Session()
        checkpoint_dir = get_dir_without_last_slash(filename)
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            assert False
        model = {
            'sess': sess,
            'y_p': y_p,
            'input_image_batch': input_image_batch,
        }
        train_test_model.model_dict['model'] = model

        return train_test_model
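For context, here is a minimal usage sketch of a model loaded this way; the class name SomeTFTrainTestModel, the model path, and the input batch shape are placeholders, not taken from the example above.

# Hypothetical usage sketch -- class name, path and batch shape are placeholders.
import numpy as np

train_test_model = SomeTFTrainTestModel.from_file('workspace/model/nn_model')
model = train_test_model.model_dict['model']
sess = model['sess']
y_p = model['y_p']
input_image_batch = model['input_image_batch']

# run a forward pass on a dummy batch (the shape here is an assumption)
dummy_batch = np.zeros((1, 64, 64, 1), dtype=np.float32)
prediction = sess.run(y_p, feed_dict={input_image_batch: dummy_batch})

sess.close()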
Example No. 2
    @classmethod
    def from_file(cls, filename, logger=None, optional_dict2=None):
        # override TrainTestModel.from_file

        with open(filename, 'rb') as file:
            info_loaded = pickle.load(file)

        train_test_model = cls(param_dict={}, logger=logger,
                               optional_dict2=optional_dict2)
        train_test_model.param_dict = info_loaded['param_dict']
        train_test_model.model_dict = info_loaded['model_dict']

        # == special handling of tensorflow: load .model differently ==

        input_image_batch, logits, y_, y_p, W_conv0, W_conv1, loss, train_step \
            = cls.create_tf_variables(train_test_model.param_dict)

        saver = tf.train.Saver()
        sess = tf.Session()
        checkpoint_dir = get_dir_without_last_slash(filename)
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            assert False
        model = {
            'sess': sess,
            'y_p': y_p,
            'input_image_batch': input_image_batch,
        }
        train_test_model.model_dict['model'] = model

        return train_test_model
Example No. 3
    @staticmethod
    def delete(filename):
        """
        override TrainTestModel.delete
        """
        if os.path.exists(filename):
            os.remove(filename)
        if os.path.exists(filename + '.model'):
            os.remove(filename + '.model')
        if os.path.exists(filename + '.model.meta'):
            os.remove(filename + '.model.meta')
        filedir = get_dir_without_last_slash(filename)
        if os.path.exists(filedir + '/checkpoint'):
            os.remove(filedir + '/checkpoint')
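Both from_file and delete call a get_dir_without_last_slash helper that is not shown in these examples. A minimal sketch of such a helper, assuming it simply strips the last path component:

def get_dir_without_last_slash(path):
    """
    Return the directory portion of a path, e.g. 'a/b/c.model' -> 'a/b'.
    Minimal sketch; the project's own helper may differ in edge cases.
    """
    return '/'.join(path.split('/')[:-1])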
Example No. 4
    @staticmethod
    def delete(filename):
        """
        override TrainTestModel.delete(filename)
        """
        if os.path.exists(filename):
            os.remove(filename)
        if os.path.exists(filename + '.model'):
            os.remove(filename + '.model')
        if os.path.exists(filename + '.model.meta'):
            os.remove(filename + '.model.meta')
        filedir = get_dir_without_last_slash(filename)
        if os.path.exists(filedir + '/checkpoint'):
            os.remove(filedir + '/checkpoint')
Example No. 5
    def _run_on_asset(self, asset):
        # Override Executor._run_on_asset to skip working on ref video

        if self.result_store:
            result = self.result_store.load(asset, self.executor_id)
        else:
            result = None

        # if result can be retrieved from result_store, skip log file
        # generation and reading result from log file, and directly return
        # the retrieved result
        if result is not None:
            if self.logger:
                self.logger.info('{id} result exists. Skip {id} run.'.
                                 format(id=self.executor_id))
        else:

            if self.logger:
                self.logger.info('{id} result doesn\'t exist. Perform {id} '
                                 'calculation.'.format(id=self.executor_id))

            # at this stage, it is certain that asset.ref_path and
            # asset.dis_path will be used. must determine early that
            # they exist
            self._assert_paths(asset)

            # if no rescaling is involved, directly work on ref_path/dis_path,
            # instead of opening workfiles
            self._set_asset_use_path_as_workpath(asset)

            # remove workfiles if they exist (do early here to avoid a race
            # condition when ref path and dis path have some overlap)
            if asset.use_path_as_workpath:
                # do nothing
                pass
            else:
                self._close_dis_workfile(asset)

            log_file_path = self._get_log_file_path(asset)
            make_parent_dirs_if_nonexist(log_file_path)

            if asset.use_path_as_workpath:
                # do nothing
                pass
            else:
                if self.fifo_mode:
                    dis_p = multiprocessing.Process(target=self._open_dis_workfile,
                                                    args=(asset, True))
                    dis_p.start()
                    self._wait_for_workfiles(asset)
                else:
                    self._open_dis_workfile(asset, fifo_mode=False)

            self._prepare_log_file(asset)

            self._generate_result(asset)

            # clean up workfiles
            if self.delete_workdir:
                if asset.use_path_as_workpath:
                    # do nothing
                    pass
                else:
                    self._close_dis_workfile(asset)

            if self.logger:
                self.logger.info("Read {id} log file, get scores...".
                                 format(type=self.executor_id))

            # collect result from each asset's log file
            result = self._read_result(asset)

            # save result
            if self.result_store:
                self.result_store.save(result)

            # clean up workdir and log files in it
            if self.delete_workdir:

                # remove log file
                self._remove_log(asset)

                # remove dir
                log_file_path = self._get_log_file_path(asset)
                log_dir = get_dir_without_last_slash(log_file_path)
                try:
                    os.rmdir(log_dir)
                except OSError as e:
                    if e.errno == 39: # [Errno 39] Directory not empty
                        # VQM could generate an error file with non-critical
                        # information like: '3 File is longer than 15 seconds.
                        # Results will be calculated using first 15 seconds
                        # only.' In this case, want to keep this
                        # informational file and pass
                        pass

        result = self._post_process_result(result)

        return result
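A hypothetical driver that maps _run_on_asset over a list of assets could look like the sketch below; the run method name and the self.assets / self.results attributes are assumptions, not taken from the example above.

    def run(self):
        # Hypothetical driver; only _run_on_asset comes from the example above.
        if self.logger:
            self.logger.info('Run {id} on {n} assets...'.format(
                id=self.executor_id, n=len(self.assets)))
        self.results = list(map(self._run_on_asset, self.assets))
        return self.results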
Example No. 6
    def _run_on_asset(self, asset):
        # Wrapper around the essential function _generate_result, to
        # do housekeeping work including 1) asserts on the asset, 2) skip run
        # if result already exists, 3) creating fifo, 4) delete work file and dir

        # asserts
        self._assert_an_asset(asset)

        if self.result_store:
            result = self.result_store.load(asset, self.executor_id)
        else:
            result = None

        # if result can be retrieved from result_store, skip log file
        # generation and reading result from log file, and directly return
        # the retrieved result
        if result is not None:
            if self.logger:
                self.logger.info('{id} result exists. Skip {id} run.'.
                                 format(id=self.executor_id))
        else:

            if self.logger:
                self.logger.info('{id} result doesn\'t exist. Perform {id} '
                                 'calculation.'.format(id=self.executor_id))

            # at this stage, it is certain that asset.ref_path and
            # asset.dis_path will be used. must determine early that
            # they exist
            self._assert_paths(asset)

            # if no rescaling is involved, directly work on ref_path/dis_path,
            # instead of opening workfiles
            self._set_asset_use_path_as_workpath(asset)

            # remove workfiles if they exist (do early here to avoid a race
            # condition when ref path and dis path have some overlap)
            if asset.use_path_as_workpath:
                # do nothing
                pass
            else:
                self._close_ref_workfile(asset)
                self._close_dis_workfile(asset)

            log_file_path = self._get_log_file_path(asset)
            make_parent_dirs_if_nonexist(log_file_path)

            if asset.use_path_as_workpath:
                # do nothing
                pass
            else:
                if self.fifo_mode:
                    ref_p = multiprocessing.Process(target=self._open_ref_workfile,
                                                    args=(asset, True))
                    dis_p = multiprocessing.Process(target=self._open_dis_workfile,
                                                    args=(asset, True))
                    ref_p.start()
                    dis_p.start()
                    self._wait_for_workfiles(asset)
                else:
                    self._open_ref_workfile(asset, fifo_mode=False)
                    self._open_dis_workfile(asset, fifo_mode=False)

            self._prepare_log_file(asset)

            self._generate_result(asset)

            # clean up workfiles
            if self.delete_workdir:
                if asset.use_path_as_workpath:
                    # do nothing
                    pass
                else:
                    self._close_ref_workfile(asset)
                    self._close_dis_workfile(asset)

            if self.logger:
                self.logger.info("Read {id} log file, get scores...".
                                 format(type=self.executor_id))

            # collect result from each asset's log file
            result = self._read_result(asset)

            # save result
            if self.result_store:
                self.result_store.save(result)

            # clean up workdir and log files in it
            if self.delete_workdir:

                # remove log file
                self._remove_log(asset)

                # remove dir
                log_file_path = self._get_log_file_path(asset)
                log_dir = get_dir_without_last_slash(log_file_path)
                try:
                    os.rmdir(log_dir)
                except OSError as e:
                    if e.errno == 39: # [Errno 39] Directory not empty
                        # VQM could generate an error file with non-critical
                        # information like: '3 File is longer than 15 seconds.
                        # Results will be calculated using first 15 seconds
                        # only.' In this case, want to keep this
                        # informational file and pass
                        pass

        result = self._post_process_result(result)

        return result
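In fifo_mode the workfiles are opened in child processes, so _wait_for_workfiles presumably blocks until both named pipes show up on disk. Below is a hedged sketch of such a wait; the timeout values and the asset.ref_workfile_path / asset.dis_workfile_path attribute names are assumptions, not taken from the examples above.

    def _wait_for_workfiles(self, asset, timeout_sec=10.0, poll_interval_sec=0.1):
        # Hedged sketch: timeout, polling interval and the asset attribute
        # names are assumptions; imports are kept local to stay self-contained.
        import os
        import time
        deadline = time.time() + timeout_sec
        while not (os.path.exists(asset.ref_workfile_path)
                   and os.path.exists(asset.dis_workfile_path)):
            if time.time() > deadline:
                raise RuntimeError('workfiles not ready: {ref}, {dis}'.format(
                    ref=asset.ref_workfile_path, dis=asset.dis_workfile_path))
            time.sleep(poll_interval_sec)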