Example #1
0
def match_and_write_filenames_to_csv(list_constraints, csv_file):
    """
    Combine all elements of file searching and finally write the names
    to a csv file.
    :param list_constraints: list of constraints (each defined by a list of
    paths to search, a list of elements the filename should contain,
    and a list of forbidden elements)
    :param csv_file: file to which the final list of files is written.
    :return:
    """
    name_tot = []
    list_tot = []
    if list_constraints is None or len(list_constraints) == 0:
        return
    for c in list_constraints:
        list_files, name_list = \
            KeywordsMatching.matching_subjects_and_filenames(c)
        name_list = remove_duplicated_names(name_list)
        name_tot.append(name_list)
        list_tot.append(list_files)
    list_combined = join_subject_id_and_filename_list(name_tot, list_tot)
    touch_folder(os.path.dirname(csv_file))

    # csv writer has different behaviour in python 2/3
    if sys.version_info[0] >= 3:
        with open(csv_file, 'w', newline='', encoding='utf8') as csvfile:
            file_writer = csv.writer(csvfile)
            for list_temp in list_combined:
                file_writer.writerow(list_temp)
    else:
        with open(csv_file, 'wb') as csvfile:
            file_writer = csv.writer(csvfile, delimiter=',')
            for list_temp in list_combined:
                file_writer.writerow(list_temp)
    return
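These examples all call touch_folder without defining it. A minimal stand-in, assumed from how it is used here (ensure the directory exists and return its resolved path), not necessarily the library's actual implementation:

import os

def touch_folder(model_dir):
    """Create `model_dir` if it does not exist and return its absolute path."""
    model_dir = os.path.abspath(os.path.expanduser(model_dir))
    if not os.path.isdir(model_dir):
        os.makedirs(model_dir)
    return model_dir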
Example #2
0
def match_and_write_filenames_to_csv(list_constraints, csv_file):
    """
    Combine all elements of file searching and finally write the names
    to a csv file.
    :param list_constraints: list of constraints (each defined by a list of
    paths to search, a list of elements the filename should contain,
    and a list of forbidden elements)
    :param csv_file: file to which the final list of files is written.
    :return:
    """
    name_tot = []
    list_tot = []
    if list_constraints is None or len(list_constraints) == 0:
        return
    for c in list_constraints:
        list_files, name_list = \
            KeywordsMatching.matching_subjects_and_filenames(c)
        name_list = remove_duplicated_names(name_list)
        name_tot.append(name_list)
        list_tot.append(list_files)
    list_combined = join_subject_id_and_filename_list(name_tot, list_tot)
    list_combined = filter(lambda names: '' not in names, list_combined)
    list_combined = list(list_combined)
    touch_folder(os.path.dirname(csv_file))
    write_csv(csv_file, list_combined)

    return list_combined
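Example #2 factors the version-specific writing from Example #1 into a write_csv helper whose body is not shown here. A sketch consistent with Example #1's Python 2/3 handling (an assumption, not necessarily the helper's real implementation):

import csv
import sys

def write_csv(csv_file, rows):
    # csv.writer wants text mode with newline='' on Python 3,
    # binary mode on Python 2
    if sys.version_info[0] >= 3:
        with open(csv_file, 'w', newline='', encoding='utf8') as csvfile:
            file_writer = csv.writer(csvfile)
            for row in rows:
                file_writer.writerow(row)
    else:
        with open(csv_file, 'wb') as csvfile:
            file_writer = csv.writer(csvfile, delimiter=',')
            for row in rows:
                file_writer.writerow(row)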
Example #3
0
def match_and_write_filenames_to_csv(list_constraints, csv_file):
    """
    Combine all elements of file searching and finally write the names
    to a csv file.
    :param list_constraints: list of constraints (each defined by a list of
    paths to search, a list of elements the filename should contain,
    and a list of forbidden elements)
    :param csv_file: file to which the final list of files is written.
    :return:
    """
    name_tot = []
    list_tot = []
    if list_constraints is None or len(list_constraints) == 0:
        return
    for c in list_constraints:
        list_files, name_list = \
            KeywordsMatching.matching_subjects_and_filenames(c)
        name_list = remove_duplicated_names(name_list)
        name_tot.append(name_list)
        list_tot.append(list_files)
    list_combined = join_subject_id_and_filename_list(name_tot, list_tot)
    list_combined = filter(lambda names: '' not in names, list_combined)
    list_combined = list(list_combined)
    if not list_combined:
        raise IOError('Nothing to write to {}'.format(csv_file))
    touch_folder(os.path.dirname(csv_file))
    write_csv(csv_file, list_combined)

    return list_combined
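Because this variant raises when every combined row was filtered out, callers can tell "no usable matches" apart from a successfully written file. A hedged usage sketch (build_constraints_from_config is a hypothetical helper standing in for however the KeywordsMatching constraints are actually constructed):

constraints = build_constraints_from_config('config.ini')  # hypothetical
try:
    rows = match_and_write_filenames_to_csv(constraints, 'output/files.csv')
except IOError:
    print('every matched row contained an empty subject id or filename')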
Example #4
0
def main():
    system_param, input_data_param = user_parameters_parser.run()
    if util.has_bad_inputs(system_param):
        return -1

    # print all parameters to txt file for future reference
    all_param = {}
    all_param.update(system_param)
    all_param.update(input_data_param)
    txt_file = 'settings_{}.txt'.format(system_param['SYSTEM'].action)
    model_folder = touch_folder(system_param['SYSTEM'].model_dir)
    txt_file = os.path.join(model_folder, txt_file)
    util.print_save_input_parameters(all_param, txt_file)

    # keep all commandline outputs
    log_file_name = os.path.join(
        model_folder, '{}_{}'.format(all_param['SYSTEM'].action,
                                     'niftynet_log'))
    set_logger(file_name=log_file_name)

    # start application
    app_driver = ApplicationDriver()
    app_driver.initialise_application(system_param, input_data_param)
    app_driver.run_application()
    return 0
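Since main signals failure with -1 and success with 0, the conventional entry point (a sketch, assuming the module is executed as a script) forwards that value to the shell:

import sys

if __name__ == '__main__':
    sys.exit(main())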
Example #5
0
def write_all_mod_mapping(hist_model_file, mapping):
    # backup existing file first
    if os.path.exists(hist_model_file):
        backup_name = '{}.backup'.format(hist_model_file)
        from shutil import copyfile
        try:
            copyfile(hist_model_file, backup_name)
        except OSError:
            tf.logging.warning('cannot back up file {}'.format(hist_model_file))
            raise
        tf.logging.warning("moved existing histogram reference file\n"
                           " from {} to {}".format(hist_model_file,
                                                   backup_name))

    touch_folder(os.path.dirname(hist_model_file))
    __force_writing_new_mapping(hist_model_file, mapping)
Example #6
0
def write_all_mod_mapping(hist_model_file, mapping):
    # backup existing file first
    if os.path.exists(hist_model_file):
        backup_name = '{}.backup'.format(hist_model_file)
        from shutil import copyfile
        try:
            copyfile(hist_model_file, backup_name)
        except OSError:
            tf.logging.warning('cannot back up file {}'.format(hist_model_file))
            raise
        tf.logging.warning(
            "moved existing histogram reference file\n"
            " from {} to {}".format(hist_model_file, backup_name))

    touch_folder(os.path.dirname(hist_model_file))
    __force_writing_new_mapping(hist_model_file, mapping)
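A usage sketch for the two variants above (the mapping values are illustrative, not a real histogram reference): on a repeated call the existing file is first copied to a .backup sibling, so an earlier mapping is never silently overwritten.

mapping = {'T1': [0.0, 10.5, 55.2, 99.9]}  # illustrative landmark values
write_all_mod_mapping('histogram_ref.txt', mapping)
# second call: 'histogram_ref.txt' is copied to 'histogram_ref.txt.backup'
# before the new mapping is written
write_all_mod_mapping('histogram_ref.txt', mapping)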
Example #7
0
    def initialise_application(self, workflow_param, data_param):
        """
        This function receives all parameters from the user config file
        and creates an instance of the application.
        :param workflow_param: a dictionary of user parameters,
        keys correspond to sections in the config file
        :param data_param: a dictionary of input image parameters,
        keys correspond to data properties to be used by image_reader
        :return:
        """
        try:
            system_param = workflow_param.get('SYSTEM', None)
            net_param = workflow_param.get('NETWORK', None)
            infer_param = workflow_param.get('INFERENCE', None)
            eval_param = workflow_param.get('EVALUATION', None)
            app_param = workflow_param.get('CUSTOM', None)
        except AttributeError:
            tf.logging.fatal('parameters should be dictionaries')
            raise
        self.num_threads = 1
        # self.num_threads = max(system_param.num_threads, 1)
        # self.num_gpus = system_param.num_gpus
        # set_cuda_device(system_param.cuda_devices)

        # set output TF model folders
        self.model_dir = touch_folder(
            os.path.join(system_param.model_dir, 'models'))
        self.session_prefix = os.path.join(self.model_dir, FILE_PREFIX)

        assert infer_param, 'inference parameters not specified'

        # create an application instance
        assert app_param, 'application-specific parameters not specified'
        self.app_param = app_param
        app_module = ApplicationFactory.create(app_param.name)
        self.app = app_module(net_param, infer_param,
                              system_param.action)

        self.eval_param = eval_param

        data_param, self.app_param = \
            self.app.add_inferred_output(data_param, self.app_param)
        # initialise data input
        data_partitioner = ImageSetsPartitioner()
        # clear the cached file lists
        data_partitioner.reset()
        if data_param:
            data_partitioner.initialise(
                data_param=data_param,
                new_partition=False,
                ratios=None,
                data_split_file=system_param.dataset_split_file)

        # initialise readers
        self.app.initialise_dataset_loader(data_param, self.app_param,
                                           data_partitioner)
        self.app.initialise_evaluator(eval_param)
Example #8
0
    def initialise_application(self, workflow_param, data_param):
        """
        This function receives all parameters from the user config file
        and creates an instance of the application.
        :param workflow_param: a dictionary of user parameters,
        keys correspond to sections in the config file
        :param data_param: a dictionary of input image parameters,
        keys correspond to data properties to be used by image_reader
        :return:
        """
        try:
            system_param = workflow_param.get('SYSTEM', None)
            net_param = workflow_param.get('NETWORK', None)
            infer_param = workflow_param.get('INFERENCE', None)
            eval_param = workflow_param.get('EVALUATION', None)
            app_param = workflow_param.get('CUSTOM', None)
        except AttributeError:
            tf.logging.fatal('parameters should be dictionaries')
            raise
        self.num_threads = 1
        # self.num_threads = max(system_param.num_threads, 1)
        # self.num_gpus = system_param.num_gpus
        # set_cuda_device(system_param.cuda_devices)

        # set output TF model folders
        self.model_dir = touch_folder(
            os.path.join(system_param.model_dir, 'models'))
        self.session_prefix = os.path.join(self.model_dir, FILE_PREFIX)

        assert infer_param, 'inference parameters not specified'

        # create an application instance
        assert app_param, 'application-specific parameters not specified'
        self.app_param = app_param
        app_module = ApplicationFactory.create(app_param.name)
        self.app = app_module(net_param, infer_param, system_param.action)

        self.eval_param = eval_param

        data_param, self.app_param = \
            self.app.add_inferred_output(data_param, self.app_param)
        # initialise data input
        data_partitioner = ImageSetsPartitioner()
        # clear the cached file lists
        data_partitioner.reset()
        if data_param:
            data_partitioner.initialise(
                data_param=data_param,
                new_partition=False,
                ratios=None,
                data_split_file=system_param.dataset_split_file)

        # initialise readers
        self.app.initialise_dataset_loader(data_param, self.app_param,
                                           data_partitioner)
        self.app.initialise_evaluator(eval_param)
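In both variants above, workflow_param is a plain dictionary keyed by config-file section names whose values expose attributes. types.SimpleNamespace is enough to sketch a call (all field values below are illustrative assumptions, not a tested configuration):

from types import SimpleNamespace

workflow_param = {
    'SYSTEM': SimpleNamespace(model_dir='~/niftynet/my_app',
                              action='inference',
                              dataset_split_file='dataset_split.csv'),
    'NETWORK': SimpleNamespace(name='toynet'),
    'INFERENCE': SimpleNamespace(inference_iter=-1),
    'EVALUATION': None,
    'CUSTOM': SimpleNamespace(name='net_segment'),
}
# driver.initialise_application(workflow_param, data_param={...})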
Example #9
0
def make_model_name(model_dir):
    """
    Make the model checkpoint folder.
    The checkpoint files will be located in the `model_dir/models/` folder,
    and their filenames will start with FILE_PREFIX.

    :param model_dir: niftynet model folder
    :return: a partial name of a checkpoint file `model_dir/models/FILE_PREFIX`
    """
    _model_dir = touch_folder(os.path.join(model_dir, 'models'))
    return os.path.join(_model_dir, FILE_PREFIX)
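A usage sketch, assuming FILE_PREFIX is the usual checkpoint stem such as 'model': the returned string is a prefix that TensorFlow savers extend with an iteration suffix.

prefix = make_model_name('/home/me/my_model')
# with FILE_PREFIX == 'model', prefix is '/home/me/my_model/models/model';
# checkpoints then appear as e.g. '.../models/model-5000.index'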
Example #10
0
    def initialise_application(self, workflow_param, data_param):
        """
        This function receives all parameters from the user config file
        and creates an instance of the application.
        :param workflow_param: a dictionary of user parameters,
        keys correspond to sections in the config file
        :param data_param: a dictionary of input image parameters,
        keys correspond to data properties to be used by image_reader
        :return:
        """
        try:
            system_param = workflow_param.get('SYSTEM', None)
            net_param = workflow_param.get('NETWORK', None)
            train_param = workflow_param.get('TRAINING', None)
            infer_param = workflow_param.get('INFERENCE', None)
            app_param = workflow_param.get('CUSTOM', None)
        except AttributeError:
            tf.logging.fatal('parameters should be dictionaries')
            raise

        assert os.path.exists(system_param.model_dir), \
            'Model folder does not exist: {}'.format(system_param.model_dir)
        self.is_training = (system_param.action == "train")
        # hardware-related parameters
        self.num_threads = max(system_param.num_threads, 1) \
            if self.is_training else 1
        self.num_gpus = system_param.num_gpus \
            if self.is_training else min(system_param.num_gpus, 1)
        set_cuda_device(system_param.cuda_devices)

        # set output TF model folders
        self.model_dir = touch_folder(
            os.path.join(system_param.model_dir, 'models'))
        self.session_prefix = os.path.join(self.model_dir, FILE_PREFIX)

        if self.is_training:
            assert train_param, 'training parameters not specified'
            summary_root = os.path.join(system_param.model_dir, 'logs')
            self.summary_dir = get_latest_subfolder(
                summary_root, create_new=train_param.starting_iter == 0)

            # training iterations-related parameters
            self.initial_iter = train_param.starting_iter
            self.final_iter = train_param.max_iter
            self.save_every_n = train_param.save_every_n
            self.tensorboard_every_n = train_param.tensorboard_every_n
            self.max_checkpoints = train_param.max_checkpoints
            self.gradients_collector = GradientsCollector(
                n_devices=max(self.num_gpus, 1))
            action_param = train_param
        else:
            assert infer_param, 'inference parameters not specified'
            self.initial_iter = infer_param.inference_iter
            action_param = infer_param

        self.outputs_collector = OutputsCollector(
            n_devices=max(self.num_gpus, 1))

        # create an application instance
        assert app_param, 'application-specific parameters not specified'
        app_module = ApplicationDriver._create_app(app_param.name)
        self.app = app_module(net_param, action_param, self.is_training)
        # initialise data input
        self.app.initialise_dataset_loader(data_param, app_param)
        # pylint: disable=not-context-manager
        with self.graph.as_default(), tf.name_scope('Sampler'):
            self.app.initialise_sampler()
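The hardware branching above clamps resources during inference: reader threads drop to 1 and at most one GPU is used. The same rule restated without the line-continuation ternaries (a sketch for readability only):

def resource_limits(is_training, num_threads, num_gpus):
    if is_training:
        return max(num_threads, 1), num_gpus
    # inference: a single thread and at most one GPU
    return 1, min(num_gpus, 1)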
Example #11
0
    def initialise_application(self, workflow_param, data_param):
        """
        This function receives all parameters from the user config file
        and creates an instance of the application.

        :param workflow_param: a dictionary of user parameters,
            keys correspond to sections in the config file
        :param data_param: a dictionary of input image parameters,
            keys correspond to data properties to be used by image_reader
        :return:
        """
        try:
            system_param = workflow_param.get('SYSTEM', None)
            net_param = workflow_param.get('NETWORK', None)
            train_param = workflow_param.get('TRAINING', None)
            infer_param = workflow_param.get('INFERENCE', None)
            app_param = workflow_param.get('CUSTOM', None)
        except AttributeError:
            tf.logging.fatal('parameters should be dictionaries')
            raise

        assert os.path.exists(system_param.model_dir), \
            'Model folder does not exist: {}'.format(system_param.model_dir)
        self.is_training = (system_param.action == "train")
        # hardware-related parameters
        self.num_threads = max(system_param.num_threads, 1) \
            if self.is_training else 1
        self.num_gpus = system_param.num_gpus \
            if self.is_training else min(system_param.num_gpus, 1)
        set_cuda_device(system_param.cuda_devices)

        # set output TF model folders
        self.model_dir = touch_folder(
            os.path.join(system_param.model_dir, 'models'))
        self.session_prefix = os.path.join(self.model_dir, FILE_PREFIX)

        # set training params.
        if self.is_training:
            assert train_param, 'training parameters not specified'
            summary_root = os.path.join(system_param.model_dir, 'logs')
            self.summary_dir = get_latest_subfolder(
                summary_root,
                create_new=train_param.starting_iter == 0)

            self.initial_iter = train_param.starting_iter
            self.final_iter = max(train_param.max_iter, self.initial_iter)
            self.save_every_n = train_param.save_every_n
            self.tensorboard_every_n = train_param.tensorboard_every_n
            self.max_checkpoints = \
                max(train_param.max_checkpoints, self.max_checkpoints)
            self.gradients_collector = GradientsCollector(
                n_devices=max(self.num_gpus, 1))
            self.validation_every_n = train_param.validation_every_n
            if self.validation_every_n > 0:
                self.validation_max_iter = max(self.validation_max_iter,
                                               train_param.validation_max_iter)
            action_param = train_param
        else:  # set inference params.
            assert infer_param, 'inference parameters not specified'
            self.initial_iter = infer_param.inference_iter
            action_param = infer_param

        self.outputs_collector = OutputsCollector(
            n_devices=max(self.num_gpus, 1))

        # create an application instance
        assert app_param, 'application-specific parameters not specified'
        app_module = ApplicationDriver._create_app(app_param.name)
        self.app = app_module(net_param, action_param, system_param.action)

        # initialise data input
        data_partitioner = ImageSetsPartitioner()
        # clear the cached file lists
        data_partitioner.reset()
        do_new_partition = \
            self.is_training and self.initial_iter == 0 and \
            (not os.path.isfile(system_param.dataset_split_file)) and \
            (train_param.exclude_fraction_for_validation > 0 or
             train_param.exclude_fraction_for_inference > 0)
        data_fractions = None
        if do_new_partition:
            assert train_param.exclude_fraction_for_validation > 0 or \
                   self.validation_every_n <= 0, \
                'validation_every_n is set to {}, ' \
                'but train/validation splitting not available,\nplease ' \
                'check "exclude_fraction_for_validation" in the config ' \
                'file (current config value: {}).'.format(
                    self.validation_every_n,
                    train_param.exclude_fraction_for_validation)
            data_fractions = (train_param.exclude_fraction_for_validation,
                              train_param.exclude_fraction_for_inference)

        if data_param:
            data_partitioner.initialise(
                data_param=data_param,
                new_partition=do_new_partition,
                ratios=data_fractions,
                data_split_file=system_param.dataset_split_file)

        if data_param and self.is_training and self.validation_every_n > 0:
            assert data_partitioner.has_validation, \
                'validation_every_n is set to {}, ' \
                'but train/validation splitting not available.\nPlease ' \
                'check dataset partition list {} ' \
                '(remove file to generate a new dataset partition). ' \
                'Or set validation_every_n to -1.'.format(
                    self.validation_every_n, system_param.dataset_split_file)

        # initialise readers
        self.app.initialise_dataset_loader(
            data_param, app_param, data_partitioner)

        self._data_partitioner = data_partitioner

        # pylint: disable=not-context-manager
        with self.graph.as_default(), tf.name_scope('Sampler'):
            self.app.initialise_sampler()
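The do_new_partition predicate is the crux of Examples #11 and #12: a fresh train/validation/inference split is generated only when training starts from iteration 0, no split file exists yet, and at least one exclusion fraction is positive. A standalone restatement (a sketch mirroring the config fields used above):

import os

def needs_new_partition(is_training, initial_iter, split_file,
                        validation_fraction, inference_fraction):
    # regenerate the split only on a fresh training run, and only if the
    # user actually asked for held-out validation or inference data
    return (is_training
            and initial_iter == 0
            and not os.path.isfile(split_file)
            and (validation_fraction > 0 or inference_fraction > 0))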
Example #12
0
    def initialise_application(self, workflow_param, data_param):
        """
        This function receives all parameters from the user config file
        and creates an instance of the application.

        :param workflow_param: a dictionary of user parameters,
            keys correspond to sections in the config file
        :param data_param: a dictionary of input image parameters,
            keys correspond to data properties to be used by image_reader
        :return:
        """
        try:
            system_param = workflow_param.get('SYSTEM', None)
            net_param = workflow_param.get('NETWORK', None)
            train_param = workflow_param.get('TRAINING', None)
            infer_param = workflow_param.get('INFERENCE', None)
            app_param = workflow_param.get('CUSTOM', None)
        except AttributeError:
            tf.logging.fatal('parameters should be dictionaries')
            raise

        assert os.path.exists(system_param.model_dir), \
            'Model folder does not exist: {}'.format(system_param.model_dir)
        self.is_training = (system_param.action == "train")
        # hardware-related parameters
        self.num_threads = max(system_param.num_threads, 1) \
            if self.is_training else 1
        self.num_gpus = system_param.num_gpus \
            if self.is_training else min(system_param.num_gpus, 1)
        set_cuda_device(system_param.cuda_devices)

        # set output TF model folders
        self.model_dir = touch_folder(
            os.path.join(system_param.model_dir, 'models'))
        self.session_prefix = os.path.join(self.model_dir, FILE_PREFIX)

        if self.is_training:
            assert train_param, 'training parameters not specified'
            summary_root = os.path.join(system_param.model_dir, 'logs')
            self.summary_dir = get_latest_subfolder(
                summary_root, create_new=train_param.starting_iter == 0)

            self.initial_iter = train_param.starting_iter
            self.final_iter = max(train_param.max_iter, self.initial_iter)
            self.save_every_n = train_param.save_every_n
            self.tensorboard_every_n = train_param.tensorboard_every_n
            self.max_checkpoints = \
                max(train_param.max_checkpoints, self.max_checkpoints)
            self.gradients_collector = GradientsCollector(
                n_devices=max(self.num_gpus, 1))
            self.validation_every_n = train_param.validation_every_n
            if self.validation_every_n > 0:
                self.validation_max_iter = max(self.validation_max_iter,
                                               train_param.validation_max_iter)
            action_param = train_param
        else:
            assert infer_param, 'inference parameters not specified'
            self.initial_iter = infer_param.inference_iter
            action_param = infer_param

        self.outputs_collector = OutputsCollector(
            n_devices=max(self.num_gpus, 1))

        # create an application instance
        assert app_param, 'application-specific parameters not specified'
        app_module = ApplicationDriver._create_app(app_param.name)
        self.app = app_module(net_param, action_param, self.is_training)

        # initialise data input
        data_partitioner = ImageSetsPartitioner()
        # clear the cached file lists
        data_partitioner.reset()
        do_new_partition = \
            self.is_training and self.initial_iter == 0 and \
            (not os.path.isfile(system_param.dataset_split_file)) and \
            (train_param.exclude_fraction_for_validation > 0 or
             train_param.exclude_fraction_for_inference > 0)
        data_fractions = None
        if do_new_partition:
            assert train_param.exclude_fraction_for_validation > 0 or \
                   self.validation_every_n <= 0, \
                'validation_every_n is set to {}, ' \
                'but train/validation splitting not available,\nplease ' \
                'check "exclude_fraction_for_validation" in the config ' \
                'file (current config value: {}).'.format(
                    self.validation_every_n,
                    train_param.exclude_fraction_for_validation)
            data_fractions = (train_param.exclude_fraction_for_validation,
                              train_param.exclude_fraction_for_inference)

        if data_param:
            data_partitioner.initialise(
                data_param=data_param,
                new_partition=do_new_partition,
                ratios=data_fractions,
                data_split_file=system_param.dataset_split_file)

        if data_param and self.is_training and self.validation_every_n > 0:
            assert data_partitioner.has_validation, \
                'validation_every_n is set to {}, ' \
                'but train/validation splitting not available.\nPlease ' \
                'check dataset partition list {} ' \
                '(remove file to generate a new dataset partition). ' \
                'Or set validation_every_n to -1.'.format(
                    self.validation_every_n, system_param.dataset_split_file)

        # initialise readers
        self.app.initialise_dataset_loader(data_param, app_param,
                                           data_partitioner)

        self._data_partitioner = data_partitioner

        # pylint: disable=not-context-manager
        with self.graph.as_default(), tf.name_scope('Sampler'):
            self.app.initialise_sampler()