Example #1
    def blur(img, fwhm, gradient=True, subdir='tmp'):
        # note c3d can take a voxel rather than fwhm specification, but the Algorithms interface
        # currently doesn't allow this to be used ... maybe an argument for switching from mincblur
        if fwhm in (-1, 0, None):
            if gradient:
                raise ValueError(
                    "can't compute gradient without a positive FWHM")
            return Result(stages=Stages(), output=Namespace(img=img))

        if gradient:
            out_gradient = img.newname_with("_blur%s_grad" % fwhm)
        else:
            out_gradient = None

        out_img = img.newname_with("_blurred%s" % fwhm)

        cmd = CmdStage(cmd=[
            # c3d processes its arguments left to right, so the input image
            # must precede -smooth / -o:
            'c3d', img.path, '-smooth',
            "%smm" % fwhm, '-o', out_img.path
        ] + (['-gradient', '-o', out_gradient.path] if gradient else []),
                       inputs=(img,),
                       outputs=(out_img, out_gradient) if gradient else
                       (out_img, ))
        return Result(stages=Stages((cmd, )),
                      output=Namespace(img=out_img, gradient=out_gradient)
                      if gradient else Namespace(img=out_img))
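A hedged usage sketch (the input path is illustrative; in this codebase `s.defer(...)` registers the returned stages and hands back the `output` namespace):

    s = Stages()
    img = MincAtom("/data/scan.mnc")
    blurred = s.defer(blur(img, fwhm=0.5, gradient=True))
    # blurred.img is the smoothed volume; blurred.gradient its gradient image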
Example #2
 def test_create_fail(self):
     with self.assertRaises(Exception):
         S3AssetSigner.create(
             Namespace(asset_store_access_key=None,
                       asset_store_secret_key='mock_s3_secret_key',
                       asset_store_region='mock-s3-region',
                       asset_store_bucket='mock-s3-bucket',
                       asset_store_public=False))
     with self.assertRaises(Exception):
         S3AssetSigner.create(
             Namespace(asset_store_access_key='mock_s3_access_key',
                       asset_store_secret_key=None,
                       asset_store_region='mock-s3-region',
                       asset_store_bucket='mock-s3-bucket',
                       asset_store_public=False))
     with self.assertRaises(Exception):
         S3AssetSigner.create(
             Namespace(asset_store_access_key='mock_s3_access_key',
                       asset_store_secret_key='mock_s3_secret_key',
                       asset_store_region=None,
                       asset_store_bucket='mock-s3-bucket',
                       asset_store_public=False))
     with self.assertRaises(Exception):
         S3AssetSigner.create(
             Namespace(asset_store_access_key='mock_s3_access_key',
                       asset_store_secret_key='mock_s3_secret_key',
                       asset_store_region='mock-s3-region',
                       asset_store_bucket=None,
                       asset_store_public=False))
Example #3
 def test_create_fail(self):
     with self.assertRaises(Exception):
         FileSystemAssetSigner.create(
             Namespace(asset_store_url_prefix=None,
                       asset_store_secret='asset_secret',
                       asset_store_public=False))
     with self.assertRaises(Exception):
         FileSystemAssetSigner.create(
             Namespace(asset_store_url_prefix='http://skygear.dev/files',
                       asset_store_secret=None,
                       asset_store_public=False))
Example #4
    def test_create_fail(self):
        with self.assertRaises(Exception):
            CloudAssetSigner.create(
                Namespace(appname=None,
                          cloud_asset_host='http://mock-cloud-asset.dev',
                          cloud_asset_token='mock-cloud-asset-token',
                          cloud_asset_public_prefix=
                          'http://mock-cloud-asset.dev/public',
                          cloud_asset_private_prefix=(
                              'http://mock-cloud-asset.dev/private'),
                          asset_store_public=False))

        with self.assertRaises(Exception):
            CloudAssetSigner.create(
                Namespace(appname='skygear-test',
                          cloud_asset_host=None,
                          cloud_asset_token='mock-cloud-asset-token',
                          cloud_asset_public_prefix=
                          'http://mock-cloud-asset.dev/public',
                          cloud_asset_private_prefix=(
                              'http://mock-cloud-asset.dev/private'),
                          asset_store_public=False))

        with self.assertRaises(Exception):
            CloudAssetSigner.create(
                Namespace(appname='skygear-test',
                          cloud_asset_host='http://mock-cloud-asset.dev',
                          cloud_asset_token=None,
                          cloud_asset_public_prefix=
                          'http://mock-cloud-asset.dev/public',
                          cloud_asset_private_prefix=(
                              'http://mock-cloud-asset.dev/private'),
                          asset_store_public=False))

        with self.assertRaises(Exception):
            CloudAssetSigner.create(
                Namespace(appname='skygear-test',
                          cloud_asset_host='http://mock-cloud-asset.dev',
                          cloud_asset_token='mock-cloud-asset-token',
                          cloud_asset_public_prefix=
                          'http://mock-cloud-asset.dev/public',
                          cloud_asset_private_prefix=None,
                          asset_store_public=False))

        with self.assertRaises(Exception):
            CloudAssetSigner.create(
                Namespace(appname='skygear-test',
                          cloud_asset_host='http://mock-cloud-asset.dev',
                          cloud_asset_token='mock-cloud-asset-token',
                          cloud_asset_public_prefix=None,
                          cloud_asset_private_prefix=(
                              'http://mock-cloud-asset.dev/private'),
                          asset_store_public=True))
Example #5
def NLIN_pipeline(options):

    if options.application.files is None:
        raise ValueError("Please, some files! (or try '--help')")  # TODO make a util procedure for this

    output_dir    = options.application.output_directory
    pipeline_name = options.application.pipeline_name

    # TODO this is tedious and annoyingly similar to the registration chain and MBM and LSQ6 ...
    processed_dir = os.path.join(output_dir, pipeline_name + "_processed")
    nlin_dir      = os.path.join(output_dir, pipeline_name + "_nlin")

    resolution = (options.registration.resolution  # TODO does using the finest resolution here make sense?
                  or min([get_resolution_from_file(f) for f in options.application.files]))

    imgs = [MincAtom(f, pipeline_sub_dir=processed_dir) for f in options.application.files]

    # determine NLIN settings by overriding defaults with
    # any settings present in protocol file, if it exists
    # could add a hook to print a message announcing completion, output files,
    # add more stages here to make a CSV

    initial_target_mask = MincAtom(options.nlin.target_mask) if options.nlin.target_mask else None
    initial_target = MincAtom(options.nlin.target, mask=initial_target_mask)

    full_hierarchy = get_nonlinear_configuration_from_options(nlin_protocol=options.nlin.nlin_protocol,
                                                              flag_nlin_protocol=next(iter(options.nlin.flags_.nlin_protocol)),
                                                              reg_method=options.nlin.reg_method,
                                                              file_resolution=resolution)

    s = Stages()

    nlin_result = s.defer(nlin_build_model(imgs, initial_target=initial_target, conf=full_hierarchy, nlin_dir=nlin_dir))

    # TODO return these?
    inverted_xfms = [s.defer(invert_xfmhandler(xfm)) for xfm in nlin_result.output]

    if options.stats.calc_stats:
        # TODO: put the stats part behind a flag ...

        determinants = [s.defer(determinants_at_fwhms(
                                  xfm=inv_xfm,
                                  inv_xfm=xfm,
                                  blur_fwhms=options.stats.stats_kernels))
                        for xfm, inv_xfm in zip(nlin_result.output, inverted_xfms)]

        return Result(stages=s,
                      output=Namespace(nlin_xfms=nlin_result,
                                       avg_img=nlin_result.avg_img,
                                       determinants=determinants))
    else:
        # there's no consistency in what gets returned, yikes ...
        return Result(stages=s, output=Namespace(nlin_xfms=nlin_result, avg_img=nlin_result.avg_img))
Example #6
def alembic_config_from_url(db_url: str = None) -> Config:
    options = Namespace(config="alembic.ini",
                        name="alembic",
                        db_url=db_url,
                        raiseerr=False,
                        x=None)
    return make_alembic_config(options)
Example #7
def layout_pos2temp(options: configargparse.Namespace, pos: list,
                    powers: list):
    if options.bcs is None:
        options.bcs = []
    task = get_task(
        geometry_board="s",
        size_board=options.length,
        grid_board=options.nx,
        geometry=len(options.units) * "r",
        size=options.units,
        angle=options.angles,
        intensity=options.powers,
        rad=False,
        method=None,
    )
    layout = task.layout_from_pos(pos, powers)
    temp, xs, ys, zs = run_solver_c(
        ndim=options.ndim,
        length=options.length,
        units=options.units,
        bcs=options.bcs,
        u0=options.u_D,
        powers=powers,
        nx=options.nx - 1,
        F=layout,
    )
    return layout, temp
Example #8
def make_alembic_config(options: Namespace,
                        base_path: str = PROJECT_PATH) -> Config:
    """
    Creates an alembic Config object from command-line arguments and
    adjusts it (replaces relative paths with absolute ones).

    :param options: command-line arguments
    :param base_path: base path against which relative paths are resolved
    :return: alembic Config object
    """
    # If the path to alembic.ini is relative, prepend base_path to it
    # (producing an absolute path)
    if not os.path.isabs(options.config):
        options.config = os.path.join(base_path, options.config)

    # Create the Alembic Config object
    config = Config(file_=options.config,
                    ini_section=options.name,
                    cmd_opts=options)
    if options.db_url:
        # Override sqlalchemy.url in the Alembic config
        config.set_main_option("sqlalchemy.url", options.db_url)

    # Rewrite the path to the alembic directory (needed so that alembic can
    # find env.py, the migration template, and the migrations themselves)
    alembic_location = config.get_main_option("script_location")
    if not os.path.isabs(alembic_location):
        config.set_main_option("script_location",
                               os.path.join(base_path, alembic_location))

    return config
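A minimal sketch of driving migrations with the resulting config (alembic.command.upgrade is Alembic's standard API; the database URL is illustrative):

    from alembic.command import upgrade

    config = alembic_config_from_url("postgresql://user:pass@localhost/db")
    upgrade(config, "head")  # apply all migrations up to the latest revision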
Example #9
 def mock_options(self):
     return Namespace(asset_store_access_key='mock_s3_access_key',
                      asset_store_secret_key='mock_s3_secret_key',
                      asset_store_region='mock-s3-region',
                      asset_store_bucket='mock-s3-bucket',
                      asset_store_s3_url_prefix=None,
                      asset_store_public=False)
Example #10
def layout_pos_list2temp(options: configargparse.Namespace,
                         layout_pos_list: list, powers: list):
    """Get tempreture field from layout position list.

    Args:
        options (configargparse.Namespace): options must have keys `ndim, length, length_unit, unit_per_row, bcs, u_D, nx`. Note that `nx` here is element number which is node number minus 1.
        layout_pos_list (list): The position starts from 0.
        powers (list): The power for each layout unit. len(layout_pos_list)=len(powers).

    Returns:
        F, U, xs, ys, zs
    """
    if "vtk" not in options:
        options.vtk = False
    F = io.layout2matrix(
        options.ndim,
        options.nx,
        options.unit_per_row,
        powers,
        layout_pos_list,
    )
    U, xs, ys, zs = run_solver(
        options.ndim,
        options.length,
        options.length_unit,
        options.bcs,
        layout_pos_list,
        options.u_D,
        powers,
        options.nx,
        coordinates=True,
        F=F,
        vtk=options.vtk,
    )
    return F, U, xs, ys, zs
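A hedged call sketch (the option values are illustrative; with 21 nodes per side, nx=20 since `nx` counts elements):

    opts = configargparse.Namespace(ndim=2, length=0.1, length_unit=0.01,
                                    unit_per_row=10, bcs=[], u_D=298.0, nx=20)
    F, U, xs, ys, zs = layout_pos_list2temp(opts, layout_pos_list=[3, 57, 88],
                                            powers=[10000.0] * 3)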
Example #11
 def mock_options(self, public=False):
     return Namespace(
         appname='skygear-test',
         cloud_asset_host='http://mock-cloud-asset.dev',
         cloud_asset_token='mock-cloud-asset-token',
         cloud_asset_public_prefix='http://mock-cloud-asset.dev/public',
         cloud_asset_private_prefix='http://mock-cloud-asset.dev/private',
         asset_store_public=public)
Example #12
    def build_model(self, args):
        model = super().build_model(args)
        if getattr(args, 'eval_bleu', False):
            assert getattr(args, 'eval_bleu_detok', None) is not None, (
                '--eval-bleu-detok is required if using --eval-bleu; '
                'try --eval-bleu-detok=moses (or --eval-bleu-detok=space '
                'to disable detokenization, e.g., when using sentencepiece)')
            detok_args = json.loads(
                getattr(args, 'eval_bleu_detok_args', '{}') or '{}')
            self.tokenizer = encoders.build_tokenizer(
                Namespace(tokenizer=getattr(args, 'eval_bleu_detok', None),
                          **detok_args))

            gen_args = json.loads(
                getattr(args, 'eval_bleu_args', '{}') or '{}')
            self.sequence_generator = self.build_generator(
                [model], Namespace(**gen_args))
        return model
Example #13
def generate_from_cli(options: configargparse.Namespace):
    """Generate from cli with options.

    Arguments:
        options (configargparse.Namespace): config options
    """
    if options.bcs is None:
        options.bcs = []
    np.random.seed(options.seed)
    seeds = np.random.randint(2**32, size=options.worker)
    print(options.worker)
    print(seeds)
    seeds_q: Queue = Queue()
    for seed in seeds:
        seeds_q.put(seed)

    unit_n = len(options.units)
    geometry = ["r"] * unit_n
    task = get_task(
        geometry_board="s",
        size_board=options.length,
        grid_board=options.nx + 1,
        geometry=geometry,
        size=options.units,
        angle=options.angles,
        intensity=options.powers,
        rad=False,
        method=options.sampler,
    )
    task.warmup()  # warm up, especially for gibbs

    if options.method == "fenics":
        # build a single-argument function (for Pool.imap)

        method_fenics_p = partial(method_fenics,
                                  options=options,
                                  sampler=task.sample)

        # for i in range(options.sample_n):
        #     method_fenics_p(i)

        # multiprocess support
        with Pool(options.worker, initializer=pool_init,
                  initargs=(seeds_q, )) as pool:
            pool_it = pool.imap(method_fenics_p, range(options.sample_n))
            # for i in pool_it:
            #     print(i)
            for _ in tqdm.tqdm(
                    pool_it,
                    desc=f"{pool._processes} workers's running",
                    total=options.sample_n,
                    ncols=100,
            ):
                pass

    print(f"Generated {options.sample_n} layouts in {options.data_dir}")
Example #14
 def test_sign_public(self):
     signer = CloudAssetSigner.create(Namespace(
         appname='skygear-test',
         cloud_asset_host='http://mock-cloud-asset.dev',
         cloud_asset_token='mock-cloud-asset-token',
         cloud_asset_public_prefix='http://mock-cloud-asset.dev/public',
         cloud_asset_private_prefix='http://mock-cloud-asset.dev/private',
         asset_store_public=True))
     assert signer.sign('a good fixture') == \
         'http://mock-cloud-asset.dev/public/skygear-test/a%20good%20fixture'  # noqa
Example #15
File: args.py Project: sailfish009/molpal
def cleanup_args(args: Namespace) -> None:
    """Remove unnecessary attributes and change some arguments"""
    if isinstance(args.scores_csvs, list) and len(args.scores_csvs) == 1:
        args.scores_csvs = args.scores_csvs[0]
    args.title_line = not args.no_title_line

    args_to_remove = {'no_title_line'}

    if args.objective == 'docking':
        args_to_remove |= {
            'lookup_path', 'no_lookup_title_line', 'lookup_smiles_col',
            'lookup_data_col', 'lookup_sep'
        }
    elif args.objective == 'lookup':
        args_to_remove |= {
            'software',
            'receptor',
            'box_center',
            'box_size',
            'score_mode',
        }

    if args.metric not in ('ei', 'pi'):  # 'or' here would always be true
        args_to_remove.add('xi')
    if args.metric != 'ucb':
        args_to_remove.add('beta')
    if args.metric != 'threshold':
        args_to_remove.add('threshold')

    if not args.cluster:
        args_to_remove |= {'temp_i', 'temp_f'}

    if args.model != 'gp':
        args_to_remove |= {'gp_kernel'}
    if args.model != 'nn':
        args_to_remove |= set()
    if args.model != 'mpn':
        args_to_remove |= {'init_lr', 'max_lr', 'final_lr'}
    if args.model != 'nn' and args.model != 'mpn':
        args_to_remove |= {'conf_method'}

    for arg in args_to_remove:
        delattr(args, arg)
Example #16
    def parse(self, args):
        if len(args) == 1 and args[0] in ['-h', '--help']:
            self._parser.print_help()
            return None

        # Parse train pipeline options
        meta_options, extra_args = self._parser.parse_known_args(args)
        print(meta_options)

        if hasattr(meta_options, self._pipeline_config_key):
            extra_args = [
                '--config',
                getattr(meta_options, self._pipeline_config_key),
            ] + extra_args

        pipeline_options = self._pipeline_parser.parse(extra_args)

        options = Namespace()
        options.meta = meta_options
        options.pipeline = pipeline_options

        return options
Example #17
    def parse(self, args):
        if len(args) == 1 and args[0] in ['-h', '--help']:
            self._parser.print_help()
            return None

        # Parse train pipeline options
        pipeline_options, extra_args = self._parser.parse_known_args(args)
        config_option, _ = self._config_option_parser.parse_known_args(args)

        options = Namespace()
        options.pipeline = pipeline_options
        options.model = None
        options.model_api = None

        # Parse specific model options if there are model parsers
        if self._models is not None:
            if pipeline_options.model not in self._models:
                raise KeyError('Invalid model: {}'.format(
                    pipeline_options.model))

            if config_option:
                extra_args = ['--config', config_option.config] + extra_args

            # Check if there are model parsers
            model_parser = self._models[pipeline_options.model]
            model_options, remaining_args = model_parser.parse_known_args(
                extra_args)

            options.model = model_options
            # Retrieve the respective API for the selected model
            options.model_api = model_parser.api_module
        else:
            remaining_args = extra_args

        options.all_options = merge_namespaces(options.pipeline, options.model)

        if remaining_args:
            raise KeyError('Unrecognized options: {}'.format(remaining_args))

        return options
Example #18
def mbm(imgs : List[MincAtom], options : MBMConf, prefix : str, output_dir : str = ""):

    # TODO could also allow pluggable pipeline parts e.g. LSQ6 could be substituted out for the modified LSQ6
    # for the kidney tips, etc...

    # TODO this is tedious and annoyingly similar to the registration chain ...
    lsq6_dir  = os.path.join(output_dir, prefix + "_lsq6")
    lsq12_dir = os.path.join(output_dir, prefix + "_lsq12")
    nlin_dir  = os.path.join(output_dir, prefix + "_nlin")

    s = Stages()

    if len(imgs) == 0:
        raise ValueError("Please, some files!")

    # FIXME: why do we have to call registration_targets *outside* of lsq6_nuc_inorm? is it just because of the extra
    # options required?  Also, shouldn't options.registration be a required input (as it contains `input_space`) ...?
    targets = registration_targets(lsq6_conf=options.mbm.lsq6,
                                   app_conf=options.application,
                                   first_input_file=imgs[0].path)

    # TODO this is quite tedious and duplicates stuff in the registration chain ...
    resolution = (options.registration.resolution or
                  get_resolution_from_file(targets.registration_standard.path))
    options.registration = options.registration.replace(resolution=resolution)

    # FIXME it probably makes most sense if the lsq6 module itself (even within lsq6_nuc_inorm) handles the run_lsq6
    # setting (via use of the identity transform) since then this doesn't have to be implemented for every pipeline
    if options.mbm.lsq6.run_lsq6:
        lsq6_result = s.defer(lsq6_nuc_inorm(imgs=imgs,
                                             resolution=resolution,
                                             registration_targets=targets,
                                             lsq6_dir=lsq6_dir,
                                             lsq6_options=options.mbm.lsq6))
    else:
        # TODO don't actually do this resampling if not required (i.e., if the imgs already have the same grids)
        identity_xfm = s.defer(param2xfm(out_xfm=FileAtom(name="identity.xfm")))
        lsq6_result  = [XfmHandler(source=img, target=img, xfm=identity_xfm,
                                   resampled=s.defer(mincresample_new(img=img,
                                                                      like=targets.registration_standard,
                                                                      xfm=identity_xfm)))
                        for img in imgs]
    # what about running nuc/inorm without a linear registration step??

    full_hierarchy = get_nonlinear_configuration_from_options(nlin_protocol=options.mbm.nlin.nlin_protocol,
                                                              reg_method=options.mbm.nlin.reg_method,
                                                              file_resolution=resolution)

    lsq12_nlin_result = s.defer(lsq12_nlin_build_model(imgs=[xfm.resampled for xfm in lsq6_result],
                                                       resolution=resolution,
                                                       lsq12_dir=lsq12_dir,
                                                       nlin_dir=nlin_dir,
                                                       nlin_prefix=prefix,
                                                       lsq12_conf=options.mbm.lsq12,
                                                       nlin_conf=full_hierarchy))

    inverted_xfms = [s.defer(invert_xfmhandler(xfm)) for xfm in lsq12_nlin_result.output]

    determinants = s.defer(determinants_at_fwhms(
                             xfms=inverted_xfms,
                             inv_xfms=lsq12_nlin_result.output,
                             blur_fwhms=options.mbm.stats.stats_kernels))

    overall_xfms = [s.defer(concat_xfmhandlers([rigid_xfm, lsq12_nlin_xfm]))
                    for rigid_xfm, lsq12_nlin_xfm in zip(lsq6_result, lsq12_nlin_result.output)]

    output_xfms = (pd.DataFrame({ "rigid_xfm"      : lsq6_result,  # maybe don't return this if LSQ6 not run??
                                  "lsq12_nlin_xfm" : lsq12_nlin_result.output,
                                  "overall_xfm"    : overall_xfms }))
    # we could `merge` the determinants with this table, but preserving information would cause lots of duplication
    # of the transforms (or storing determinants in more columns, but iterating over dynamically known columns
    # seems a bit odd ...)

                            # TODO transpose these fields?})
                            #avg_img=lsq12_nlin_result.avg_img,  # inconsistent w/ WithAvgImgs[...]-style outputs
                           # "determinants"    : determinants })

    #output.avg_img = lsq12_nlin_result.avg_img
    #output.determinants = determinants   # TODO temporary - remove once incorporated properly into `output` proper
    # TODO add more of lsq12_nlin_result?

    # FIXME: this needs to go outside of the `mbm` function to avoid being run from within other pipelines (or
    # those other pipelines need to turn off this option)
    # TODO return some MAGeT stuff from MBM function ??
    # if options.mbm.mbm.run_maget:
    #     import copy
    #     maget_options = copy.deepcopy(options)  #Namespace(maget=options)
    #     #maget_options
    #     #maget_options.maget = maget_options.mbm
    #     #maget_options.execution = options.execution
    #     #maget_options.application = options.application
    #     maget_options.maget = options.mbm.maget
    #     del maget_options.mbm
    #
    #     s.defer(maget([xfm.resampled for xfm in lsq6_result],
    #                   options=maget_options,
    #                   prefix="%s_MAGeT" % prefix,
    #                   output_dir=os.path.join(output_dir, prefix + "_processed")))

    # should also move outside `mbm` function ...
    #if options.mbm.thickness.run_thickness:
    #    if not options.mbm.segmentation.run_maget:
    #        warnings.warn("MAGeT files (atlases, protocols) are needed to run thickness calculation.")
    #    # run MAGeT to segment the nlin average:
    #    import copy
    #    maget_options = copy.deepcopy(options)  #Namespace(maget=options)
    #    maget_options.maget = options.mbm.maget
    #    del maget_options.mbm
    #    segmented_avg = s.defer(maget(imgs=[lsq12_nlin_result.avg_img],
    #                                  options=maget_options,
    #                                  output_dir=os.path.join(options.application.output_directory,
    #                                                          prefix + "_processed"),
    #                                  prefix="%s_thickness_MAGeT" % prefix)).ix[0].img
    #    thickness = s.defer(cortical_thickness(xfms=pd.Series(inverted_xfms), atlas=segmented_avg,
    #                                           label_mapping=FileAtom(options.mbm.thickness.label_mapping),
    #                                           atlas_fwhm=0.56, thickness_fwhm=0.56))  # TODO magic fwhms
    #    # TODO write CSV -- should `cortical_thickness` do this/return a table?


    # FIXME: this needs to go outside of the `mbm` function to avoid being run from within other pipelines (or
    # those other pipelines need to turn off this option)
    if options.mbm.common_space.do_common_space_registration:
        warnings.warn("This feature is experimental ...")
        if not options.mbm.common_space.common_space_model:
            raise ValueError("No common space template provided!")
        # TODO allow lsq6 registration as well ...
        common_space_model = MincAtom(options.mbm.common_space.common_space_model,
                                      pipeline_sub_dir=os.path.join(options.application.output_directory,
                                                         options.application.pipeline_name + "_processed"))
        # TODO allow different lsq12/nlin config params than the ones used in MBM ...
        # WEIRD ... see comment in lsq12_nlin code ...
        nlin_conf  = full_hierarchy.confs[-1] if isinstance(full_hierarchy, MultilevelMincANTSConf) else full_hierarchy
        # also weird that we need to call get_linear_configuration_from_options here ... ?
        lsq12_conf = get_linear_configuration_from_options(conf=options.mbm.lsq12,
                                                           transform_type=LinearTransType.lsq12,
                                                           file_resolution=resolution)
        xfm_to_common = s.defer(lsq12_nlin(source=lsq12_nlin_result.avg_img, target=common_space_model,
                                           lsq12_conf=lsq12_conf, nlin_conf=nlin_conf,
                                           resample_source=True))

        model_common = s.defer(mincresample_new(img=lsq12_nlin_result.avg_img,
                                                xfm=xfm_to_common.xfm, like=common_space_model,
                                                postfix="_common"))

        overall_xfms_common = [s.defer(concat_xfmhandlers([rigid_xfm, nlin_xfm, xfm_to_common]))
                               for rigid_xfm, nlin_xfm in zip(lsq6_result, lsq12_nlin_result.output)]

        xfms_common = [s.defer(concat_xfmhandlers([nlin_xfm, xfm_to_common]))
                       for nlin_xfm in lsq12_nlin_result.output]

        output_xfms = output_xfms.assign(xfm_common=xfms_common, overall_xfm_common=overall_xfms_common)

        log_nlin_det_common, log_full_det_common = [dets.map(lambda d:
                                                      s.defer(mincresample_new(
                                                        img=d,
                                                        xfm=xfm_to_common.xfm,
                                                        like=common_space_model,
                                                        postfix="_common",
                                                        extra_flags=("-keep_real_range",),
                                                        interpolation=Interpolation.nearest_neighbour)))
                                                    for dets in (determinants.log_nlin_det, determinants.log_full_det)]

        determinants = determinants.assign(log_nlin_det_common=log_nlin_det_common,
                                           log_full_det_common=log_full_det_common)

    output = Namespace(avg_img=lsq12_nlin_result.avg_img, xfms=output_xfms, determinants=determinants)

    if options.mbm.common_space.do_common_space_registration:
        output.model_common = model_common

    return Result(stages=s, output=output)
Example #19
File: args.py Project: sailfish009/molpal
def modify_LookupObjective_args(args: Namespace) -> None:
    args.lookup_title_line = not args.no_lookup_title_line
    delattr(args, 'no_lookup_title_line')

    if args.name is None:
        args.name = Path(args.library).stem
Example #20
Parameters can be decentrally defined in any module by getting the global parser via get_settings_parser
and registering them via add_argument(). parse_settings() needs to be called once in the main program;
then all parameters are available under the global Namespace `settings`.
If sys.argv shall not be used, e.g., because pymhlib is embedded in some framework like Django or
a Jupyter notebook, pass "" as args (or some meaningful initialization parameters).

For the usage of config files see the documentation of configargparse or call the program with -h.
"""

import pickle
import random
import numpy as np
from configargparse import ArgParser, Namespace, ArgumentDefaultsRawHelpFormatter

settings = Namespace()  # global Namespace with all settings
unknown_args = []  # global list with all unknown parameters
_parser = None  # single global settings parser


def get_settings_parser() -> ArgParser:
    """Returns the single global argument parser for adding parameters.

    Parameters can be added in all modules by add_argument.
    After calling parse() once in the main program, all settings
    are available in the global settings dictionary.
    """
    global _parser  # pylint: disable=global-statement
    if not _parser:
        _parser = ArgParser(  # default_config_files=["default.cfg"],
            formatter_class=ArgumentDefaultsRawHelpFormatter)
    return _parser
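A short usage sketch of the pattern described above (the parameter name is illustrative; parse_settings is the companion entry point in this module):

    parser = get_settings_parser()
    parser.add_argument("--my_iterations", type=int, default=100,
                        help="an illustrative decentrally registered parameter")

    parse_settings(args="")        # call once in the main program; "" avoids sys.argv
    print(settings.my_iterations)  # parsed values now live on the global `settings`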
Example #21
def mbm(imgs: List[MincAtom],
        options: MBMConf,
        prefix: str,
        output_dir: str = "",
        with_maget: bool = True):

    # TODO could also allow pluggable pipeline parts e.g. LSQ6 could be substituted out for the modified LSQ6
    # for the kidney tips, etc...

    # TODO this is tedious and annoyingly similar to the registration chain ...
    lsq6_dir = os.path.join(output_dir, prefix + "_lsq6")
    lsq12_dir = os.path.join(output_dir, prefix + "_lsq12")
    nlin_dir = os.path.join(output_dir, prefix + "_nlin")

    s = Stages()

    if len(imgs) == 0:
        raise ValueError("Please, some files!")

    # FIXME: why do we have to call registration_targets *outside* of lsq6_nuc_inorm? is it just because of the extra
    # options required?  Also, shouldn't options.registration be a required input (as it contains `input_space`) ...?
    targets = s.defer(
        registration_targets(lsq6_conf=options.mbm.lsq6,
                             app_conf=options.application,
                             reg_conf=options.registration,
                             first_input_file=imgs[0].path))

    # TODO this is quite tedious and duplicates stuff in the registration chain ...
    resolution = (options.registration.resolution or get_resolution_from_file(
        targets.registration_standard.path))
    options.registration = options.registration.replace(resolution=resolution)

    # FIXME: this needs to go outside of the `mbm` function to avoid being run from within other pipelines (or
    # those other pipelines need to turn off this option)
    if with_maget:
        if options.mbm.segmentation.run_maget or options.mbm.maget.maget.mask:

            # temporary fix...?
            if options.mbm.maget.maget.mask and not options.mbm.segmentation.run_maget:
                # which means that --no-run-maget was specified
                if options.mbm.maget.maget.atlas_lib is None:
                    # clearly you do not want to run MAGeT at any point in this pipeline
                    err_msg_maget = "\nYou specified not to run MAGeT using the " \
                                    "--no-run-maget flag. However, the code also " \
                                    "wants to use MAGeT to generate masks for your " \
                                    "input files after the 6 parameter alignment (lsq6). " \
                                    "Because you did not specify a MAGeT atlas library " \
                                    "this can not be done. \nTo run the pipeline without " \
                                    "using MAGeT to mask your input files, please also " \
                                    "specify: \n--maget-no-mask\n"
                    raise ValueError(err_msg_maget)

            import copy
            maget_options = copy.deepcopy(options)  #Namespace(maget=options)
            #maget_options
            #maget_options.maget = maget_options.mbm
            #maget_options.execution = options.execution
            #maget_options.application = options.application
            #maget_options.application.output_directory = os.path.join(options.application.output_directory, "segmentation")
            maget_options.maget = options.mbm.maget

            fixup_maget_options(maget_options=maget_options.maget,
                                nlin_options=maget_options.mbm.nlin,
                                lsq12_options=maget_options.mbm.lsq12)
            del maget_options.mbm

        #def with_new_output_dir(img : MincAtom):
        #img = copy.copy(img)
        #img.pipeline_sub_dir = img.pipeline_sub_dir + img.output_dir
        #img.
        #return img.newname_with_suffix(suffix="", subdir="segmentation")

    # FIXME it probably makes most sense if the lsq6 module itself (even within lsq6_nuc_inorm) handles the run_lsq6
    # setting (via use of the identity transform) since then this doesn't have to be implemented for every pipeline
    if options.mbm.lsq6.run_lsq6:
        lsq6_result = s.defer(
            lsq6_nuc_inorm(imgs=imgs,
                           resolution=resolution,
                           registration_targets=targets,
                           lsq6_dir=lsq6_dir,
                           lsq6_options=options.mbm.lsq6))
    else:
        # FIXME the code shouldn't branch here based on run_lsq6 (which should probably
        # be part of the lsq6 options rather than the MBM ones; see comments on #287.
        # TODO don't actually do this resampling if not required (i.e., if the imgs already have the same grids)??
        # however, for now need to add the masks:
        identity_xfm = s.defer(
            param2xfm(
                out_xfm=FileAtom(name=os.path.join(lsq6_dir, 'tmp', "id.xfm"),
                                 pipeline_sub_dir=lsq6_dir,
                                 output_sub_dir='tmp')))
        lsq6_result = [
            XfmHandler(source=img,
                       target=img,
                       xfm=identity_xfm,
                       resampled=s.defer(
                           mincresample_new(img=img,
                                            like=targets.registration_standard,
                                            xfm=identity_xfm))) for img in imgs
        ]
    # what about running nuc/inorm without a linear registration step??

    if with_maget and options.mbm.maget.maget.mask:
        masking_imgs = copy.deepcopy([xfm.resampled for xfm in lsq6_result])
        masked_img = (s.defer(
            maget_mask(imgs=masking_imgs,
                       resolution=resolution,
                       maget_options=maget_options.maget,
                       pipeline_sub_dir=os.path.join(
                           options.application.output_directory,
                           "%s_atlases" % prefix))))

        masked_img.index = masked_img.apply(lambda x: x.path)

        # replace any masks of the resampled images with the newly created masks:
        for xfm in lsq6_result:
            xfm.resampled = masked_img.loc[xfm.resampled.path]
    elif with_maget:
        warnings.warn(
            "Not masking your images from atlas masks after LSQ6 alignment ... probably not what you want "
            "(this can have negative effects on your registration and statistics)"
        )

    #full_hierarchy = get_nonlinear_configuration_from_options(nlin_protocol=options.mbm.nlin.nlin_protocol,
    #                                                          flag_nlin_protocol=next(iter(options.mbm.nlin.flags_.nlin_protocol)),
    #                                                         reg_method=options.mbm.nlin.reg_method,
    #                                                          file_resolution=resolution)

    #I = TypeVar("I")
    #X = TypeVar("X")
    #def wrap_minc(nlin_module: NLIN[I, X]) -> type[NLIN[MincAtom, XfmAtom]]:
    #    class N(NLIN[MincAtom, XfmAtom]): pass

    # TODO now the user has to call get_nonlinear_component followed by parse_<...>; previously various things
    # like lsq12_nlin_pairwise all branched on the reg_method so one didn't have to call get_nonlinear_component;
    # they could still do this if it can be done safety (i.e., not breaking assumptions of various nonlinear units)
    nlin_module = get_nonlinear_component(
        reg_method=options.mbm.nlin.reg_method)

    nlin_build_model_component = get_model_building_procedure(
        options.mbm.nlin.reg_strategy,
        # was: model_building.reg_strategy
        reg_module=nlin_module)

    # does this belong here?
    # def model_building_with_initial_target_generation(prelim_model_building_component,
    #                                                   final_model_building_component):
    #     class C(final_model_building_component):
    #         @staticmethod
    #         def build_model(imgs,
    #                         conf     : BuildModelConf,
    #                         nlin_dir,
    #                         nlin_prefix,
    #                         initial_target,
    #                         output_name_wo_ext = None): pass
    #
    #     return C

    #if options.mbm.model_building.prelim_reg_strategy is not None:
    #    prelim_nlin_build_model_component = get_model_building_procedure(options.mbm.model_building.prelim_reg_strategy,
    #                                                                     reg_module=nlin_module)
    #    nlin_build_model_component = model_building_with_initial_target_generation(
    #                                   final_model_building_component=nlin_build_model_component,
    #                                   prelim_model_building_component=prelim_nlin_build_model_component)

    # TODO don't use name 'x_module' for something that's technically not a module ... perhaps unit/component?

    # TODO tedious: why can't parse_build_model_protocol handle the null protocol case? is this something we want?
    nlin_conf = (nlin_build_model_component.parse_build_model_protocol(
        options.mbm.nlin.nlin_protocol, resolution=resolution)
                 if options.mbm.nlin.nlin_protocol is not None else
                 nlin_build_model_component.get_default_build_model_conf(
                     resolution=resolution))

    lsq12_nlin_result = s.defer(
        lsq12_nlin_build_model(
            nlin_module=nlin_build_model_component,
            imgs=[xfm.resampled for xfm in lsq6_result],
            lsq12_dir=lsq12_dir,
            nlin_dir=nlin_dir,
            nlin_prefix=prefix,
            use_robust_averaging=options.mbm.nlin.use_robust_averaging,
            resolution=resolution,
            lsq12_conf=options.mbm.lsq12,
            nlin_conf=nlin_conf))  #options.mbm.nlin

    inverted_xfms = [
        s.defer(invert_xfmhandler(xfm)) for xfm in lsq12_nlin_result.output
    ]

    if options.mbm.stats.stats_kernels:
        determinants = s.defer(
            determinants_at_fwhms(xfms=inverted_xfms,
                                  inv_xfms=lsq12_nlin_result.output,
                                  blur_fwhms=options.mbm.stats.stats_kernels))
    else:
        determinants = None

    overall_xfms = [
        s.defer(concat_xfmhandlers([rigid_xfm, lsq12_nlin_xfm])) for rigid_xfm,
        lsq12_nlin_xfm in zip(lsq6_result, lsq12_nlin_result.output)
    ]

    output_xfms = (
        pd.DataFrame({
            "rigid_xfm":
            lsq6_result,  # maybe don't return this if LSQ6 not run??
            "lsq12_nlin_xfm": lsq12_nlin_result.output,
            "overall_xfm": overall_xfms
        }))
    # we could `merge` the determinants with this table, but preserving information would cause lots of duplication
    # of the transforms (or storing determinants in more columns, but iterating over dynamically known columns
    # seems a bit odd ...)

    # TODO transpose these fields?})
    #avg_img=lsq12_nlin_result.avg_img,  # inconsistent w/ WithAvgImgs[...]-style outputs
    # "determinants"    : determinants })

    #output.avg_img = lsq12_nlin_result.avg_img
    #output.determinants = determinants   # TODO temporary - remove once incorporated properly into `output` proper
    # TODO add more of lsq12_nlin_result?

    # FIXME moved above rest of registration for debugging ... shouldn't use and destructively modify lsq6_result!!!
    if with_maget and options.mbm.segmentation.run_maget:
        maget_options = copy.deepcopy(maget_options)
        maget_options.maget.maget.mask = maget_options.maget.maget.mask_only = False  # already done above
        # use the original masks here otherwise the masking step will be re-run due to the previous masking run's
        # masks having been applied to the input images:
        maget_result = s.defer(
            maget(
                [xfm.resampled for xfm in lsq6_result],
                #[xfm.resampled for _ix, xfm in mbm_result.xfms.rigid_xfm.iteritems()],
                options=maget_options,
                prefix="%s_MAGeT" % prefix,
                output_dir=os.path.join(output_dir, prefix + "_processed")))
        # FIXME add pipeline dir to path and uncomment!
        #maget.to_csv(path_or_buf="segmentations.csv", columns=['img', 'voted_labels'])

    # TODO return some MAGeT stuff from MBM function ??
    # if options.mbm.mbm.run_maget:
    #     import copy
    #     maget_options = copy.deepcopy(options)  #Namespace(maget=options)
    #     #maget_options
    #     #maget_options.maget = maget_options.mbm
    #     #maget_options.execution = options.execution
    #     #maget_options.application = options.application
    #     maget_options.maget = options.mbm.maget
    #     del maget_options.mbm
    #
    #     s.defer(maget([xfm.resampled for xfm in lsq6_result],
    #                   options=maget_options,
    #                   prefix="%s_MAGeT" % prefix,
    #                   output_dir=os.path.join(output_dir, prefix + "_processed")))

    # should also move outside `mbm` function ...
    #if options.mbm.thickness.run_thickness:
    #    if not options.mbm.segmentation.run_maget:
    #        warnings.warn("MAGeT files (atlases, protocols) are needed to run thickness calculation.")
    #    # run MAGeT to segment the nlin average:
    #    import copy
    #    maget_options = copy.deepcopy(options)  #Namespace(maget=options)
    #    maget_options.maget = options.mbm.maget
    #    del maget_options.mbm
    #    segmented_avg = s.defer(maget(imgs=[lsq12_nlin_result.avg_img],
    #                                  options=maget_options,
    #                                  output_dir=os.path.join(options.application.output_directory,
    #                                                          prefix + "_processed"),
    #                                  prefix="%s_thickness_MAGeT" % prefix)).ix[0].img
    #    thickness = s.defer(cortical_thickness(xfms=pd.Series(inverted_xfms), atlas=segmented_avg,
    #                                           label_mapping=FileAtom(options.mbm.thickness.label_mapping),
    #                                           atlas_fwhm=0.56, thickness_fwhm=0.56))  # TODO magic fwhms
    #    # TODO write CSV -- should `cortical_thickness` do this/return a table?

    output = Namespace(avg_img=lsq12_nlin_result.avg_img,
                       xfms=output_xfms,
                       determinants=determinants)

    if with_maget and options.mbm.segmentation.run_maget:
        output.maget_result = maget_result

        nlin_maget = (
            s.defer(
                maget(
                    [lsq12_nlin_result.avg_img],
                    #[xfm.resampled for _ix, xfm in mbm_result.xfms.rigid_xfm.iteritems()],
                    options=maget_options,
                    prefix="%s_nlin_MAGeT" % prefix,
                    output_dir=os.path.join(
                        output_dir,
                        prefix + "_processed")))).iloc[0]  #.voted_labels
        #output.avg_img.mask = nlin_maget.mask  # makes more sense, but might have weird effects elsewhere
        output.avg_img.labels = nlin_maget.labels

    return Result(stages=s, output=output)
Example #22
def generate_from_cli(options: configargparse.Namespace):
    """Generate from cli with options.

    Arguments:
        options (configargparse.Namespace): config options
    """
    if options.bcs is None:
        options.bcs = []
    options.unit_per_row = int(options.length / options.length_unit)
    possible_n = options.unit_per_row**options.ndim
    np.random.seed(options.seed)
    if options.sampler == "uniform":
        sampler = np.random.choice
    else:  # guard: `sampler` would otherwise be unbound below
        raise ValueError(f"unsupported sampler: {options.sampler}")
    powers_all = [
        sampler(options.power, options.unit_n) for _ in range(options.sample_n)
    ]  # powers for each sample
    layout_pos_lists = [
        sampler(possible_n, options.unit_n, replace=False)
        for _ in range(options.sample_n)
    ]
    # # superposition principle
    # if options.method == 'fenics_additive':
    #     u_basis = []
    #     for layout_pos in tqdm.trange(possible_n):
    #         u = run_solver(options.length, options.length_unit, options.bcs,
    #                          layout_pos,
    #                        0, [1.], options.nx, options.ny, False)
    #         u_basis.append(u)
    #     for i in tqdm.trange(options.sample_n):
    #         layout_pos_list = sorted(
    #             sampler(possible_n, options.unit_n, replace=False))
    #         u_elt = (powers[i] * u_basis[pos]
    #                  for i, pos in enumerate(layout_pos_list))
    #         U = reduce(np.add, u_elt) / options.unit_n + options.u_D
    #         F = io.layout2matrix(options.nx, options.ny,
    #                              unit_per_row, powers, layout_pos_list)
    #         xs, ys = get_mesh_grid(options.length, options.nx, options.ny)
    #         io.save(options, i, U, xs, ys, F, layout_pos_list)
    # without the superposition principle
    if options.method == "fenics":
        # build a single-argument function (for Pool.imap_unordered)
        method_fenics_p = partial(
            method_fenics,
            options=options,
            sampler=sampler,
            powers_all=powers_all,
            layout_pos_lists=layout_pos_lists,
        )

        # for i in range(options.sample_n):
        #     method_fenics_p(i)

        # multiprocess support
        with Pool(options.worker) as pool:
            pool_it = pool.imap_unordered(method_fenics_p,
                                          range(options.sample_n))
            for _ in tqdm.tqdm(
                    pool_it,
                    desc=f"{pool._processes} workers's running",
                    total=options.sample_n,
                    ncols=100,
            ):
                pass

    print(f"Generated {options.sample_n} layouts in {options.data_dir}")
Example #23
 def process_init(s: Namespace, new_seed: int):
     """Initialization of new worker process."""
     s.seed = new_seed
     set_settings(s)
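A hedged sketch of wiring such an initializer into worker processes (worker_main, the seed values, and the imported settings object are illustrative assumptions):

    from multiprocessing import Process

    def worker_main(s: Namespace, seed: int):
        process_init(s, seed)  # install settings and the per-worker seed
        # ... actual work that reads the now-global settings ...

    procs = [Process(target=worker_main, args=(settings, seed))
             for seed in (17, 42, 99)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()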
Example #24
 def mock_options(self):
     return Namespace(asset_store_url_prefix='http://skygear.dev/files',
                      asset_store_secret='asset_secret',
                      asset_store_public=False)
Example #25
def tamarack(imgs: pd.DataFrame, options):
    # columns of the input df: `img` : MincAtom, `timept` : number, ...
    # columns of the pride of models : 'timept' : number, 'model' : MincAtom
    s = Stages()

    # TODO some assertions that the pride_of_models, if provided, is correct, and that this is intended target type

    def group_options(options, timepoint):
        options = copy.deepcopy(options)

        if options.mbm.lsq6.target_type == TargetType.pride_of_models:
            options = copy.deepcopy(options)
            targets = get_closest_model_from_pride_of_models(
                pride_of_models_dict=get_pride_of_models_mapping(
                    pride_csv=options.mbm.lsq6.target_file,
                    output_dir=options.application.output_directory,
                    pipeline_name=options.application.pipeline_name),
                time_point=timepoint)

            options.mbm.lsq6 = options.mbm.lsq6.replace(
                target_type=TargetType.initial_model,
                target_file=targets.registration_standard.path)

        #    resolution = (options.registration.resolution
        #                  or get_resolution_from_file(targets.registration_standard.path))
        #    options.registration = options.registration.replace(resolution=resolution)

        # FIXME use of registration_standard here is quite wrong ...
        # part of the trouble is that mbm calls registration_targets itself,
        # so we can't send this RegistrationTargets to `mbm` directly ...
        # one option: add yet another optional arg to `mbm` ...
        else:
            targets = s.defer(
                registration_targets(lsq6_conf=options.mbm.lsq6,
                                     app_conf=options.application,
                                     reg_conf=options.registration,
                                     first_input_file=imgs.filename.iloc[0]))

        resolution = (options.registration.resolution
                      or get_resolution_from_file(
                          targets.registration_standard.path))

        # This must happen after calling registration_targets otherwise it will resample to options.registration.resolution
        options.registration = options.registration.replace(
            resolution=resolution)

        return options

    # build all first-level models:
    first_level_results = (
        imgs  # TODO 'group' => 'timept' ?
        # the usual annoying pattern to do an aggregate with access
        # to the groupby object's keys ... TODO: fix
        .groupby('group', as_index=False)
        .aggregate({'file': lambda files: list(files)})
        .rename(columns={'file': "files"})
        .assign(options=lambda df: df.apply(
            axis=1, func=lambda row: group_options(options, row.group)))
        .assign(build_model=lambda df: df.apply(
            axis=1,
            func=lambda row: s.defer(
                mbm(imgs=row.files,
                    options=row.options,
                    prefix="%s" % row.group,
                    output_dir=os.path.join(
                        options.application.output_directory,
                        options.application.pipeline_name + "_first_level",
                        "%s_processed" % row.group)))))
        .sort_values(by='group'))

    if all(
            first_level_results.options.map(
                lambda opts: opts.registration.resolution) ==
            first_level_results.options.iloc[0].registration.resolution):
        options.registration = options.registration.replace(
            resolution=first_level_results.options.iloc[0].registration.
            resolution)
    else:
        raise ValueError(
            "some first-level models are run at different resolutions, possibly not what you want ..."
        )

    # construction of the overall inter-average transforms will be done iteratively (for efficiency/aesthetics),
    # which doesn't really fit the DataFrame mold ...

    full_hierarchy = get_nonlinear_configuration_from_options(
        nlin_protocol=options.mbm.nlin.nlin_protocol,
        reg_method=options.mbm.nlin.reg_method,
        file_resolution=options.registration.resolution)

    # FIXME no good can come of this
    nlin_protocol = full_hierarchy.confs[-1] if isinstance(
        full_hierarchy, MultilevelANTSConf) else full_hierarchy
    # first register consecutive averages together:
    average_registrations = (
        first_level_results[:-1].assign(
            next_model=list(first_level_results[1:].build_model))
        # TODO: we should be able to do lsq6 registration here as well!
        .assign(xfm=lambda df: df.apply(
            axis=1,
            func=lambda row: s.defer(
                lsq12_nlin(source=row.build_model.avg_img,
                           target=row.next_model.avg_img,
                           lsq12_conf=get_linear_configuration_from_options(
                               options.mbm.lsq12,
                               transform_type=LinearTransType.lsq12,
                               file_resolution=options.registration.resolution
                           ),
                           nlin_conf=nlin_protocol)))))

    # now compose the above transforms to produce transforms from each average to the common average:
    common_time_pt = options.tamarack.common_time_pt
    common_model = first_level_results[
        first_level_results.group ==
        common_time_pt].iloc[0].build_model.avg_img
    #common = average_registrations[average_registrations.group == common_time_pt].iloc[0]
    before = average_registrations[
        average_registrations.group <
        common_time_pt]  # asymmetry in before/after since
    after = average_registrations[
        average_registrations.group >=
        common_time_pt]  # we used `next_`, not `previous_`

    # compose 1st and 2nd level transforms and resample into the common average space:
    def suffixes(xs):
        if len(xs) == 0:
            return [[]]
        else:
            ys = suffixes(xs[1:])
            return [[xs[0]] + ys[0]] + ys

    def prefixes(xs):
        if len(xs) == 0:
            return [[]]
        else:
            ys = prefixes(xs[1:])
            return ys + [ys[-1] + [xs[0]]]
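    # e.g. suffixes([1, 2, 3]) == [[1, 2, 3], [2, 3], [3], []]
    # and  prefixes([1, 2, 3]) == [[], [3], [3, 2], [3, 2, 1]]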

    xfms_to_common = (first_level_results.assign(
        uncomposed_xfms=suffixes(list(before.xfm))[:-1] + [None] +
        prefixes(list(after.xfm))[1:]).assign(
            xfm_to_common=lambda df: df.apply(
                axis=1,
                func=lambda row: ((lambda x: s.defer(invert_xfmhandler(
                    x)) if row.group >= common_time_pt else x)(s.defer(
                        concat_xfmhandlers(
                            row.uncomposed_xfms,
                            name=("%s_to_common" if row.group < common_time_pt
                                  else "%s_from_common") % row.group))))
                if row.uncomposed_xfms is not None else None)).drop(
                    'uncomposed_xfms', axis=1))  # TODO None => identity??

    # TODO indexing here is not good ...
    first_level_determinants = pd.concat(
        list(first_level_results.build_model.apply(
            lambda x: x.determinants.assign(first_level_avg=x.avg_img))),
        ignore_index=True)

    resampled_determinants = (pd.merge(
        left=first_level_determinants,
        right=xfms_to_common.assign(source=lambda df: df.xfm_to_common.apply(
            lambda x: x.source if x is not None else None)),
        left_on="first_level_avg",
        right_on='source').assign(
            resampled_log_full_det=lambda df: df.apply(
                axis=1,
                func=lambda row: s.defer(
                    mincresample_new(img=row.log_full_det,
                                     xfm=row.xfm_to_common.xfm,
                                     like=common_model))
                if row.xfm_to_common is not None else row.img),
            resampled_log_nlin_det=lambda df: df.apply(
                axis=1,
                func=lambda row: s.defer(
                    mincresample_new(img=row.log_nlin_det,
                                     xfm=row.xfm_to_common.xfm,
                                     like=common_model))
                if row.xfm_to_common is not None else row.img)))

    inverted_overall_xfms = pd.Series({
        xfm: (s.defer(concat_xfmhandlers([xfm, row.xfm_to_common]))
              if row.xfm_to_common is not None else xfm)
        for _ix, row in xfms_to_common.iterrows()
        for xfm in row.build_model.xfms.lsq12_nlin_xfm
    })

    overall_xfms = inverted_overall_xfms.apply(
        lambda x: s.defer(invert_xfmhandler(x)))

    overall_determinants = determinants_at_fwhms(
        xfms=overall_xfms,
        blur_fwhms=options.mbm.stats.stats_kernels,
        inv_xfms=inverted_overall_xfms)

    # TODO turn off bootstrap as with two-level code?

    # TODO combine into one data frame
    return Result(stages=s,
                  output=Namespace(
                      first_level_results=first_level_results,
                      overall_determinants=overall_determinants,
                      resampled_determinants=resampled_determinants.drop(
                          ['options'], axis=1)))
Example #26
def NLIN_pipeline(options):

    output_dir = options.application.output_directory
    pipeline_name = options.application.pipeline_name

    # TODO this is tedious and annoyingly similar to the registration chain and MBM and LSQ6 ...
    processed_dir = os.path.join(output_dir, pipeline_name + "_processed")
    nlin_dir = os.path.join(output_dir, pipeline_name + "_nlin")

    resolution = (options.registration.resolution  # TODO does using the finest resolution here make sense?
                  or min(get_resolution_from_file(f) for f in options.application.files))

    imgs = get_imgs(options.application)

    initial_target_mask = MincAtom(
        options.nlin.target_mask) if options.nlin.target_mask else None
    initial_target = MincAtom(options.nlin.target, mask=initial_target_mask)

    nlin_module = get_nonlinear_component(reg_method=options.nlin.reg_method)

    nlin_build_model_component = get_model_building_procedure(
        options.nlin.reg_strategy, reg_module=nlin_module)

    nlin_conf = (nlin_build_model_component.parse_build_model_protocol(
        options.nlin.nlin_protocol, resolution=resolution)
                 if options.nlin.nlin_protocol is not None else
                 nlin_build_model_component.get_default_build_model_conf(
                     resolution=resolution))

    s = Stages()

    nlin_result = s.defer(
        nlin_build_model_component.build_model(
            imgs=imgs,
            initial_target=initial_target,
            conf=nlin_conf,
            nlin_dir=nlin_dir,
            use_robust_averaging=options.nlin.use_robust_averaging,
            nlin_prefix=""))

    inverted_xfms = [
        s.defer(invert_xfmhandler(xfm)) for xfm in nlin_result.output
    ]

    if options.stats.calc_stats:

        determinants = s.defer(
            determinants_at_fwhms(xfms=inverted_xfms,
                                  inv_xfms=nlin_result.output,
                                  blur_fwhms=options.stats.stats_kernels))

        return Result(stages=s,
                      output=Namespace(nlin_xfms=nlin_result,
                                       avg_img=nlin_result.avg_img,
                                       determinants=determinants))
    else:
        # there's no consistency in what gets returned, yikes ...
        return Result(stages=s,
                      output=Namespace(nlin_xfms=nlin_result,
                                       avg_img=nlin_result.avg_img))
Example #27
def generate_from_cli(options: configargparse.Namespace):
    """Generate from cli with options.

    Arguments:
        options (configargparse.Namespace): config options
    """
    if options.bcs is None:
        options.bcs = []
    np.random.seed(options.seed)
    seeds = np.random.randint(2**32, size=options.worker)
    print(options.worker)
    print(seeds)
    seeds_q: Queue = Queue()
    for seed in seeds:
        seeds_q.put(seed)

    unit_n = len(options.units)
    geometry = ["r"] * unit_n

    positions = np.array(options.positions)
    if options.positions_type == "coord":
        pass
    elif options.positions_type == "grid":
        positions = positions / (options.nx + 1) * options.length
    else:
        raise LookupError(f"Type {options.positions_type} is not supported!")

    task = get_task_powers_sampling(
        geometry_board="s",
        size_board=options.length,
        grid_board=options.nx + 1,
        geometry=geometry,
        size=options.units,
        angle=options.angles,
        intensity=options.powers,
        rad=False,
        position=positions,
    )
    task.warmup()  # warm up, especially for gibbs

    observation = options.observation_points is not None
    # print(options)

    if options.method == "fenics":
        # create a single-argument function (so it can be mapped over sample indices)

        method_fenics_p = partial(
            method_fenics,
            options=options,
            sampler=task.sample,
            task=task,
            observation=observation,
        )

        # serial fallback, useful for debugging:
        # for i in range(options.sample_n):
        #     method_fenics_p(i)

        # multiprocess support
        with Pool(options.worker, initializer=pool_init,
                  initargs=(seeds_q, )) as pool:
            pool_it = pool.imap(method_fenics_p, range(options.sample_n))
            # for i in pool_it:
            #     print(i)
            for _ in tqdm.tqdm(
                    pool_it,
                    desc=f"{pool._processes} workers's running",
                    total=options.sample_n,
                    ncols=100,
            ):
                pass

    print(f"Generated {options.sample_n} layouts in {options.data_dir}")
Example #28
def parse(parser: Parser, args: List[str]) -> Namespace:
    # TODO: accepting a comma-separated list might allow more flexibility
    default_config_file = os.getenv("PYDPIPER_CONFIG_FILE")
    if default_config_file is not None:
        try:
            with open(default_config_file) as _:
                pass
        except OSError:
            warnings.warn(
                f"PYDPIPER_CONFIG_FILE is set to '{default_config_file}', which can't be opened."
            )
    config_files = [default_config_file] if default_config_file else []

    # First, build a parser that's aware of all options
    # (will be used for help/version/error messages).
    # This must be tried _before_ the partial parsing attempts
    # in order to get correct help/version messages.

    main_parser = ArgParser(default_config_files=config_files)

    # TODO: abstract out the recursive travels in go_1 and go_2 into a `walk` function
    def go_1(p, current_prefix):
        if isinstance(p, BaseParser):
            g = main_parser.add_argument_group(p.group_name)
            for a in p.argparser._actions:
                new_a = copy.copy(a)
                ss = copy.deepcopy(new_a.option_strings)
                for ix, s in enumerate(new_a.option_strings):
                    if s.startswith("--"):
                        ss[ix] = "-" + current_prefix + "-" + s[
                            2:]  # "" was "-"
                    else:
                        raise NotImplementedError(
                            "sorry, I only understand flags starting with `--` at the moment, but got %s"
                            % s)
                new_a.option_strings = ss
                g._add_action(new_a)
        elif isinstance(p, CompoundParser):
            for q in p.parsers:
                go_1(
                    q.parser, current_prefix +
                    (('-' + q.prefix) if q.prefix is not None else ''))
        else:
            raise TypeError(
                "parser %s wasn't a %s (%s or %s) but a %s" %
                (p, Parser, BaseParser, CompoundParser, p.__class__))

    go_1(parser, "")

    # Use this parser to exit with a helpful message if parse fails or --help/--version specified:
    main_parser.parse_args(args)

    # Now, use parse_known_args for each parser in the tree of parsers to fill the appropriate namespace object ...
    def go_2(p, current_prefix, current_ns):
        if isinstance(p, BaseParser):
            new_p = ArgParser(default_config_files=config_files)
            for a in p.argparser._actions:
                new_a = copy.copy(a)
                ss = copy.deepcopy(new_a.option_strings)
                for ix, s in enumerate(new_a.option_strings):
                    if s.startswith("--"):
                        ss[ix] = "-" + current_prefix + "-" + s[2:]
                    else:
                        raise NotImplementedError
                new_a.option_strings = ss
                new_p._add_action(new_a)
            _used_args, _rest = new_p.parse_known_args(args,
                                                       namespace=current_ns)
            # add a "_flags" field to each object so we know what flags caused a certain option to be set:
            # (however, note that post-parsing we may munge around ...)
            flags_dict = defaultdict(set)
            for action in new_p._actions:
                for opt in action.option_strings:
                    flags_dict[action.dest].add(opt)
            current_ns.flags_ = Namespace(**flags_dict)
            # TODO: could continue parsing from `_rest` instead of original `args`
        elif isinstance(p, CompoundParser):
            current_ns.flags_ = set()  # could also check for the CompoundParser case and not
            # set flags there, since there will never be any
            for q in p.parsers:
                ns = Namespace()
                if q.namespace in current_ns.__dict__:
                    raise ValueError("Namespace field '%s' already in use" %
                                     q.namespace)
                    # TODO could also allow, say, a None
                else:
                    # gross but how to write n-ary identity fn that behaves sensibly on single arg??
                    current_ns.__dict__[q.namespace] = ns
                    # FIXME this casting doesn't work for configurations with positional arguments,
                    # which aren't unpacked correctly -- better to use a namedtuple
                    # (making all arguments keyword-only also works, but then you have to supply
                    # often meaningless defaults in the __init__)
                go_2(q.parser,
                     current_prefix=current_prefix +
                     (('-' + q.prefix) if q.prefix is not None else ''),
                     current_ns=ns)
                # If a cast function is provided, apply it to the namespace, possibly doing dynamic type checking
                # and also allowing the checker to provide hinting for the types of the fields
                flags = ns.flags_
                del ns.flags_
                fixed = (
                    q.cast(
                        current_ns.__dict__[q.namespace]
                    )  #(q.cast(**vars(current_ns.__dict__[q.namespace]))
                    if q.cast else current_ns.__dict__[q.namespace])
                if isinstance(fixed, tuple):
                    fixed = fixed.replace(flags_=flags)
                elif isinstance(fixed, Namespace):
                    setattr(fixed, "flags_", flags)
                else:
                    raise ValueError(
                        "currently only Namespace and NamedTuple objects are supported return types from "
                        "parsing; got %s (a %s)" % (fixed, type(fixed)))
                current_ns.__dict__[q.namespace] = fixed
                # TODO current_ns or current_namespace or ns or namespace?
        else:
            raise TypeError(
                "parser %s wasn't a %s (%s or %s) but a %s" %
                (p, Parser, BaseParser, CompoundParser, p.__class__))

    main_ns = Namespace()
    go_2(parser, current_prefix="", current_ns=main_ns)
    return main_ns
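To make the flag-prefixing concrete: `current_prefix` accumulates the chain of parser prefixes, so for a parser nested under prefix `lsq6` it becomes `-lsq6`, and a flag `--resolution` is rewritten as `--lsq6-resolution`. A standalone sketch of the renaming rule (illustrative only, independent of the Parser/CompoundParser machinery):

    def prefix_flag(flag: str, current_prefix: str) -> str:
        # mirrors the rewriting in go_1/go_2: '--foo' -> '-<prefix>-foo'
        if not flag.startswith("--"):
            raise NotImplementedError("only '--'-style flags are handled")
        return "-" + current_prefix + "-" + flag[2:]

    print(prefix_flag("--resolution", "-lsq6"))  # --lsq6-resolution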
Example #29
def two_level(grouped_files_df, options: TwoLevelConf):
    """
    grouped_files_df - must contain 'group':<any comparable, sortable type> and 'file':MincAtom columns
    """  # TODO weird naming since the grouped_files_df isn't a GroupBy object?  just files_df?
    s = Stages()

    if grouped_files_df.isnull().values.any():
        raise ValueError("NaN values in input dataframe; can't continue")

    if options.mbm.lsq6.target_type == TargetType.bootstrap:
        # won't work since the second level part tries to get the resolution of *its* "first input file", which
        # hasn't been created.  We could instead pass in a resolution to the `mbm` function,
        # but instead disable for now:
        raise ValueError(
            "Bootstrap model building currently doesn't work with this pipeline; "
            "just specify an initial target instead")
    elif options.mbm.lsq6.target_type == TargetType.pride_of_models:
        pride_of_models_mapping = get_pride_of_models_mapping(
            pride_csv=options.mbm.lsq6.target_file,
            output_dir=options.application.output_directory,
            pipeline_name=options.application.pipeline_name)

    # FIXME this is the same as in the 'tamarack' except for names of arguments/enclosing variables
    def group_options(options, group):
        options = copy.deepcopy(options)

        if options.mbm.lsq6.target_type == TargetType.pride_of_models:

            targets = get_closest_model_from_pride_of_models(
                pride_of_models_dict=pride_of_models_mapping, time_point=group)

            options.mbm.lsq6 = options.mbm.lsq6.replace(
                target_type=TargetType.initial_model,
                target_file=targets.registration_standard.path)
        else:
            # this will ensure that all groups have the same resolution -- is it necessary?
            targets = s.defer(
                registration_targets(
                    lsq6_conf=options.mbm.lsq6,
                    app_conf=options.application,
                    reg_conf=options.registration,
                    first_input_file=grouped_files_df.file.iloc[0]))

        resolution = (options.registration.resolution
                      or get_resolution_from_file(
                          targets.registration_standard.path))
        # This must happen after calling registration_targets otherwise it will resample to options.registration.resolution
        options.registration = options.registration.replace(
            resolution=resolution)
        # no need to check common space settings here since they're turned off at the parser level
        # (a bit strange)
        return options

    first_level_results = (
        grouped_files_df
        .groupby('group', as_index=False)   # the usual annoying pattern to do an aggregate with access
        .aggregate({'file': list})          # to the groupby object's keys ... TODO: fix
        .rename(columns={'file': "files"})
        .assign(build_model=lambda df: df.apply(
            axis=1,
            func=lambda row: s.defer(
                mbm(imgs=row.files,
                    options=group_options(options, row.group),
                    prefix="%s" % row.group,
                    output_dir=os.path.join(
                        options.application.output_directory,
                        options.application.pipeline_name + "_first_level",
                        "%s_processed" % row.group))))))

    # TODO replace .assign(...apply(...)...) with just an apply, producing a series right away?

    # FIXME right now the same options set is being used for both levels -- use options.first/second_level
    second_level_options = copy.deepcopy(options)
    second_level_options.mbm.lsq6 = second_level_options.mbm.lsq6.replace(
        run_lsq6=False)
    second_level_options.mbm.segmentation.run_maget = False
    second_level_options.mbm.maget.maget.mask_only = False
    second_level_options.mbm.maget.maget.mask = False

    # FIXME this is probably a hack -- instead add a --second-level-init-model option to specify which timepoint should be used
    # as the initial model in the second level ???  (at this point it doesn't matter due to lack of lsq6 ...)
    if second_level_options.mbm.lsq6.target_type == TargetType.pride_of_models:
        second_level_options.mbm.lsq6 = second_level_options.mbm.lsq6.replace(
            # target doesn't really matter as no lsq6 here, just used for resolution...
            target_type=TargetType.target,
            target_file=list(pride_of_models_mapping.values())[0].registration_standard.path)

    # NOTE: running lsq6_nuc_inorm here doesn't work in general (but possibly with rotational minctracc)
    # since the native-space initial model is used, but our images are
    # already in standard space (as we resampled there after the 1st-level lsq6).
    # On the other hand, we might want to run it here (although of course NOT nuc/inorm!) in the future,
    # for instance given a 'pride' of models (one for each group).

    second_level_results = s.defer(
        mbm(imgs=first_level_results.build_model.map(lambda m: m.avg_img),
            options=second_level_options,
            prefix=os.path.join(
                options.application.output_directory,
                options.application.pipeline_name + "_second_level")))

    # FIXME sadly, `mbm` doesn't return a pd.Series of xfms, so we don't have convenient indexing ...
    overall_xfms = [
        s.defer(concat_xfmhandlers([xfm_1, xfm_2])) for xfms_1, xfm_2 in
        zip([r.xfms.lsq12_nlin_xfm for r in first_level_results.build_model],
            second_level_results.xfms.overall_xfm) for xfm_1 in xfms_1
    ]
    resample = np.vectorize(mincresample_new, excluded={"extra_flags"})
    defer = np.vectorize(s.defer)

    # TODO using the avg_img here is a bit clunky -- maybe better to propagate group indices ...
    # only necessary since `mbm` doesn't return DataFrames but namespaces ...

    first_level_determinants = pd.concat(
        list(first_level_results.build_model.apply(
            lambda x: x.determinants.assign(first_level_avg=x.avg_img))),
        ignore_index=True)

    # first_level_xfms is only necessary because you otherwise have no access to the input file which is necessary
    # for merging with the input csv. lsq12_nlin_xfm can be used to merge, and rigid_xfm contains the input file.
    # If for some reason we want to output xfms in the future, just don't drop everything.
    first_level_xfms = pd.concat(
        list(
            first_level_results.build_model.apply(
                lambda x: x.xfms.assign(first_level_avg=x.avg_img))),
        ignore_index=True)[["lsq12_nlin_xfm", "rigid_xfm"]]
    if options.mbm.segmentation.run_maget:
        maget_df = pd.DataFrame([
            {
                "label_file": x.labels.path,
                "native_file": x.orig_path
            }  #, "_merge" : basename(x.orig_path)}
            for x in pd.concat([
                namespace.maget_result
                for namespace in first_level_results.build_model
            ])
        ])
        first_level_xfms = pd.merge(
            left=first_level_xfms.assign(native_file=lambda df: df.rigid_xfm.
                                         apply(lambda x: x.source.path)),
            right=maget_df,
            on="native_file")
    first_level_determinants = (pd.merge(left=first_level_determinants,
                                         right=first_level_xfms,
                                         left_on="inv_xfm",
                                         right_on="lsq12_nlin_xfm").drop(
                                             ["rigid_xfm", "lsq12_nlin_xfm"],
                                             axis=1))

    resampled_determinants = (pd.merge(
        left=first_level_determinants,
        right=pd.DataFrame({
            'group_xfm': second_level_results.xfms.overall_xfm
        }).assign(source=lambda df: df.group_xfm.apply(lambda r: r.source)),
        left_on="first_level_avg",
        right_on="source").assign(
            resampled_log_full_det=lambda df: defer(
                resample(img=df.log_full_det,
                         xfm=df.group_xfm.apply(lambda x: x.xfm),
                         like=second_level_results.avg_img)),
            resampled_log_nlin_det=lambda df: defer(
                resample(img=df.log_nlin_det,
                         xfm=df.group_xfm.apply(lambda x: x.xfm),
                         like=second_level_results.avg_img))))
    # TODO only resamples the log determinants, but still a bit ugly ... abstract somehow?
    # TODO shouldn't be called resampled_determinants since this is basically the whole (first_level) thing ...

    inverted_overall_xfms = [
        s.defer(invert_xfmhandler(xfm)) for xfm in overall_xfms
    ]

    overall_determinants = (s.defer(
        determinants_at_fwhms(
            xfms=inverted_overall_xfms,
            inv_xfms=overall_xfms,
            blur_fwhms=options.mbm.stats.stats_kernels)).assign(
                overall_log_full_det=lambda df: df.log_full_det,
                overall_log_nlin_det=lambda df: df.log_nlin_det).drop(
                    ['log_full_det', 'log_nlin_det'], axis=1))

    # TODO return some MAGeT stuff from two_level function ??
    # FIXME running MAGeT from within the `two_level` function has the same problem as running it from within `mbm`:
    # it will now run when this pipeline is called from within another one (e.g., n-level), which will be
    # redundant, create filename clashes, etc. -- this should be moved to `two_level_pipeline`.
    # TODO do we need a `pride of atlases` for MAGeT in this pipeline ??
    # TODO at the moment MAGeT is run within the MBM code, but it could be disabled there and run here
    #if options.mbm.segmentation.run_maget:
    #    maget_options = copy.deepcopy(options)
    #    maget_options.maget = options.mbm.maget
    #    fixup_maget_options(maget_options=maget_options.maget,
    #                        lsq12_options=maget_options.mbm.lsq12,
    #                        nlin_options=maget_options.mbm.nlin)
    #    maget_options.maget.maget.mask = maget_options.maget.maget.mask_only = False   # already done above
    #    del maget_options.mbm

    # again using a weird combination of vectorized and loop constructs ...
    #    s.defer(maget([xfm.resampled for _ix, m in first_level_results.iterrows()
    #                   for xfm in m.build_model.xfms.rigid_xfm],
    #                  options=maget_options,
    #                  prefix="%s_MAGeT" % options.application.pipeline_name,
    #                  output_dir=os.path.join(options.application.output_directory,
    #                                          options.application.pipeline_name + "_processed")))

    # TODO resampling to database model ...

    # TODO there should be one table containing all determinants (first level, overall, resampled first level) for each file
    # and another containing some groupwise information (averages and transforms to the common average)
    return Result(stages=s,
                  output=Namespace(
                      first_level_results=first_level_results,
                      resampled_determinants=resampled_determinants,
                      overall_determinants=overall_determinants))
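The `np.vectorize(..., excluded=...)` trick used above broadcasts a function over Series while holding the named keyword arguments fixed; a self-contained sketch with a toy function:

    import numpy as np

    def scale(x, factor):
        return x * factor

    # 'factor' is excluded from broadcasting and passed through unchanged:
    vscale = np.vectorize(scale, excluded={"factor"})
    print(vscale(np.array([1, 2, 3]), factor=10))  # [10 20 30]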
Example #31
 def test_with_files(self, application_parse):
     assert is_recursive_subnamespace(Namespace(application=Namespace(files=["img_1.mnc"]),
                                                mbm=Namespace(lsq12=Namespace(max_pairs=20))),
                                      application_parse)
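`is_recursive_subnamespace` is not shown in these listings; a plausible sketch of such a predicate (an assumption about its behavior, not the actual helper):

    from argparse import Namespace

    def is_recursive_subnamespace(sub: Namespace, ns: Namespace) -> bool:
        # hypothetical: every field of `sub` must be present in `ns` with an
        # equal value, recursing into nested Namespace fields
        for key, value in vars(sub).items():
            if not hasattr(ns, key):
                return False
            other = getattr(ns, key)
            if isinstance(value, Namespace):
                if not (isinstance(other, Namespace)
                        and is_recursive_subnamespace(value, other)):
                    return False
            elif value != other:
                return False
        return True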