Example no. 1
def run_ckpt(databin_path, ckpt, output_dir, scorer_type, gold_m2_file,
             ori_path, ori_bpe_path, gen_subset, remove_unk_edits,
             remove_error_type_lst, apply_rerank, preserve_spell, max_edits):

    logging.info(f"[Run-ckpt] working on {ckpt}")
    os.makedirs(output_dir, exist_ok=True)

    ckpt_lst = ckpt.split(":")
    ckpt_basename = ''
    for c in ckpt_lst:
        b = util.get_basename(c, include_path=False, include_extension=False)
        ckpt_basename += b

    data_basename = util.get_basename(ori_path,
                                      include_path=False,
                                      include_extension=False)
    system_out_basename = os.path.join(output_dir,
                                       f"{ckpt_basename}.{data_basename}")
    system_out = f"{system_out_basename}.out"

    if not os.path.isfile(system_out):
        logging.info(f"[Run-ckpt] 1. generate into {system_out}")
        generate(databin_path,
                 ckpt,
                 system_out,
                 ori_path=ori_bpe_path,
                 gen_subset=gen_subset)

    cor_path = util.get_cor_path(system_out, remove_unk_edits,
                                 remove_error_type_lst, apply_rerank,
                                 preserve_spell, max_edits)

    if not os.path.isfile(cor_path):
        logging.info(f"[Run-ckpt] 2. postprocess into {cor_path}")
        postprocess(ori_path, system_out, cor_path, remove_unk_edits,
                    remove_error_type_lst, apply_rerank, preserve_spell,
                    max_edits)

    report_path = f"{util.get_basename(cor_path, include_extension=False)}.report"
    if not os.path.isfile(report_path):
        logging.info(f"[Run-ckpt] 3. evaluation into {report_path}")
        if scorer_type is not None and gold_m2_file is not None:
            evaluate(scorer_type, ori_path, cor_path, gold_m2_file,
                     report_path)
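
Every example on this page uses util.get_basename, but its definition is not included. Purely as a reading aid, here is a hypothetical sketch of what such a helper could look like; the examples appear to come from at least two different projects, and in the imaging code the helper evidently strips both the directory and the extension by default, so treat the defaults below as an assumption rather than the real API.

import os

def get_basename(path, include_path=True, include_extension=True):
    # Hypothetical sketch only -- not the actual util.get_basename.
    name = path if include_path else os.path.basename(path)
    if not include_extension:
        # strip stacked suffixes such as '.nii.gz' as well as single ones
        while True:
            name, ext = os.path.splitext(name)
            if not ext:
                break
    return name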
def save_transform_to_database(data_transforms):
    """ Save data transforms to database"""
    # pylint: disable= too-many-locals, bare-except
    conn = sqlite3.connect(util.DB_PATH)
    conn.text_factory = str

    for img in data_transforms:
        cursor = conn.execute('''SELECT pid from Images where id = ? ''', (img.image_id,))
        pid = cursor.fetchone()[0]

        folder = util.DATA_FOLDER + str(pid) + "/registration_transforms/"
        util.mkdir_p(folder)

        transform_paths = ""
        print(img.get_transforms())
        for _transform in img.get_transforms():
            print(_transform)
            dst_file = folder + util.get_basename(_transform) + '.h5.gz'
            if os.path.exists(dst_file):
                os.remove(dst_file)
            with open(_transform, 'rb') as f_in, gzip.open(dst_file, 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)
            transform_paths += str(pid) + "/registration_transforms/" +\
                util.get_basename(_transform) + '.h5.gz' + ", "
        transform_paths = transform_paths[:-2]

        cursor2 = conn.execute('''UPDATE Images SET transform = ? WHERE id = ?''',
                               (transform_paths, img.image_id))
        cursor2 = conn.execute('''UPDATE Images SET fixed_image = ? WHERE id = ?''',
                               (img.fixed_image, img.image_id))

        folder = util.DATA_FOLDER + str(pid) + "/reg_volumes_labels/"
        util.mkdir_p(folder)
        vol_path = util.compress_vol(img.processed_filepath)
        shutil.copy(vol_path, folder)

        volume_db = str(pid) + "/reg_volumes_labels/" + basename(vol_path)
        cursor2 = conn.execute('''UPDATE Images SET filepath_reg = ? WHERE id = ?''',
                               (volume_db, img.image_id))

        cursor = conn.execute('''SELECT filepath, id from Labels where image_id = ? ''',
                              (img.image_id,))
        for (row, label_id) in cursor:
            temp = util.compress_vol(move_vol(util.DATA_FOLDER + row,
                                              img.get_transforms(), True))
            shutil.copy(temp, folder)
            label_db = str(pid) + "/reg_volumes_labels/" + basename(temp)
            cursor2 = conn.execute('''UPDATE Labels SET filepath_reg = ? WHERE id = ?''',
                                   (label_db, label_id))

        conn.commit()
        cursor.close()
        cursor2.close()

#    cursor = conn.execute('''VACUUM; ''')
    conn.close()
Example no. 3
    def execute(self):
        # Run the mmls command on a file

        Comandos.log.info('Execute mmls')

        destination_file = self.destination_folder + os.sep + util.get_basename(
            self.file_name)
        command = ['mmls']
        command.append(destination_file)
        (output, err) = util.execute_shell(util.join_list(' ', command))
        print(output)

        Comandos.log.info('Mmls finished')
Example no. 4
def postprocess(ori_path,
                system_out,
                cor_path,
                remove_unk_edits=True,
                remove_error_type_lst=(),
                apply_rerank=False,
                preserve_spell=False,
                max_edits=None):

    pred = f"{system_out}.pred"
    m2_file_tmp = f"{system_out}._m2"

    try:
        logging.info("[Postprocess] 1. get pred file")
        m2.sys_to_cor(system_out, pred)

        logging.info("[Postprocess] 2. convert pred into m2")
        m2.parallel_to_m2(ori_path, pred, m2_file_tmp)

        logging.info("[Postprocess] 3. adjust m2")
        m2_entries = m2.get_m2_entries(m2_file_tmp)

        if remove_unk_edits:
            logging.info("[Postprocess] 3-1. removing <unk> edits")
            m2_entries = m2.remove_m2(m2_entries, None, '<unk>')

        if len(remove_error_type_lst) > 0:
            logging.info("[Postprocess] 3-2. remove error types")
            m2_entries = m2.remove_m2(m2_entries, remove_error_type_lst, None)

        if apply_rerank:
            logging.info("[Postprocess] 3-3. apply rerank")
            lm_scorer = load_lm()
            m2_entries = m2.apply_lm_rerank(m2_entries, preserve_spell,
                                            max_edits, lm_scorer)

        logging.info("[Postprocess] 4. get pred again")
        logging.info("[Postprocess] 4-1. write m2 file")

        cor_basename = util.get_basename(cor_path, include_extension=False)
        m2_file = f"{cor_basename}.m2"
        m2.write_m2_entries(m2_entries, m2_file)

        logging.info("[Postprocess] 4-2. write cor file")
        m2.m2_to_parallel([m2_file], None, cor_path, False, True)

    except Exception:
        logging.exception("[Postprocess] error occurred")
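
As a quick illustration of the file naming above (the path is made up): util.get_basename(cor_path, include_extension=False) appears to drop only the extension, so the .m2 file is written next to the corrected output.

# Illustrative values only -- assuming get_basename keeps the directory here
cor_path = "outputs/model.valid.cor"
cor_basename = "outputs/model.valid"   # get_basename(cor_path, include_extension=False)
m2_file = f"{cor_basename}.m2"         # -> "outputs/model.valid.m2"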
Example no. 5
        def _get_ckpt_dir_basename(train_mode, model, lr, dropout, seed,
                                   prev_model_dir):
            basenames = []
            if prev_model_dir is not None:
                prev_model_basename = util.get_basename(
                    prev_model_dir,
                    include_path=False,
                    include_extension=False)
                basenames.append(prev_model_basename)

            basename = f"{train_mode}-{model}-lr{lr}-dr{dropout}"
            if seed is not None:
                basename += f"-s{seed}"
            basenames.append(basename)

            return "_".join(basenames)
Example no. 6
        def _get_output_dir_from_ckpt_fpath(ckpt_fpath):
            ckpts = ckpt_fpath.split(':')

            # not ensemble
            if len(ckpts) == 1:
                ckpt_dir = os.path.dirname(ckpt_fpath)
                return _get_output_dir_from_ckpt_dir(ckpt_dir)

            # ensemble
            else:
                dirname_lst = []
                for ckpt in ckpts:
                    ckpt_dir = os.path.dirname(ckpt)
                    ckpt_dir_basename = util.get_basename(ckpt_dir,
                                                          include_path=False)
                    dirname_lst.append(ckpt_dir_basename)
                return f"{self.TRACK_PATH}/outputs/" + ":".join(dirname_lst)
def move_vol(moving, transform, label_img=False):
    """ Move data with transform """
    if label_img:
        # resample volume to 1 mm slices
        target_affine_3x3 = np.eye(3) * 1
        img_3d_affine = resample_img(moving, target_affine=target_affine_3x3,
                                     interpolation='nearest')
        resampled_file = util.TEMP_FOLDER_PATH + util.get_basename(moving) + '_resample.nii.gz'
        # pylint: disable= no-member
        img_3d_affine.to_filename(resampled_file)

    else:
        img = img_data(-1, util.DATA_FOLDER, util.TEMP_FOLDER_PATH)
        img.set_img_filepath(moving)
        resampled_file = pre_process(img, False).pre_processed_filepath

    result = util.transform_volume(resampled_file, transform, label_img)
    util.generate_image(result, util.TEMPLATE_VOLUME)
    return result
def post_calculations(moving_dataset_image_ids, result=None):
    """ Transform images and calculate avg"""
    if result is None:
        result = {}

    for _id in moving_dataset_image_ids:
        img = img_data(_id, util.DATA_FOLDER, util.TEMP_FOLDER_PATH)
        img.load_db_transforms()

        img_pre = img_data(img.fixed_image, util.DATA_FOLDER,
                           util.TEMP_FOLDER_PATH)
        img_pre.load_db_transforms()

        reg_vol = util.transform_volume(img.reg_img_filepath,
                                        img_pre.get_transforms())
        vol = util.TEMP_FOLDER_PATH + util.get_basename(
            basename(reg_vol)) + '_BE.nii.gz'

        mult = ants.MultiplyImages()
        mult.inputs.dimension = 3
        mult.inputs.first_input = reg_vol
        mult.inputs.second_input = util.TEMPLATE_MASK
        mult.inputs.output_product_image = vol
        mult.run()

        label = "img"
        if label in result:
            result[label].append(vol)
        else:
            result[label] = [vol]

        for (segmentation, label) in util.find_reg_label_images(_id):
            segmentation = util.transform_volume(segmentation,
                                                 img_pre.get_transforms(),
                                                 label_img=True)
            if label in result:
                result[label].append(segmentation)
            else:
                result[label] = [segmentation]
    return result
Example no. 9
def move_vol(moving, transform, label_img=False, slice_size=1, ref_img=None):
    """ Move data with transform """
    if label_img:
        # resample volume to 1 mm slices
        target_affine_3x3 = np.eye(3) * slice_size
        img_3d_affine = resample_img(moving,
                                     target_affine=target_affine_3x3,
                                     interpolation='nearest')
        resampled_file = util.TEMP_FOLDER_PATH + util.get_basename(
            moving) + '_resample.nii.gz'
        # pylint: disable= no-member
        img_3d_affine.to_filename(resampled_file)
        del img_3d_affine
    else:
        img = img_data(-1, util.DATA_FOLDER, util.TEMP_FOLDER_PATH)
        img.set_img_filepath(moving)
        resampled_file = pre_process(img, False).pre_processed_filepath

    result = util.transform_volume(resampled_file,
                                   transform,
                                   label_img,
                                   ref_img=ref_img)
    util.generate_image(result, util.TEMPLATE_VOLUME)
    return result
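
The label branch resamples with nearest-neighbour interpolation so that label values stay integral instead of being blended. A self-contained version of just that step with nilearn (file names are illustrative) would be:

import numpy as np
from nilearn.image import resample_img

# Resample a label volume to isotropic 1 mm voxels; 'nearest' keeps labels integral.
resampled = resample_img("label.nii.gz",
                         target_affine=np.eye(3) * 1,
                         interpolation='nearest')
resampled.to_filename("label_resample.nii.gz")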
Example no. 10
def pre_process(img, do_bet=True, slice_size=1, reg_type=None, be_method=None):
    # pylint: disable= too-many-statements, too-many-locals, too-many-branches
    """ Pre process the data"""
    path = img.temp_data_path

    input_file = img.img_filepath
    n4_file = path + util.get_basename(input_file) + '_n4.nii.gz'
    norm_file = path + util.get_basename(n4_file) + '_norm.nii.gz'
    resampled_file = path + util.get_basename(norm_file) + '_resample.nii.gz'
    name = util.get_basename(resampled_file) + "_be"
    img.pre_processed_filepath = path + name + '.nii.gz'

    n4bias = ants.N4BiasFieldCorrection()
    n4bias.inputs.dimension = 3
    n4bias.inputs.num_threads = NUM_THREADS_ANTS
    n4bias.inputs.input_image = input_file
    n4bias.inputs.output_image = n4_file
    n4bias.run()

    # normalization [0,100], same as template
    normalize_img = nib.load(n4_file)
    temp_data = normalize_img.get_data()
    temp_img = nib.Nifti1Image(temp_data / np.amax(temp_data) * 100,
                               normalize_img.affine, normalize_img.header)
    temp_img.to_filename(norm_file)
    del temp_img

    # resample volume to 1 mm slices
    target_affine_3x3 = np.eye(3) * slice_size
    img_3d_affine = resample_img(norm_file, target_affine=target_affine_3x3)
    nib.save(img_3d_affine, resampled_file)

    if not do_bet:
        img.pre_processed_filepath = resampled_file
        return img

    if be_method == 0:
        img.init_transform = path + name + '_InitRegTo' + str(
            img.fixed_image) + '.h5'

        reg = ants.Registration()
        # reg.inputs.args = "--verbose 1"
        reg.inputs.collapse_output_transforms = True
        reg.inputs.fixed_image = resampled_file
        reg.inputs.moving_image = util.TEMPLATE_VOLUME
        reg.inputs.fixed_image_mask = img.label_inv_filepath

        reg.inputs.num_threads = NUM_THREADS_ANTS
        reg.inputs.initial_moving_transform_com = True

        if reg_type == RIGID:
            reg.inputs.transforms = ['Rigid', 'Rigid']
        elif reg_type == COMPOSITEAFFINE:
            reg.inputs.transforms = ['Rigid', 'CompositeAffine']
        elif reg_type == SIMILARITY:
            reg.inputs.transforms = ['Rigid', 'Similarity']
        else:
            reg.inputs.transforms = ['Rigid', 'Affine']
        reg.inputs.metric = ['MI', 'MI']
        reg.inputs.radius_or_number_of_bins = [32, 32]
        reg.inputs.metric_weight = [1, 1]
        reg.inputs.convergence_window_size = [5, 5]
        reg.inputs.number_of_iterations = ([[
            15000, 12000, 10000, 10000, 10000, 5000, 5000
        ], [10000, 10000, 5000, 5000]])
        reg.inputs.shrink_factors = [[19, 16, 12, 9, 5, 3, 1], [9, 5, 3, 1]]
        reg.inputs.smoothing_sigmas = [[10, 10, 10, 8, 4, 1, 0], [8, 4, 1, 0]]
        reg.inputs.convergence_threshold = [1.e-6] * 2
        reg.inputs.transform_parameters = [(0.25, ), (0.25, )]
        reg.inputs.sigma_units = ['vox'] * 2
        reg.inputs.use_estimate_learning_rate_once = [True, True]

        reg.inputs.write_composite_transform = True
        reg.inputs.output_transform_prefix = path + name
        reg.inputs.output_warped_image = path + name + '_beReg.nii.gz'

        transform = path + name + 'InverseComposite.h5'
        util.LOGGER.info("starting be registration")
        reg.run()
        util.LOGGER.info("Finished be registration")

        reg_volume = util.transform_volume(resampled_file, transform)
        shutil.copy(transform, img.init_transform)

        mult = ants.MultiplyImages()
        mult.inputs.dimension = 3
        mult.inputs.first_input = reg_volume
        mult.inputs.second_input = util.TEMPLATE_MASK
        mult.inputs.output_product_image = img.pre_processed_filepath
        mult.run()

        util.generate_image(img.pre_processed_filepath, reg_volume)
    elif be_method == 1:
        # http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/BET/UserGuide#Main_bet2_options:
        bet = fsl.BET(command=BET_COMMAND)
        bet.inputs.in_file = resampled_file
        # pylint: disable= pointless-string-statement
        """ fractional intensity threshold (0->1); default=0.5;
        smaller values give larger brain outline estimates"""
        bet.inputs.frac = 0.25
        """ vertical gradient in fractional intensity threshold (-1->1);
        default=0; positive values give larger brain outline at bottom,
        smaller at top """
        bet.inputs.vertical_gradient = 0
        """  This attempts to reduce image bias, and residual neck voxels.
        This can be useful when running SIENA or SIENAX, for example.
        Various stages involving FAST segmentation-based bias field removal
        and standard-space masking are combined to produce a result which
        can often give better results than just running bet2."""
        # bet.inputs.reduce_bias = True
        bet.inputs.mask = True

        bet.inputs.out_file = img.pre_processed_filepath

        bet.run()
        util.generate_image(img.pre_processed_filepath, resampled_file)
    elif be_method == 2:
        if BET_FRAC > 0:
            name = util.get_basename(resampled_file) + "_bet"
            # http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/BET/UserGuide#Main_bet2_options:
            bet = fsl.BET(command=BET_COMMAND)
            bet.inputs.in_file = resampled_file
            # pylint: disable= pointless-string-statement
            """ fractional intensity threshold (0->1); default=0.5;
            smaller values give larger brain outline estimates"""
            bet.inputs.frac = BET_FRAC
            """ vertical gradient in fractional intensity threshold (-1->1);
            default=0; positive values give larger brain outline at bottom,
            smaller at top """
            bet.inputs.vertical_gradient = 0
            """  This attempts to reduce image bias, and residual neck voxels.
            This can be useful when running SIENA or SIENAX, for example.
            Various stages involving FAST segmentation-based bias field removal
            and standard-space masking are combined to produce a result which
            can often give better results than just running bet2."""
            bet.inputs.reduce_bias = True
            bet.inputs.mask = True
            bet.inputs.out_file = path + name + '.nii.gz'
            util.LOGGER.info("starting bet registration")
            start_time = datetime.datetime.now()
            util.LOGGER.info(bet.cmdline)
            if not os.path.exists(bet.inputs.out_file):
                bet.run()
            util.LOGGER.info("Finished bet registration 0: ")
            util.LOGGER.info(datetime.datetime.now() - start_time)
            name += "_be"
            print('OR HERE 3??????')
            moving_image = util.TEMPLATE_MASKED_VOLUME
            print(moving_image)
            fixed_image = bet.inputs.out_file
        else:
            name = util.get_basename(resampled_file) + "_be"
            moving_image = util.TEMPLATE_VOLUME
            fixed_image = resampled_file
        img.init_transform = path + name + '_InitRegTo' + str(
            img.fixed_image) + '.h5'
        img.pre_processed_filepath = path + name + '.nii.gz'
        reg = ants.Registration()
        # reg.inputs.args = "--verbose 1"
        reg.inputs.collapse_output_transforms = True
        reg.inputs.fixed_image = fixed_image
        reg.inputs.moving_image = moving_image
        reg.inputs.fixed_image_mask = img.label_inv_filepath

        reg.inputs.num_threads = NUM_THREADS_ANTS
        reg.inputs.initial_moving_transform_com = True

        if reg_type == RIGID:
            reg.inputs.transforms = ['Rigid', 'Rigid']
        elif reg_type == COMPOSITEAFFINE:
            reg.inputs.transforms = ['Rigid', 'CompositeAffine']
        elif reg_type == SIMILARITY:
            reg.inputs.transforms = ['Rigid', 'Similarity']
        elif reg_type == AFFINE:
            reg.inputs.transforms = ['Rigid', 'Affine']
        reg.inputs.metric = ['MI', 'MI']
        reg.inputs.radius_or_number_of_bins = [32, 32]
        reg.inputs.metric_weight = [1, 1]
        reg.inputs.convergence_window_size = [5, 5]
        reg.inputs.sampling_strategy = ['Regular'] * 2
        reg.inputs.sampling_percentage = [0.5] * 2
        reg.inputs.number_of_iterations = ([[10000, 10000, 5000, 5000],
                                            [10000, 10000, 5000, 5000]])
        reg.inputs.shrink_factors = [[9, 5, 3, 1], [9, 5, 3, 1]]
        reg.inputs.smoothing_sigmas = [[8, 4, 1, 0], [8, 4, 1, 0]]
        reg.inputs.transform_parameters = [(0.25, ), (0.25, )]
        reg.inputs.convergence_threshold = [1.e-6] * 2
        reg.inputs.sigma_units = ['vox'] * 2
        reg.inputs.use_estimate_learning_rate_once = [True, True]

        reg.inputs.write_composite_transform = True
        reg.inputs.output_transform_prefix = path + name
        reg.inputs.output_warped_image = path + name + '_TemplateReg.nii.gz'
        transform = path + name + 'InverseComposite.h5'
        util.LOGGER.info("starting be registration")
        util.LOGGER.info(reg.cmdline)
        start_time = datetime.datetime.now()
        if not os.path.exists(reg.inputs.output_warped_image):
            reg.run()
        util.LOGGER.info("Finished be registration: ")
        util.LOGGER.info(datetime.datetime.now() - start_time)

        reg_volume = util.transform_volume(resampled_file, transform)
        shutil.copy(transform, img.init_transform)

        brain_mask = util.TEMPLATE_MASK
        #brain_mask = img.reg_brainmask_filepath
        if not brain_mask:
            brain_mask = util.TEMPLATE_MASK
        print("Using brain mask " + brain_mask)

        mult = ants.MultiplyImages()
        mult.inputs.dimension = 3
        mult.inputs.first_input = reg_volume
        mult.inputs.second_input = brain_mask
        mult.inputs.output_product_image = img.pre_processed_filepath
        mult.run()

        util.generate_image(img.pre_processed_filepath, reg_volume)
    else:
        util.LOGGER.error(" INVALID BE METHOD!!!!")

    util.LOGGER.info("---BET " + img.pre_processed_filepath)
    return img
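
The normalization step simply rescales intensities to [0, 100] so they match the template. A standalone version of only that step (file names illustrative; newer nibabel prefers get_fdata() over the deprecated get_data() used above) could be:

import nibabel as nib
import numpy as np

img = nib.load("subject_n4.nii.gz")
data = img.get_fdata()
# rescale so the maximum intensity becomes 100, matching the template range
rescaled = nib.Nifti1Image(data / np.amax(data) * 100, img.affine, img.header)
rescaled.to_filename("subject_norm.nii.gz")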
Example no. 11
def save_transform_to_database(imgs):
    """ Save data transforms to database"""
    # pylint: disable= too-many-locals, bare-except
    conn = sqlite3.connect(util.DB_PATH, timeout=900)
    conn.text_factory = str

    try:
        conn.execute(
            "alter table Images add column 'registration_date' 'TEXT'")
    except sqlite3.OperationalError:
        pass

    for img in imgs:
        cursor = conn.execute('''SELECT pid from Images where id = ? ''',
                              (img.image_id, ))
        pid = cursor.fetchone()[0]

        folder = util.DATA_FOLDER + str(pid) + "/registration_transforms/"
        if os.path.exists(folder):
            shutil.rmtree(folder)
        os.makedirs(folder)

        transform_paths = ""
        util.LOGGER.info(img.get_transforms())
        for _transform in img.get_transforms():
            util.LOGGER.info(_transform)
            dst_file = folder + util.get_basename(_transform) + '.h5.gz'
            with open(_transform, 'rb') as f_in, gzip.open(dst_file,
                                                           'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)
            transform_paths += str(pid) + "/registration_transforms/" +\
                util.get_basename(_transform) + '.h5.gz' + ", "
        transform_paths = transform_paths[:-2]

        cursor2 = conn.execute(
            '''UPDATE Images SET transform = ? WHERE id = ?''',
            (transform_paths, img.image_id))
        cursor2 = conn.execute(
            '''UPDATE Images SET fixed_image = ? WHERE id = ?''',
            (img.fixed_image, img.image_id))

        cursor2 = conn.execute(
            '''UPDATE Images SET registration_date = ? WHERE id = ?''',
            (datetime.datetime.now().strftime("%Y-%m-%d"), img.image_id))

        folder = util.DATA_FOLDER + str(pid) + "/reg_volumes_labels/"
        if os.path.exists(folder):
            shutil.rmtree(folder)
        os.makedirs(folder)
        vol_path = util.compress_vol(img.processed_filepath)
        shutil.copy(vol_path, folder)

        volume_db = str(pid) + "/reg_volumes_labels/" + basename(vol_path)
        cursor2 = conn.execute(
            '''UPDATE Images SET filepath_reg = ? WHERE id = ?''',
            (volume_db, img.image_id))

        cursor = conn.execute(
            '''SELECT filepath, id from Labels where image_id = ? ''',
            (img.image_id, ))
        for (filepath, label_id) in cursor:
            temp = util.compress_vol(
                move_vol(util.DATA_FOLDER + filepath, img.get_transforms(),
                         True))
            shutil.copy(temp, folder)
            label_db = str(pid) + "/reg_volumes_labels/" + basename(temp)
            cursor2 = conn.execute(
                '''UPDATE Labels SET filepath_reg = ? WHERE id = ?''',
                (label_db, label_id))

        conn.commit()
        cursor.close()
        cursor2.close()
Example no. 12
def registration(moving_img, fixed, reg_type):
    # pylint: disable= too-many-statements, too-many-branches
    """Image2Image registration """
    reg = ants.Registration()

    path = moving_img.temp_data_path
    name = util.get_basename(
        moving_img.pre_processed_filepath) + '_' + reg_type
    moving_img.processed_filepath = path + name + '_RegTo' + str(
        moving_img.fixed_image) + '.nii.gz'
    moving_img.transform = path + name + '_RegTo' + str(
        moving_img.fixed_image) + '.h5'

    init_moving_transform = moving_img.init_transform
    if init_moving_transform is not None and os.path.exists(
            init_moving_transform):
        util.LOGGER.info("Found initial transform")
        # reg.inputs.initial_moving_transform = init_moving_transform
        reg.inputs.initial_moving_transform_com = False
        mask = util.transform_volume(moving_img.label_inv_filepath,
                                     moving_img.init_transform,
                                     label_img=True)
    else:
        reg.inputs.initial_moving_transform_com = True
        mask = moving_img.label_inv_filepath
    reg.inputs.collapse_output_transforms = True
    reg.inputs.fixed_image = moving_img.pre_processed_filepath
    reg.inputs.fixed_image_mask = mask
    reg.inputs.moving_image = fixed
    reg.inputs.num_threads = NUM_THREADS_ANTS
    if reg_type == RIGID:
        reg.inputs.transforms = ['Rigid', 'Rigid', 'Rigid']
        reg.inputs.metric = ['MI', 'MI', 'MI']
        reg.inputs.metric_weight = [1] * 2 + [1]
        reg.inputs.radius_or_number_of_bins = [32, 32, 32]
        reg.inputs.convergence_window_size = [5, 5, 5]
        reg.inputs.sampling_strategy = ['Regular'] * 2 + [None]
        reg.inputs.sampling_percentage = [0.5] * 2 + [None]
        if reg.inputs.initial_moving_transform_com:
            reg.inputs.number_of_iterations = ([[
                10000, 10000, 10000, 1000, 1000, 1000
            ], [10000, 10000, 1000, 1000, 1000], [75, 50, 50]])
            reg.inputs.shrink_factors = [[12, 9, 5, 3, 2, 1], [5, 4, 3, 2, 1],
                                         [3, 2, 1]]
            reg.inputs.smoothing_sigmas = [[9, 8, 4, 2, 1, 0], [4, 3, 2, 1, 0],
                                           [2, 1, 0]]
        else:
            reg.inputs.number_of_iterations = ([[5000, 5000, 1000, 500],
                                                [5000, 5000, 1000, 500],
                                                [75, 50]])
            reg.inputs.shrink_factors = [[7, 5, 2, 1], [4, 3, 2, 1], [2, 1]]
            reg.inputs.smoothing_sigmas = [[6, 4, 1, 0], [3, 2, 1, 0],
                                           [0.5, 0]]
        reg.inputs.convergence_threshold = [1.e-6] * 3
        reg.inputs.sigma_units = ['vox'] * 3
        reg.inputs.transform_parameters = [(0.25, ), (0.25, ), (0.25, )]
        reg.inputs.use_estimate_learning_rate_once = [True] * 3
        reg.inputs.use_histogram_matching = [False, False, True]
    elif reg_type == AFFINE or reg_type == COMPOSITEAFFINE or reg_type == SIMILARITY:
        if reg_type == AFFINE:
            reg.inputs.transforms = ['Rigid', 'Affine', 'Affine']
        elif reg_type == SIMILARITY:
            reg.inputs.transforms = ['Rigid', 'Similarity', 'Similarity']
        else:
            reg.inputs.transforms = [
                'Rigid', 'CompositeAffine', 'CompositeAffine'
            ]
        reg.inputs.metric = ['MI', 'MI', 'MI']
        reg.inputs.metric_weight = [1] * 2 + [1]
        reg.inputs.radius_or_number_of_bins = [32, 32, 32]
        reg.inputs.convergence_window_size = [5, 5, 5]
        reg.inputs.sampling_strategy = ['Regular'] * 2 + [None]
        reg.inputs.sampling_percentage = [0.5] * 2 + [None]
        if reg.inputs.initial_moving_transform_com:
            reg.inputs.number_of_iterations = ([[
                10000, 10000, 1000, 1000, 1000
            ], [10000, 10000, 1000, 1000, 1000], [75, 50, 50]])
            reg.inputs.shrink_factors = [[9, 5, 3, 2, 1], [5, 4, 3, 2, 1],
                                         [3, 2, 1]]
            reg.inputs.smoothing_sigmas = [[8, 4, 2, 1, 0], [4, 3, 2, 1, 0],
                                           [2, 1, 0]]
        else:
            reg.inputs.number_of_iterations = ([[5000, 5000, 1000, 500],
                                                [5000, 5000, 1000, 500],
                                                [75, 50]])
            reg.inputs.shrink_factors = [[7, 5, 2, 1], [4, 3, 2, 1], [2, 1]]
            reg.inputs.smoothing_sigmas = [[6, 4, 1, 0], [3, 2, 1, 0],
                                           [0.5, 0]]
        reg.inputs.convergence_threshold = [1.e-6] * 3
        reg.inputs.sigma_units = ['vox'] * 3
        reg.inputs.transform_parameters = [(0.25, ), (0.25, ), (0.25, )]
        reg.inputs.use_estimate_learning_rate_once = [True] * 3
        reg.inputs.use_histogram_matching = [False, False, True]
    elif reg_type == SYN:
        reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
        reg.inputs.metric = ['MI', 'MI', ['MI', 'CC']]
        reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]]
        reg.inputs.radius_or_number_of_bins = [32, 32, [32, 4]]
        reg.inputs.convergence_window_size = [5, 5, 5]
        reg.inputs.sampling_strategy = ['Regular'] * 2 + [[None, None]]
        reg.inputs.sampling_percentage = [0.5] * 2 + [[None, None]]
        if reg.inputs.initial_moving_transform_com:
            reg.inputs.number_of_iterations = ([[
                10000, 10000, 1000, 1000, 1000
            ], [10000, 10000, 1000, 1000, 1000], [100, 75, 75, 75]])
            reg.inputs.shrink_factors = [[9, 5, 3, 2, 1], [5, 4, 3, 2, 1],
                                         [5, 3, 2, 1]]
            reg.inputs.smoothing_sigmas = [[8, 4, 2, 1, 0], [4, 3, 2, 1, 0],
                                           [4, 2, 1, 0]]
        else:
            reg.inputs.number_of_iterations = ([[5000, 5000, 1000, 500],
                                                [5000, 5000, 1000, 500],
                                                [100, 90, 75]])
            reg.inputs.shrink_factors = [[7, 5, 2, 1], [4, 3, 2, 1], [4, 2, 1]]
            reg.inputs.smoothing_sigmas = [[6, 4, 1, 0], [3, 2, 1, 0],
                                           [1, 0.5, 0]]
        reg.inputs.convergence_threshold = [1.e-6] * 2 + [-0.01]
        reg.inputs.sigma_units = ['vox'] * 3
        reg.inputs.transform_parameters = [(0.25, ), (0.25, ), (0.2, 3.0, 0.0)]
        reg.inputs.use_estimate_learning_rate_once = [True] * 3
        reg.inputs.use_histogram_matching = [False, False, True]
    else:
        raise Exception("Wrong registration format " + reg_type)
    reg.inputs.winsorize_lower_quantile = 0.005
    reg.inputs.winsorize_upper_quantile = 0.995
    reg.inputs.write_composite_transform = True
    reg.inputs.output_transform_prefix = path + name
    transform = path + name + 'InverseComposite.h5'

    if os.path.exists(moving_img.processed_filepath) and\
       os.path.exists(moving_img.transform):
        # generate_image(reg.inputs.output_warped_image, fixed)
        return moving_img
    util.LOGGER.info("starting registration")
    start_time = datetime.datetime.now()
    util.LOGGER.info(reg.cmdline)
    reg.run()
    util.LOGGER.info("Finished registration: ")
    util.LOGGER.info(datetime.datetime.now() - start_time)

    util.transform_volume(moving_img.pre_processed_filepath,
                          transform,
                          outputpath=moving_img.processed_filepath)
    shutil.copy(transform, moving_img.transform)
    util.generate_image(moving_img.processed_filepath, fixed)

    return moving_img
def pre_process(img, do_bet=True):
    # pylint: disable= too-many-statements, too-many-locals
    """ Pre process the data"""

    path = img.temp_data_path

    input_file = img.img_filepath
    n4_file = path + util.get_basename(input_file) + '_n4.nii.gz'
    norm_file = path + util.get_basename(n4_file) + '_norm.nii.gz'
    resampled_file = path + util.get_basename(norm_file) + '_resample.nii.gz'
    name = util.get_basename(resampled_file) + "_be"
    img.pre_processed_filepath = path + name + '.nii.gz'

    if os.path.exists(img.pre_processed_filepath) and\
       (os.path.exists(path + name + 'Composite.h5') or BE_METHOD == 1):
        if BE_METHOD == 0:
            img.init_transform = path + name + 'Composite.h5'
        util.generate_image(img.pre_processed_filepath, util.TEMPLATE_VOLUME)
        return img

    n4bias = ants.N4BiasFieldCorrection()
    n4bias.inputs.dimension = 3
    n4bias.inputs.input_image = input_file
    n4bias.inputs.output_image = n4_file
    n4bias.run()

    # normalization [0,100], same as template
    normalize_img = nib.load(n4_file)
    temp_data = normalize_img.get_data()
    temp_img = nib.Nifti1Image(temp_data/np.amax(temp_data)*100,
                               normalize_img.affine, normalize_img.header)
    temp_img.to_filename(norm_file)

    # resample volume to 1 mm slices
    target_affine_3x3 = np.eye(3) * 1
    img_3d_affine = resample_img(norm_file, target_affine=target_affine_3x3)
    nib.save(img_3d_affine, resampled_file)

    if not do_bet:
        img.pre_processed_filepath = resampled_file
        return img

    if BE_METHOD == 0:
        img.init_transform = path + name + '_InitRegTo' + str(img.fixed_image) + '.h5'

        reg = ants.Registration()
        # reg.inputs.args = "--verbose 1"
        reg.inputs.collapse_output_transforms = True
        reg.inputs.fixed_image = resampled_file
        reg.inputs.moving_image = util.TEMPLATE_VOLUME
        reg.inputs.fixed_image_mask = img.label_inv_filepath

        reg.inputs.num_threads = 1
        reg.inputs.initial_moving_transform_com = True

        reg.inputs.transforms = ['Rigid', 'Affine']
        reg.inputs.metric = ['MI', 'MI']
        reg.inputs.radius_or_number_of_bins = [32, 32]
        reg.inputs.metric_weight = [1, 1]
        reg.inputs.convergence_window_size = [5, 5]
        reg.inputs.number_of_iterations = ([[10000, 10000, 10000, 10000],
                                            [10000, 10000, 10000, 10000]])

        reg.inputs.convergence_threshold = [1.e-6]*2
        reg.inputs.shrink_factors = [[9, 5, 3, 1], [9, 5, 3, 1]]
        reg.inputs.smoothing_sigmas = [[8, 4, 1, 0], [8, 4, 1, 0]]
        reg.inputs.transform_parameters = [(0.25,), (0.25,)]
        reg.inputs.sigma_units = ['vox']*2
        reg.inputs.use_estimate_learning_rate_once = [True, True]

        reg.inputs.write_composite_transform = True
        reg.inputs.output_transform_prefix = path + name
        reg.inputs.output_warped_image = path + name + '_beReg.nii.gz'

        transform = path + name + 'InverseComposite.h5'
        print("starting be registration")
        reg.run()
        print("Finished be registration")

        reg_volume = util.transform_volume(resampled_file, transform)
        shutil.copy(transform, img.init_transform)

        mult = ants.MultiplyImages()
        mult.inputs.dimension = 3
        mult.inputs.first_input = reg_volume
        mult.inputs.second_input = util.TEMPLATE_MASK
        mult.inputs.output_product_image = img.pre_processed_filepath
        mult.run()

        util.generate_image(img.pre_processed_filepath, reg_volume)
    elif BE_METHOD == 1:
        # http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/BET/UserGuide#Main_bet2_options:
        bet = fsl.BET(command="fsl5.0-bet")
        bet.inputs.in_file = resampled_file
        # pylint: disable= pointless-string-statement
        """ fractional intensity threshold (0->1); default=0.5;
        smaller values give larger brain outline estimates"""
        bet.inputs.frac = 0.25
        """ vertical gradient in fractional intensity threshold (-1->1);
        default=0; positive values give larger brain outline at bottom,
        smaller at top """
        bet.inputs.vertical_gradient = 0
        """  This attempts to reduce image bias, and residual neck voxels.
        This can be useful when running SIENA or SIENAX, for example.
        Various stages involving FAST segmentation-based bias field removal
        and standard-space masking are combined to produce a result which
        can often give better results than just running bet2."""
        # bet.inputs.reduce_bias = True
        bet.inputs.mask = True

        bet.inputs.out_file = img.pre_processed_filepath

        bet.run()
        util.generate_image(img.pre_processed_filepath, resampled_file)
    elif BE_METHOD == 2:
        name = util.get_basename(resampled_file) + "_bet"

        # http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/BET/UserGuide#Main_bet2_options:
        bet = fsl.BET(command="fsl5.0-bet")
        bet.inputs.in_file = resampled_file
        # pylint: disable= pointless-string-statement
        """ fractional intensity threshold (0->1); default=0.5;
        smaller values give larger brain outline estimates"""
        bet.inputs.frac = 0.1
        """ vertical gradient in fractional intensity threshold (-1->1);
        default=0; positive values give larger brain outline at bottom,
        smaller at top """
        bet.inputs.vertical_gradient = 0
        """  This attempts to reduce image bias, and residual neck voxels.
        This can be useful when running SIENA or SIENAX, for example.
        Various stages involving FAST segmentation-based bias field removal
        and standard-space masking are combined to produce a result which
        can often give better results than just running bet2."""
        bet.inputs.reduce_bias = True
        bet.inputs.mask = True
        bet.inputs.out_file = path + name + '.nii.gz'
        print("starting bet registration")
        start_time = datetime.datetime.now()
        if not os.path.exists(bet.inputs.out_file):
            bet.run()
        print("Finished bet registration 0: ")
        print(datetime.datetime.now() - start_time)

        name = name + "_be"
        img.pre_processed_filepath = path + name + '.nii.gz'
        img.init_transform = path + name + '_InitRegTo' + str(img.fixed_image) + '.h5'

        reg = ants.Registration()
        # reg.inputs.args = "--verbose 1"
        reg.inputs.collapse_output_transforms = True
        reg.inputs.fixed_image = bet.inputs.out_file
        reg.inputs.moving_image = util.TEMPLATE_MASKED_VOLUME
        reg.inputs.fixed_image_mask = img.label_inv_filepath

        reg.inputs.num_threads = 8
        reg.inputs.initial_moving_transform_com = True

        reg.inputs.transforms = ['Rigid', 'Affine']
        reg.inputs.metric = ['MI', 'MI']
        reg.inputs.radius_or_number_of_bins = [32, 32]
        reg.inputs.metric_weight = [1, 1]
        reg.inputs.convergence_window_size = [5, 5]
        reg.inputs.sampling_strategy = ['Regular'] * 2
        reg.inputs.sampling_percentage = [0.5] * 2
        reg.inputs.number_of_iterations = ([[10000, 10000, 5000, 5000],
                                            [10000, 10000, 5000, 5000]])
        reg.inputs.shrink_factors = [[9, 5, 3, 1], [9, 5, 3, 1]]
        reg.inputs.smoothing_sigmas = [[8, 4, 1, 0], [8, 4, 1, 0]]
        reg.inputs.transform_parameters = [(0.25,), (0.25,)]
        reg.inputs.convergence_threshold = [1.e-6]*2
        reg.inputs.sigma_units = ['vox']*2
        reg.inputs.use_estimate_learning_rate_once = [True, True]

        reg.inputs.write_composite_transform = True
        reg.inputs.output_transform_prefix = path + name
        reg.inputs.output_warped_image = path + name + '_TemplateReg.nii.gz'

        transform = path + name + 'InverseComposite.h5'
        print("starting be registration")
        start_time = datetime.datetime.now()
        if not os.path.exists(reg.inputs.output_warped_image):
            reg.run()
        print("Finished be registration: ")
        print(datetime.datetime.now() - start_time)

        reg_volume = util.transform_volume(resampled_file, transform)
        shutil.copy(transform, img.init_transform)

        mult = ants.MultiplyImages()
        mult.inputs.dimension = 3
        mult.inputs.first_input = reg_volume
        mult.inputs.second_input = util.TEMPLATE_MASK
        mult.inputs.output_product_image = img.pre_processed_filepath
        mult.run()

        util.generate_image(img.pre_processed_filepath, reg_volume)

    print("---BET", img.pre_processed_filepath)
    return img
Example no. 14
    def execute(self):
        # Copy data from the incoming folder to the destination folder with data integrity

        # To verify integrity, the MD5 of each file is computed. When it is copied to the destination,
        # the MD5 is computed again to verify that the copy was performed correctly

        Comandos.log.info('Copy files')

        # Filter the files in incoming, keeping only the ones that need to be copied (those listed in files)
        incomingFiles = os.listdir(self.incomingFolder)
        if self.files is not None:
            incomingFilesFiltered = list(
                filter(lambda f: f in self.files, incomingFiles))
        else:
            incomingFilesFiltered = incomingFiles

        # If there are no files to copy, the process is finished
        if len(incomingFilesFiltered) == 0:
            Comandos.log.info('No files to process')
            return

        # Create the destination folder if it does not exist
        util.create_folder_if_not_exist(self.destinationFolder)

        # Copy only the filtered files
        incomingFilesFullPath = list()
        for file in incomingFilesFiltered:
            incomingFile = self.incomingFolder + os.sep + file
            shutil.copy2(incomingFile, self.destinationFolder)
            Comandos.log.info('Copy file ' + incomingFile + ' to ' +
                              self.destinationFolder)
            incomingFilesFullPath.append(incomingFile)

        # Compute the hash of each source file and each destination file and check that they match, to verify integrity

        # Recover the original file names
        Comandos.log.info('Files: ' + str(incomingFilesFiltered))

        # List that will hold all files belonging to the case, all with the same CASE, ID and ALIAS attributes
        forensicCase = list()

        for incomingFile in incomingFilesFullPath:
            destinationFile = self.destinationFolder + os.sep + util.get_basename(
                incomingFile)

            md5IncomingFile = util.get_md5(incomingFile)
            md5DestinationFile = util.get_md5(destinationFile)

            if md5IncomingFile != md5DestinationFile:
                Comandos.log.error('Error: integrity error file ' +
                                   incomingFile)
                print('Error: integrity error file ' + incomingFile)
                return

            # Retrieve data of the copied file
            dataFile = os.stat(destinationFile)

            fileCase = model.FileCase(
                caseName=self.caseName,
                idCase=self.idCase,
                alias=self.alias,
                size=util.get_bytes_in_megabytes(dataFile.st_size),
                fileName=util.get_basename(destinationFile),
                hashMd5=md5DestinationFile,
                # Creation date of the original file
                creationTime=util.format_date_time(
                    time.ctime(os.stat(incomingFile).st_ctime)),
                # Creation date of the copied file
                copyTime=util.format_date_time(time.ctime(dataFile.st_ctime)),
                fileType=util.get_extension(destinationFile))

            forensicCase.append(fileCase)

        Comandos.log.debug('Copy files to destination finished')

        # TODO can the old files be deleted once they have been copied?

        Comandos.log.debug("Registers: " + pprint.pformat(forensicCase))

        # Connect to the database, creating the "filecase" table if it does not exist, and save all cases
        Comandos.log.info('Inserting metadata in database: %s', forensicCase)

        try:
            model.db.connect()
            model.db.create_table(model.FileCase, safe=True)
            with model.db.transaction():
                [fileCase.save(force_insert=True) for fileCase in forensicCase]
        except Exception:
            Comandos.log.error('Database error, aborting operation')
            return
        finally:
            model.db.close()

        Comandos.log.info('Metadata inserted in database')

        # Save the files to a text file with fields separated by |
        Comandos.log.info('Creating CSV file')
        csv = [fileCase.get_text_format() for fileCase in forensicCase]

        # Put each file of the case on a separate line and save to a file
        # The file name is the combination of case, id and alias, with a csv extension
        csvContent = util.join_list('\n', csv)
        csvName = self.caseName + '_' + str(
            self.idCase) + '_' + self.alias + '.csv'
        with open(csvName, 'w') as csvFile:
            csvFile.write(csvContent)
        Comandos.log.info('CSV file created')
        Comandos.log.info('Copy finished')
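
util.get_md5 is not shown either. A minimal sketch of such a helper, assuming it hashes the file contents in fixed-size chunks with hashlib, could be:

import hashlib

def get_md5(filepath, chunk_size=1 << 20):
    # Hypothetical sketch of the integrity hash used above.
    digest = hashlib.md5()
    with open(filepath, 'rb') as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()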
Example no. 15
        def _get_output_dir_from_ckpt_dir(ckpt_dir):
            dir_basename = util.get_basename(ckpt_dir, include_path=False)
            return f"{self.TRACK_PATH}/outputs/{dir_basename}"
def registration(moving_img, fixed, reg_type):
    # pylint: disable= too-many-statements
    """Image2Image registration """
    reg = ants.Registration()

    path = moving_img.temp_data_path
    name = util.get_basename(moving_img.pre_processed_filepath) + '_' + reg_type
    moving_img.processed_filepath = path + name + '_RegTo' + str(moving_img.fixed_image) + '.nii.gz'
    moving_img.transform = path + name + '_RegTo' + str(moving_img.fixed_image) + '.h5'

    init_moving_transform = moving_img.init_transform
    if init_moving_transform is not None and os.path.exists(init_moving_transform):
        print("Found initial transform")
        # reg.inputs.initial_moving_transform = init_moving_transform
        reg.inputs.initial_moving_transform_com = False
        mask = util.transform_volume(moving_img.label_inv_filepath,
                                     moving_img.init_transform, label_img=True)
    else:
        reg.inputs.initial_moving_transform_com = True
        mask = moving_img.label_inv_filepath
    reg.inputs.collapse_output_transforms = True
    reg.inputs.fixed_image = moving_img.pre_processed_filepath
    reg.inputs.fixed_image_mask = mask
    reg.inputs.moving_image = fixed
    reg.inputs.num_threads = 8
    if reg_type == RIGID:
        reg.inputs.transforms = ['Rigid']
        reg.inputs.metric = ['MI']
        reg.inputs.radius_or_number_of_bins = [32]
        reg.inputs.convergence_window_size = [5]
        reg.inputs.number_of_iterations = ([[10000, 10000, 10000, 10000, 10000]])
        reg.inputs.shrink_factors = [[5, 4, 3, 2, 1]]
        reg.inputs.smoothing_sigmas = [[4, 3, 2, 1, 0]]
        reg.inputs.sigma_units = ['vox']
        reg.inputs.transform_parameters = [(0.25,)]
        reg.inputs.use_histogram_matching = [True]
        reg.inputs.metric_weight = [1.0]
    elif reg_type == AFFINE:
        reg.inputs.transforms = ['Rigid', 'Affine']
        reg.inputs.metric = ['MI', ['MI', 'CC']]
        reg.inputs.metric_weight = [1] + [[0.5, 0.5]]
        reg.inputs.radius_or_number_of_bins = [32, [32, 4]]
        reg.inputs.convergence_window_size = [5, 5]
        reg.inputs.sampling_strategy = ['Regular'] + [[None, None]]
        reg.inputs.sampling_percentage = [0.5] + [[None, None]]
        if reg.inputs.initial_moving_transform_com:
            reg.inputs.number_of_iterations = ([[10000, 10000, 1000, 1000, 1000],
                                                [10000, 10000, 1000, 1000, 1000]])
            reg.inputs.shrink_factors = [[9, 5, 3, 2, 1], [5, 4, 3, 2, 1]]
            reg.inputs.smoothing_sigmas = [[8, 4, 2, 1, 0], [4, 3, 2, 1, 0]]
        else:
            reg.inputs.number_of_iterations = ([[5000, 5000, 1000, 500],
                                                [5000, 5000, 1000, 500]])
            reg.inputs.shrink_factors = [[7, 5, 2, 1], [4, 3, 2, 1]]
            reg.inputs.smoothing_sigmas = [[6, 4, 1, 0], [3, 2, 1, 0]]
        reg.inputs.convergence_threshold = [1.e-6] + [-0.01]
        reg.inputs.sigma_units = ['vox']*2
        reg.inputs.transform_parameters = [(0.25,),
                                           (0.25,)]
        reg.inputs.use_estimate_learning_rate_once = [True] * 2
        reg.inputs.use_histogram_matching = [False, True]
    elif reg_type == SYN:
        reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
        reg.inputs.metric = ['MI', 'MI', ['MI', 'CC']]
        reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]]
        reg.inputs.radius_or_number_of_bins = [32, 32, [32, 4]]
        reg.inputs.convergence_window_size = [5, 5, 5]
        reg.inputs.sampling_strategy = ['Regular'] * 2 + [[None, None]]
        reg.inputs.sampling_percentage = [0.5] * 2 + [[None, None]]
        if reg.inputs.initial_moving_transform_com:
            reg.inputs.number_of_iterations = ([[10000, 10000, 1000, 1000, 1000],
                                                [10000, 10000, 1000, 1000, 1000],
                                                [100, 75, 75, 75]])
            reg.inputs.shrink_factors = [[9, 5, 3, 2, 1], [5, 4, 3, 2, 1], [5, 3, 2, 1]]
            reg.inputs.smoothing_sigmas = [[8, 4, 2, 1, 0], [4, 3, 2, 1, 0], [4, 2, 1, 0]]
        else:
            reg.inputs.number_of_iterations = ([[5000, 5000, 1000, 500],
                                                [5000, 5000, 1000, 500],
                                                [100, 90, 75]])
            reg.inputs.shrink_factors = [[7, 5, 2, 1], [4, 3, 2, 1], [4, 2, 1]]
            reg.inputs.smoothing_sigmas = [[6, 4, 1, 0], [3, 2, 1, 0], [1, 0.5, 0]]
        reg.inputs.convergence_threshold = [1.e-6] * 2 + [-0.01]
        reg.inputs.sigma_units = ['vox']*3
        reg.inputs.transform_parameters = [(0.25,),
                                           (0.25,),
                                           (0.2, 3.0, 0.0)]
        reg.inputs.use_estimate_learning_rate_once = [True] * 3
        reg.inputs.use_histogram_matching = [False, False, True]
    else:
        raise Exception("Wrong registration format " + reg_type)
    reg.inputs.winsorize_lower_quantile = 0.005
    reg.inputs.winsorize_upper_quantile = 0.995
    reg.inputs.write_composite_transform = True
    reg.inputs.output_transform_prefix = path + name
    transform = path + name + 'InverseComposite.h5'

    if os.path.exists(moving_img.processed_filepath) and\
       os.path.exists(moving_img.transform):
        # generate_image(reg.inputs.output_warped_image, fixed)
        return moving_img
    print("starting registration")
    start_time = datetime.datetime.now()
    reg.run()
    print("Finished registration: ")
    print(datetime.datetime.now() - start_time)

    util.transform_volume(moving_img.pre_processed_filepath, transform,
                          outputpath=moving_img.processed_filepath)
    shutil.copy(transform, moving_img.transform)
    util.generate_image(moving_img.processed_filepath, fixed)

    return moving_img
Example no. 17
def process_tracts(folder):
    """ Post process data tumor volume"""
    util.setup(folder)
    print(folder)
    thres = 0.75
    atlas_paths = ["/mnt/b7cde2db-ac2d-4cbb-b2b0-a9b110f05d32/data/Segmentations/WM_tracts/Arcuate/Arcuate_Left.nii",
                   "/mnt/b7cde2db-ac2d-4cbb-b2b0-a9b110f05d32/data/Segmentations/WM_tracts/Arcuate/Arcuate_Right.nii",
                   "/mnt/b7cde2db-ac2d-4cbb-b2b0-a9b110f05d32/data/Segmentations/WM_tracts/Anterior_Commissure/Anterior_Commissure.nii",
                   "/mnt/b7cde2db-ac2d-4cbb-b2b0-a9b110f05d32/data/Segmentations/WM_tracts/Cerebellar/Cortico_Ponto_Cerebellum_Left.nii",
                   "/mnt/b7cde2db-ac2d-4cbb-b2b0-a9b110f05d32/data/Segmentations/WM_tracts/Cerebellar/Cortico_Ponto_Cerebellum_Right.nii",
                   "/mnt/b7cde2db-ac2d-4cbb-b2b0-a9b110f05d32/data/Segmentations/WM_tracts/Cerebellar/Inferior_Cerebellar_Pedunculus_Left.nii",
                   "/mnt/b7cde2db-ac2d-4cbb-b2b0-a9b110f05d32/data/Segmentations/WM_tracts/Cerebellar/Inferior_Cerebellar_Pedunculus_Right.nii",
                   "/mnt/b7cde2db-ac2d-4cbb-b2b0-a9b110f05d32/data/Segmentations/WM_tracts/Cerebellar/Superior_Cerebelar_Pedunculus_Left.nii",
                   "/mnt/b7cde2db-ac2d-4cbb-b2b0-a9b110f05d32/data/Segmentations/WM_tracts/Cerebellar/Superior_Cerebelar_Pedunculus_Right.nii",
                   "/mnt/b7cde2db-ac2d-4cbb-b2b0-a9b110f05d32/data/Segmentations/WM_tracts/Cingulum/Cingulum_Left.nii",
                   "/mnt/b7cde2db-ac2d-4cbb-b2b0-a9b110f05d32/data/Segmentations/WM_tracts/Cingulum/Cingulum_Right.nii",
                   "/mnt/b7cde2db-ac2d-4cbb-b2b0-a9b110f05d32/data/Segmentations/WM_tracts/Corpus_Callosum/Corpus_Callosum.nii",
                   "/mnt/b7cde2db-ac2d-4cbb-b2b0-a9b110f05d32/data/Segmentations/WM_tracts/Fornix/Fornix.nii",
                   "/mnt/b7cde2db-ac2d-4cbb-b2b0-a9b110f05d32/data/Segmentations/WM_tracts/Inferior_Network/Inferior_Longitudinal_Fasciculus_Left.nii",
                   "/mnt/b7cde2db-ac2d-4cbb-b2b0-a9b110f05d32/data/Segmentations/WM_tracts/Inferior_Network/Inferior_Longitudinal_Fasciculus_Right.nii",
                   "/mnt/b7cde2db-ac2d-4cbb-b2b0-a9b110f05d32/data/Segmentations/WM_tracts/Inferior_Network/Inferior_Occipito_Frontal_Fasciculus_Left.nii",
                   "/mnt/b7cde2db-ac2d-4cbb-b2b0-a9b110f05d32/data/Segmentations/WM_tracts/Inferior_Network/Inferior_Occipito_Frontal_Fasciculus_Right.nii",
                   "/mnt/b7cde2db-ac2d-4cbb-b2b0-a9b110f05d32/data/Segmentations/WM_tracts/Inferior_Network/Uncinate_Left.nii",
                   "/mnt/b7cde2db-ac2d-4cbb-b2b0-a9b110f05d32/data/Segmentations/WM_tracts/Inferior_Network/Uncinate_Right.nii",
                   "/mnt/b7cde2db-ac2d-4cbb-b2b0-a9b110f05d32/data/Segmentations/WM_tracts/Optic_Radiations/Optic_Radiations_Left.nii",
                   "/mnt/b7cde2db-ac2d-4cbb-b2b0-a9b110f05d32/data/Segmentations/WM_tracts/Optic_Radiations/Optic_Radiations_Right.nii",
                   "/mnt/b7cde2db-ac2d-4cbb-b2b0-a9b110f05d32/data/Segmentations/WM_tracts/Perisylvian/Anterior_Segment_Left.nii",
                   "/mnt/b7cde2db-ac2d-4cbb-b2b0-a9b110f05d32/data/Segmentations/WM_tracts/Perisylvian/Anterior_Segment_Right.nii",
                   "/mnt/b7cde2db-ac2d-4cbb-b2b0-a9b110f05d32/data/Segmentations/WM_tracts/Perisylvian/Long_Segment_Left.nii",
                   "/mnt/b7cde2db-ac2d-4cbb-b2b0-a9b110f05d32/data/Segmentations/WM_tracts/Perisylvian/Long_Segment_Right.nii",
                   "/mnt/b7cde2db-ac2d-4cbb-b2b0-a9b110f05d32/data/Segmentations/WM_tracts/Perisylvian/Posterior_Segment_Left.nii",
                   "/mnt/b7cde2db-ac2d-4cbb-b2b0-a9b110f05d32/data/Segmentations/WM_tracts/Perisylvian/Posterior_Segment_Right.nii",
                   "/home/dahoiv/disk/Dropbox/Jobb/gbm/FINAL_RES_GBM_0919_09_06_2017/WM_tracts/Projections/Internal_Capsule.nii"
                   ]

    for atlas_path in atlas_paths:
        tract = util.get_basename(atlas_path)
        resample = slicer.registration.brainsresample.BRAINSResample(command=util.BRAINSResample_PATH,
                                                                     inputVolume=atlas_path,
                                                                     outputVolume=os.path.abspath(folder + tract + '.nii.gz'),
                                                                     referenceVolume=os.path.abspath(util.TEMPLATE_VOLUME))
        print(resample.cmdline)
        resample.run()

    conn = sqlite3.connect(util.DB_PATH, timeout=120)
    conn.text_factory = str
    cursor = conn.execute('''SELECT pid from Patient where study_id = ?''', ("qol_grade3,4", ))

    book = Workbook()
    sheet = book.active

    sheet.cell(row=1, column=1).value = 'PID'

    k = 2
    for pid in cursor:
        pid = pid[0]

        _id = conn.execute('''SELECT id from Images where pid = ?''', (pid, )).fetchone()
        if not _id:
            print("---No data for ", pid)
            continue
        _id = _id[0]

        _filepath = conn.execute("SELECT filepath_reg from Labels where image_id = ?",
                                 (_id, )).fetchone()[0]
        if _filepath is None:
            print("No filepath for ", pid)
            continue

        tumor_data = nib.load(util.DATA_FOLDER + _filepath).get_data()

        sheet.cell(row=k, column=1).value = pid
        m = 1
        for atlas_path in atlas_paths:
            tract = util.get_basename(atlas_path)
            if 'Internal_Capsule' not in tract:
                continue
            m += 1
            sheet.cell(row=1, column=m).value = tract
            atlas_data = nib.load(folder + tract + '.nii.gz').get_data()
            union_data = atlas_data * tumor_data

            sheet.cell(row=k, column=m).value = '1' if np.max(union_data) >= thres else '0'
        k += 1

    book.save("brain_tracts_Internal_Capsule.xlsx")