# Exemplo n.º 1 (scraped example separator; score: 0)
                                log_level=logging.INFO)
            continue
        layer_num = get_layer_num(sec_num, args.initial_layer_num,
                                  args.reverse, max_sec_num)

        sec_relevant_mfovs = None
        if filtered_mfovs_map is not None:
            if (wafer_num, sec_num) not in filtered_mfovs_map:
                logger.report_event(
                    "WARNING: cannot find filtered data for (wafer, sec): {}, skipping"
                    .format((wafer_num, sec_num)),
                    log_level=logging.INFO)
                continue
            sec_relevant_mfovs = filtered_mfovs_map[wafer_num, sec_num]

        res = pool.apply_async(create_and_save_single_section,
                               (sec_relevant_mfovs, sections_map[sec_num],
                                layer_num, args.wafer_folder, out_ts_fname))
        pool_results.append(res)

    for res in pool_results:
        res.get()


if __name__ == '__main__':
    # Parse CLI options, run tilespec creation, and bracket the run with
    # process-level log events.
    cli_args = parse_args()

    logger.start_process('main', 'create_tilespecs.py', [cli_args])
    create_tilespecs(cli_args)
    logger.end_process('main ending', rh_logger.ExitCode(0))
# Exemplo n.º 2 (scraped example separator; score: 0)
        with in_fs.open(fs.path.basename(in_ts_fname), 'rt') as in_f:
            in_ts = ujson.load(in_f)

        wafer_num = int(
            fs.path.basename(in_ts_fname).split('_')[0].split('W')[1])
        sec_num = int(
            fs.path.basename(in_ts_fname).split('.')[0].split('_')[1].split(
                'Sec')[1])
        section = Section.create_from_tilespec(in_ts,
                                               wafer_section=(wafer_num,
                                                              sec_num))
        stitcher.stitch_section(section)

        # Save the tilespec
        section.save_as_json(out_ts_fname)


#         out_tilespec = section.tilespec
#         import json
#         with open(out_ts_fname, 'wt') as out_f:
#             json.dump(out_tilespec, out_f, sort_keys=True, indent=4)

    del stitcher

if __name__ == '__main__':
    # Entry point: run the 2D stitcher driver with logging around the call.
    parsed = parse_args()

    logger.start_process('main', '2d_stitcher_driver.py', [parsed])
    run_stitcher(parsed)
    logger.end_process('main ending', rh_logger.ExitCode(0))
        'tile_files_or_dirs',
        metavar='tile_files_or_dirs',
        type=str,
        nargs='+',
        help=
        'a list of json files that need to be normalized or a directories of json files'
    )
    parser.add_argument('-o',
                        '--output_dir',
                        type=str,
                        help='an output directory (default: ./after_norm)',
                        default='./after_norm')
    parser.add_argument('-p',
                        '--processes_num',
                        type=int,
                        help='number of processes (default: 1)',
                        default=1)

    args = parser.parse_args()

    logger.start_process('normalize_coordinates', 'normalize_coordinates.py',
                         [args.tile_files_or_dirs, args.output_dir])
    pool = mp.Pool(processes=args.processes_num)

    normalize_coordinates(args.tile_files_or_dirs, args.output_dir, pool)

    pool.close()
    pool.join()

    logger.end_process('normalize_coordinates', rh_logger.ExitCode(0))
    #     stitcher.stitch_section(section) # will stitch and update the section
    #
    #     # Save the transforms to file
    #     import json
    #     print('Writing output to: {}'.format(out_fname))
    #     section.save_as_json(out_fname)
    # #     img_fnames, imgs = StackAligner.read_imgs(imgs_dir)
    # #     for img_fname, img, transform in zip(img_fnames, imgs, transforms):
    # #         # assumption: the output image shape will be the same as the input image
    # #         out_fname = os.path.join(out_path, os.path.basename(img_fname))
    # #         img_transformed = cv2.warpAffine(img, transform[:2,:], (img.shape[1], img.shape[0]), flags=cv2.INTER_AREA)
    # #         cv2.imwrite(out_fname, img_transformed)

    # Testing
    #    test_detector('/n/home10/adisuis/Harvard/git/rh_aligner/tests/ECS_test9_cropped/images/010_S10R1', conf_fname, 8, 500)

    logger.start_process('main', 'stitcher.py', [section_dir, conf_fname])
    section = ThumbnailsSection.create_from_full_thumbnail_coordinates(
        section_dir, section_num, processes_num=processes_num)
    conf = Stitcher.load_conf_from_file(conf_fname)
    stitcher = Stitcher(conf)
    stitcher.stitch_section(
        section)  # will stitch and update the section tiles' transformations

    # render the stitched section
    ds_rate = 1.0 / 8
    render_section(out_jpg_fname, section, ds_rate)

    # TODO - output the section
    logger.end_process('main ending', rh_logger.ExitCode(0))
# Exemplo n.º 5 (scraped example separator; score: 0)
                                                               2].T + noiseA

    transformB = np.array([[np.cos(50), -np.sin(50), 600],
                           [np.sin(50), np.cos(50), 250], [0., 0., 1.]])
    ptsB_1 = np.random.uniform(0, 800, ((100, 2)))
    noiseB = np.random.uniform(-7, 7, ptsB_1.shape)
    ptsB_2 = np.dot(ptsB_1, transformB[:2, :2].T) + transformB[:2,
                                                               2].T + noiseB

    # Add outliers
    ptsC_1 = np.random.uniform(0, 800, ((5, 2)))
    noiseC = np.random.uniform(-3, 3, ptsC_1.shape)
    ptsC_2 = ptsC_1 + noiseC

    # stack the matches
    pts1 = np.vstack((ptsA_1, ptsB_1, ptsC_1))
    pts2 = np.vstack((ptsA_2, ptsB_2, ptsC_2))

    logger.start_process('main', 'affine_transforms_grouper.py', [])
    # create the clusterer
    grouper = AffineTransformsGrouper()

    groups_masks, outlier_mask = grouper.group_matches(pts1, pts2)

    print("Found {} groups masks".format(len(groups_masks)))
    for gm_idx, gm in enumerate(groups_masks):
        print("Group masks {}: size:{}".format(gm_idx, np.sum(gm)))

    print("Found {} outlier matches".format(np.sum(outlier_mask)))
    logger.end_process('main ending', rh_logger.ExitCode(0))
# Exemplo n.º 6 (scraped example separator; score: 0)
    # TODO - Should be done in a parallel fashion
    for ts_fname in secs_ts_fnames:
        with open(ts_fname, 'rt') as in_f:
            tilespec = ujson.load(in_f)

        wafer_num = int(os.path.basename(ts_fname).split('_')[0].split('W')[1])
        sec_num = int(
            os.path.basename(ts_fname).split('.')[0].split('_')[1].split('Sec')
            [1])
        sections.append(
            Section.create_from_tilespec(tilespec,
                                         wafer_section=(wafer_num, sec_num)))

    logger.report_event("Initializing aligner", log_level=logging.INFO)
    aligner = StackAligner(conf)
    logger.report_event("Aligning sections", log_level=logging.INFO)
    aligner.align_sections(
        sections)  # will align and update the section tiles' transformations

    del aligner

    logger.end_process('main ending', rh_logger.ExitCode(0))


if __name__ == '__main__':
    # Entry point: parse arguments, run the 3D aligner, and log start/end.
    cli_args = parse_args()

    logger.start_process('main', 'align_3d_tilespecs_list.py', [cli_args])
    run_aligner(cli_args)
    logger.end_process('main ending', rh_logger.ExitCode(0))
# Exemplo n.º 7 (scraped example separator; score: 0)
            args.output_dir, 'W{}_Sec{}_montaged.json'.format(
                str(wafer_num).zfill(2),
                str(sec_num).zfill(3)))
        if os.path.exists(out_ts_fname):
            continue

        layer_num = get_layer_num(sec_num, args.initial_layer_num)

        print("Stitching {}".format(sec_dir))
        section = Section.create_from_full_image_coordinates(
            os.path.join(sec_dir, 'full_image_coordinates.txt'), layer_num)
        stitcher.stitch_section(section)

        # Save the tilespec
        section.save_as_json(out_ts_fname)


#         out_tilespec = section.tilespec
#         import json
#         with open(out_ts_fname, 'wt') as out_f:
#             json.dump(out_tilespec, out_f, sort_keys=True, indent=4)

    del stitcher

if __name__ == '__main__':
    # Entry point: stitch every raw folder in the list, with process logging.
    parsed = parse_args()

    logger.start_process('main', 'stitch_2d_raw_folders_list.py', [parsed])
    run_stitcher(parsed)
    logger.end_process('main ending', rh_logger.ExitCode(0))
# Exemplo n.º 8 (scraped example separator; score: 0)


if __name__ == '__main__':
    secs_ts_fnames = [
        '/n/home10/adisuis/Harvard/git_lichtmangpu01/mb_aligner/scripts/ECS_test9_cropped_010_S10R1.json',
        '/n/home10/adisuis/Harvard/git_lichtmangpu01/mb_aligner/scripts/ECS_test9_cropped_011_S11R1.json',
        '/n/home10/adisuis/Harvard/git_lichtmangpu01/mb_aligner/scripts/ECS_test9_cropped_012_S12R1.json',
        '/n/home10/adisuis/Harvard/git_lichtmangpu01/mb_aligner/scripts/ECS_test9_cropped_013_S13R1.json',
        '/n/home10/adisuis/Harvard/git_lichtmangpu01/mb_aligner/scripts/ECS_test9_cropped_014_S14R1.json'
    ]
    out_folder = './output_aligned_ECS_test9_cropped'
    conf_fname = '../../conf/conf_example.yaml'


    logger.start_process('main', 'aligner.py', [secs_ts_fnames, conf_fname])
    conf = StackAligner.load_conf_from_file(conf_fname)
    logger.report_event("Loading sections", log_level=logging.INFO)
    sections = []
    # TODO - Should be done in a parallel fashion
    for ts_fname in secs_ts_fnames:
        with open(ts_fname, 'rt') as in_f:
            tilespec = ujson.load(in_f)
        wafer_num = 1
        sec_num = int(os.path.basename(ts_fname).split('_')[-1].split('S')[1].split('R')[0])
        sections.append(Section.create_from_tilespec(tilespec, wafer_section=(wafer_num, sec_num)))
    logger.report_event("Initializing aligner", log_level=logging.INFO)
    aligner = StackAligner(conf)
    logger.report_event("Aligning sections", log_level=logging.INFO)
    aligner.align_sections(sections) # will align and update the section tiles' transformations
# Exemplo n.º 9 (scraped example separator; score: 0)
            log_level=logging.WARN)
        missing_sections = [
            i for i in range(1, max(sorted_sec_keys)) if i not in sections_map
        ]
        logger.report_event("Missing sections: {}".format(missing_sections),
                            log_level=logging.WARN)

    logger.report_event("Outputing sections to list to: {}".format(
        args.output_file),
                        log_level=logging.INFO)

    with open(args.output_file, 'wt') as out_f:
        for sec_num in sorted_sec_keys:
            if isinstance(sections_map[sec_num], list):
                folder_name = os.sep.join(
                    sections_map[sec_num][0].split(os.sep)[:-2]
                )  # strip the mfov path, and the coordinates file name
            else:
                folder_name = os.sep.join(sections_map[sec_num].split(
                    os.sep)[:-1])  # strip the coordinates file name
            out_f.write(folder_name)
            out_f.write('\n')


if __name__ == '__main__':
    # Entry point: build the raw-folders list file, bracketed by log events.
    cli_args = parse_args()

    logger.start_process('main', 'create_raw_folders_list.py', [cli_args])
    create_raw_folders_list(cli_args)
    logger.end_process('main ending', rh_logger.ExitCode(0))
# Exemplo n.º 10 (scraped example separator; score: 0)
    @staticmethod
    def align_img_files(imgs_dir, conf, processes_num):
        """Read all images under *imgs_dir* and align them.

        Returns whatever ``align_imgs`` produces (the per-image transforms).
        """
        # Only the pixel data is needed here; the file names are discarded.
        _, loaded_imgs = StackAligner.read_imgs(imgs_dir)
        stack_aligner = StackAligner(conf, processes_num)
        return stack_aligner.align_imgs(loaded_imgs)


if __name__ == '__main__':
    imgs_dir = '/n/coxfs01/paragt/Adi/R0/images_margin'
    conf_fname = '../conf_example.yaml'
    out_path = './output_imgs'
    processes_num = 8

    logger.start_process('main', 'aligner.py',
                         [imgs_dir, conf_fname, out_path, processes_num])
    conf = StackAligner.load_conf_from_file(conf_fname)
    transforms = StackAligner.align_img_files(imgs_dir, conf, processes_num)

    # Save the transforms to a temp output folder
    if not os.path.exists(out_path):
        os.makedirs(out_path)

    print('Writing output to: {}'.format(out_path))
    img_fnames, imgs = StackAligner.read_imgs(imgs_dir)
    for img_fname, img, transform in zip(img_fnames, imgs, transforms):
        # assumption: the output image shape will be the same as the input image
        out_fname = os.path.join(out_path, os.path.basename(img_fname))
        img_transformed = cv2.warpAffine(img,
                                         transform[:2, :],
                                         (img.shape[1], img.shape[0]),
# Exemplo n.º 11 (scraped example separator; score: 0)
    parser.add_argument("-s",
                        "--sec_num",
                        metavar="sec_num",
                        type=int,
                        help="The section number. (default: 1)",
                        default=1)

    return parser.parse_args(args)


def run_stitcher(args):
    """Stitch a single section from a tile-coordinates file.

    Builds the section from ``args.images_coords_file`` (using
    ``args.sec_num`` as its section number), runs the configured stitcher
    on it, and writes the updated tilespec to ``args.output_json``.
    """
    section = Section.create_from_full_image_coordinates(
        args.images_coords_file, args.sec_num)

    # Load the stitcher configuration and stitch; the section's tile
    # transformations are updated in place.
    stitcher = Stitcher(Stitcher.load_conf_from_file(args.conf_fname))
    stitcher.stitch_section(section)

    # Persist the resulting transforms to disk.
    print('Writing output to: {}'.format(args.output_json))
    section.save_as_json(args.output_json)


if __name__ == '__main__':
    # Entry point: stitch one 2D section, with process-level logging.
    parsed = parse_args()

    logger.start_process('main', 'stitch_2d.py', [parsed])
    run_stitcher(parsed)
    logger.end_process('main ending', rh_logger.ExitCode(0))