Example #1
# cloudvolume, task-queue (LocalTaskQueue) and igneous are real PyPI packages;
# FileLocationManager, calculate_chunks, calculate_factors and get_cpus are
# helpers from this pipeline's own utility modules, whose import paths are
# omitted here. The later examples assume the same imports.
import os
import sys

import igneous.task_creation as tc
from cloudvolume import CloudVolume
from taskqueue import LocalTaskQueue


def create_downsamples(animal, channel, suffix, downsample):
    fileLocationManager = FileLocationManager(animal)
    channel_outdir = f'C{channel}'
    first_chunk = calculate_chunks(downsample, 0)
    mips = [0, 1, 2, 3, 4, 5, 6, 7]

    if downsample:
        channel_outdir += 'T'
        mips = [0, 1]

    outpath = os.path.join(fileLocationManager.neuroglancer_data,
                           f'{channel_outdir}')
    outpath = f'file://{outpath}'
    if suffix is not None:
        outpath += suffix

    channel_outdir += "_rechunkme"
    INPUT_DIR = os.path.join(fileLocationManager.neuroglancer_data,
                             f'{channel_outdir}')

    if not os.path.exists(INPUT_DIR):
        print(f'DIR {INPUT_DIR} does not exist, exiting.')
        sys.exit()

    cloudpath = f"file://{INPUT_DIR}"
    _, workers = get_cpus()
    tq = LocalTaskQueue(parallel=workers)

    tasks = tc.create_transfer_tasks(cloudpath,
                                     dest_layer_path=outpath,
                                     chunk_size=first_chunk,
                                     mip=0,
                                     skip_downsamples=True)
    tq.insert(tasks)
    tq.execute()

    # downsampling up to mip 7 shows good results in neuroglancer
    for mip in mips:
        cv = CloudVolume(outpath, mip)
        chunks = calculate_chunks(downsample, mip)
        factors = calculate_factors(downsample, mip)
        tasks = tc.create_downsampling_tasks(cv.layer_cloudpath,
                                             mip=mip,
                                             num_mips=1,
                                             factor=factors,
                                             preserve_chunk_size=False,
                                             compress=True,
                                             chunk_size=chunks)
        tq.insert(tasks)
        tq.execute()

    print("Done!")
Example #2
def add_downsampled_volumes(self, chunk_size=[128, 128, 64], num_mips=4):
    if self.precomputed_vol is None:
        raise NotImplementedError(
            'You have to call init_precomputed before calling this function.'
        )
    _, cpus = get_cpus()
    tq = LocalTaskQueue(parallel=cpus)
    tasks = tc.create_downsampling_tasks(
        self.precomputed_vol.layer_cloudpath,
        preserve_chunk_size=False,
        num_mips=num_mips,
        chunk_size=chunk_size,
        compress=True)
    tq.insert(tasks)
    tq.execute()
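
For intuition about num_mips: each extra mip level conventionally halves x and y while leaving z alone for anisotropic serial sections. The arithmetic below is purely illustrative; the 325 nm base resolution is a placeholder, not a value taken from this pipeline.

# Illustrative only: voxel size grows by 2x in x/y per mip level.
def mip_resolution(mip, base_nm=(325, 325, 20000)):
    return (base_nm[0] * 2 ** mip, base_nm[1] * 2 ** mip, base_nm[2])

for m in range(5):
    print(m, mip_resolution(m))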
Example #3
def add_rechunking(self, outpath, downsample, chunks=None):
    if self.precomputed_vol is None:
        raise NotImplementedError(
            'You have to call init_precomputed before calling this function.'
        )
    cpus, _ = get_cpus()
    tq = LocalTaskQueue(parallel=cpus)
    outpath = f'file://{outpath}'
    if chunks is None:
        chunks = calculate_chunks(downsample, 0)
    tasks = tc.create_transfer_tasks(self.precomputed_vol.layer_cloudpath,
                                     dest_layer_path=outpath,
                                     chunk_size=chunks,
                                     skip_downsamples=True)
    tq.insert(tasks)
    tq.execute()
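
A hedged usage sketch for add_rechunking, assuming an already-initialized NumpyToNeuroglancer instance named ng; the output paths are placeholders.

# Hypothetical calls; 'ng' and both paths are illustrative only.
ng.add_rechunking('/data/neuroglancer_data/C1', downsample=False)   # chunks from calculate_chunks(False, 0)
ng.add_rechunking('/data/neuroglancer_data/C1T', downsample=True,
                  chunks=[64, 64, 64])                              # explicit chunk override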
Example #4
def add_segmentation_mesh(self, shape=[448, 448, 448], mip=0):
    if self.precomputed_vol is None:
        raise NotImplementedError(
            'You have to call init_precomputed before calling this function.'
        )

    _, cpus = get_cpus()
    tq = LocalTaskQueue(parallel=cpus)
    tasks = tc.create_meshing_tasks(
        self.precomputed_vol.layer_cloudpath,
        mip=mip,
        max_simplification_error=40,
        shape=shape,
        compress=True)  # first phase: generate the mesh fragments
    tq.insert(tasks)
    tq.execute()
    # The manifest tasks should arguably be folded into the meshing tasks
    # above, but doing so triggers a bug of unknown cause.
    tasks = tc.create_mesh_manifest_tasks(
        self.precomputed_vol.layer_cloudpath
    )  # second phase: write the mesh manifests
    tq.insert(tasks)
    tq.execute()
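
A usage sketch under the same assumption of an initialized ng; the alternate shape and mip values are illustrative, not recommendations from the source.

# Hypothetical calls; parameter values are illustrative only.
ng.add_segmentation_mesh()                              # defaults: 448^3 task shape at mip 0
ng.add_segmentation_mesh(shape=[256, 256, 256], mip=1)  # smaller tasks on a downsampled level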
Example #5
def create_neuroglancer(animal, channel, downsample, debug=False):
    fileLocationManager = FileLocationManager(animal)
    sqlController = SqlController(animal)
    channel_dir = f'CH{channel}'
    channel_outdir = f'C{channel}T_rechunkme'
    INPUT = os.path.join(fileLocationManager.prep, channel_dir, 'thumbnail_aligned')
    db_resolution = sqlController.scan_run.resolution
    resolution = int(db_resolution * 1000 / SCALING_FACTOR)
    workers, _ = get_cpus()
    chunks = calculate_chunks(downsample, -1)
    progress_id = sqlController.get_progress_id(downsample, channel, 'NEUROGLANCER')
    sqlController.session.close()
    if not downsample:
        INPUT = os.path.join(fileLocationManager.prep, channel_dir, 'full_aligned')
        channel_outdir = f'C{channel}_rechunkme'
        sqlController.set_task(animal, progress_id)

        if 'thion' in sqlController.histology.counterstain:
            sqlController.set_task(animal, RUN_PRECOMPUTE_NEUROGLANCER_CHANNEL_2_FULL_RES)
            sqlController.set_task(animal, RUN_PRECOMPUTE_NEUROGLANCER_CHANNEL_3_FULL_RES)

        resolution = int(db_resolution * 1000)

    OUTPUT_DIR = os.path.join(fileLocationManager.neuroglancer_data, f'{channel_outdir}')

    error = test_dir(animal, INPUT, downsample, same_size=True)
    if len(error) > 0 and not debug:
        print(error)
        sys.exit()

    os.makedirs(OUTPUT_DIR, exist_ok=True)
    files = sorted(os.listdir(INPUT))
    midpoint = len(files) // 2
    midfilepath = os.path.join(INPUT, files[midpoint])
    midfile = io.imread(midfilepath, img_num=0)
    height = midfile.shape[0]
    width = midfile.shape[1]
    num_channels = midfile.shape[2] if len(midfile.shape) > 2 else 1
    file_keys = []
    scales = (resolution, resolution, 20000)
    volume_size = (width, height, len(files))
    print('Volume shape:', volume_size)

    ng = NumpyToNeuroglancer(animal, None, scales, 'image', midfile.dtype, num_channels=num_channels, chunk_size=chunks)
    ng.init_precomputed(OUTPUT_DIR, volume_size, progress_id=progress_id)

    for i, f in enumerate(files):
        filepath = os.path.join(INPUT, f)
        file_keys.append([i, filepath])
        # For serial debugging, call ng.process_3channel([i, filepath]) here
        # instead of using the process pool below.

    start = timer()
    print(f'Working on {len(file_keys)} files with {workers} cpus')
    with ProcessPoolExecutor(max_workers=workers) as executor:
        if num_channels == 1:
            executor.map(ng.process_image, sorted(file_keys))
        else:
            executor.map(ng.process_3channel, sorted(file_keys))


    end = timer()
    print(f'Create volume method took {end - start} seconds')
    ng.precomputed_vol.cache.flush()
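
An invocation sketch plus a stand-in that shows the [index, filepath] contract the pooled workers receive. Both the animal ID and the stand-in body are assumptions; the real process_image writes each section into the precomputed volume.

# Hypothetical invocation; 'DK52' is a placeholder animal ID.
create_neuroglancer('DK52', channel=1, downsample=True)

# Stand-in illustrating the worker contract only.
def process_image_sketch(file_key):
    index, filepath = file_key          # each key is [z-index, path]
    section = io.imread(filepath, img_num=0)
    return index, section.shape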
Example #6
def create_mesh(animal, limit, mse):
    # NOTE: 'limit' is accepted but never used in this snippet.
    #chunks = calculate_chunks('full', -1)
    chunks = [64, 64, 1]
    scales = (10400, 10400, 20000)
    fileLocationManager = FileLocationManager(animal)
    INPUT = "/net/birdstore/Active_Atlas_Data/data_root/pipeline_data/DK52/preps/CH3/shapes"
    OUTPUT1_DIR = os.path.join(fileLocationManager.neuroglancer_data, 'mesh_input')
    OUTPUT2_DIR = os.path.join(fileLocationManager.neuroglancer_data, 'mesh')
    if 'ultraman' in get_hostname():
        if os.path.exists(OUTPUT1_DIR):
            shutil.rmtree(OUTPUT1_DIR)
        if os.path.exists(OUTPUT2_DIR):
            shutil.rmtree(OUTPUT2_DIR)

    files = sorted(os.listdir(INPUT))

    os.makedirs(OUTPUT1_DIR, exist_ok=True)
    os.makedirs(OUTPUT2_DIR, exist_ok=True)

    len_files = len(files)
    midpoint = len_files // 2
    midfilepath = os.path.join(INPUT, files[midpoint])
    midfile = io.imread(midfilepath)
    data_type = midfile.dtype
    #image = np.load('/net/birdstore/Active_Atlas_Data/data_root/pipeline_data/structures/allen/allen.npy')
    #ids = np.unique(image)
    ids = {'infrahypoglossal': 200, 'perifacial': 210, 'suprahypoglossal': 220}

    height, width = midfile.shape
    volume_size = (width, height, len(files))  # neuroglancer ordering: width, height, sections
    print('volume size', volume_size)
    ng = NumpyToNeuroglancer(animal, None, scales, layer_type='segmentation', 
        data_type=data_type, chunk_size=chunks)
    ng.init_precomputed(OUTPUT1_DIR, volume_size, progress_id=1)

    file_keys = []
    for i, f in enumerate(tqdm(files)):
        infile = os.path.join(INPUT, f)
        file_keys.append([i, infile])
        # For serial debugging, call ng.process_image([i, infile]) here
        # instead of using the process pool below.

    start = timer()
    workers, _ = get_cpus()
    print(f'Working on {len(file_keys)} files with {workers} cpus')
    with ProcessPoolExecutor(max_workers=workers) as executor:
        executor.map(ng.process_image, sorted(file_keys), chunksize=1)
        # the with-block calls shutdown(wait=True) implicitly on exit

    ng.precomputed_vol.cache.flush()

    end = timer()
    print(f'Create volume method took {end - start} seconds')


    ##### rechunk
    cloudpath1 = f"file://{OUTPUT1_DIR}"
    _, workers = get_cpus()
    tq = LocalTaskQueue(parallel=workers)
    cloudpath2 = f'file://{OUTPUT2_DIR}'

    tasks = tc.create_transfer_tasks(cloudpath1, dest_layer_path=cloudpath2,
                                     chunk_size=[64, 64, 64], mip=0,
                                     skip_downsamples=True)

    tq.insert(tasks)
    tq.execute()

    ##### add segment properties
    cv2 = CloudVolume(cloudpath2, 0)
    cv2.info['segment_properties'] = 'names'
    cv2.commit_info()

    segment_properties_path = os.path.join(cloudpath2.replace('file://', ''), 'names')
    os.makedirs(segment_properties_path, exist_ok=True)

    info = {
        "@type": "neuroglancer_segment_properties",
        "inline": {
            "ids": [str(value) for key, value in ids.items()],
            "properties": [{
                "id": "label",
                "type": "label",
                "values": [str(key) for key, value in ids.items()]
            }]
        }
    }
    with open(os.path.join(segment_properties_path, 'info'), 'w') as file:
        json.dump(info, file, indent=2)

    ##### first mesh task, create meshing tasks
    workers, _ = get_cpus()
    tq = LocalTaskQueue(parallel=workers)
    mesh_dir = f'mesh_mip_0_err_{mse}'
    cv2.info['mesh'] = mesh_dir
    cv2.commit_info()
    tasks = tc.create_meshing_tasks(cv2.layer_cloudpath, mip=0, mesh_dir=mesh_dir, max_simplification_error=mse)
    tq.insert(tasks)
    tq.execute()
    ##### 2nd mesh task, create manifest
    tasks = tc.create_mesh_manifest_tasks(cv2.layer_cloudpath, mesh_dir=mesh_dir)
    tq.insert(tasks)
    tq.execute()
    
    print("Done!")
Example #7
def run_offsets(animal, transforms, channel, downsample, masks, create_csv,
                allen):
    """
    This gets the dictionary from the above method, and uses the coordinates
    to feed into the Imagemagick convert program. This method also uses a Pool to spawn multiple processes.
    Args:
        animal: the animal
        transforms: the dictionary of file, coordinates
        limit: number of jobs
    Returns: nothing
    """
    fileLocationManager = FileLocationManager(animal)
    sqlController = SqlController(animal)
    channel_dir = 'CH{}'.format(channel)
    INPUT = os.path.join(fileLocationManager.prep, channel_dir,
                         'thumbnail_cleaned')
    OUTPUT = os.path.join(fileLocationManager.prep, channel_dir,
                          'thumbnail_aligned')

    if not downsample:
        INPUT = os.path.join(fileLocationManager.prep, channel_dir,
                             'full_cleaned')
        OUTPUT = os.path.join(fileLocationManager.prep, channel_dir,
                              'full_aligned')

    error = test_dir(animal, INPUT, downsample=downsample, same_size=True)
    if len(error) > 0 and not create_csv:
        print(error)
        sys.exit()

    if masks:
        INPUT = os.path.join(fileLocationManager.prep, 'rotated_masked')
        error = test_dir(animal, INPUT, full=False, same_size=True)
        if len(error) > 0:
            print(error)
            sys.exit()
        OUTPUT = os.path.join(fileLocationManager.prep,
                              'rotated_aligned_masked')

    os.makedirs(OUTPUT, exist_ok=True)
    progress_id = sqlController.get_progress_id(downsample, channel, 'ALIGN')
    sqlController.set_task(animal, progress_id)

    warp_transforms = create_warp_transforms(animal, transforms, 'thumbnail',
                                             downsample)
    ordered_transforms = OrderedDict(sorted(warp_transforms.items()))
    file_keys = []
    r90 = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
    for i, (file, T) in enumerate(ordered_transforms.items()):
        if allen:
            ROT_DIR = os.path.join(fileLocationManager.root, animal,
                                   'rotations')
            rotfile = file.replace('tif', 'txt')
            rotfile = os.path.join(ROT_DIR, rotfile)
            R_cshl = np.loadtxt(rotfile)
            R_cshl[0, 2] = R_cshl[0, 2] / 32
            R_cshl[1, 2] = R_cshl[1, 2] / 32
            R_cshl = R_cshl @ r90
            R_cshl = np.linalg.inv(R_cshl)
            # compose the Allen rotation into the transform; the original
            # code assigned this to a variable R that was never used
            T = T @ R_cshl
        infile = os.path.join(INPUT, file)
        outfile = os.path.join(OUTPUT, file)
        if os.path.exists(outfile) and not create_csv:
            continue

        file_keys.append([i, infile, outfile, T])

    if create_csv:
        create_csv_data(animal, file_keys)
    else:
        start = timer()
        workers, _ = get_cpus()
        print(f'Working on {len(file_keys)} files with {workers} cpus')
        with ProcessPoolExecutor(max_workers=workers) as executor:
            executor.map(process_image, sorted(file_keys))

        end = timer()
        print(f'Creating aligned files took {end - start} seconds total',
              end="\t")
        print(f'{(end - start) / len(file_keys)} seconds per file')

    print('Finished')
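
Each worker receives [index, infile, outfile, T] with T a 3x3 affine matrix. The pipeline's actual process_image is not shown here (the docstring mentions ImageMagick); the scikit-image stand-in below is an assumption that illustrates applying such a matrix to one section.

# Hypothetical stand-in for process_image; the real worker may differ.
import numpy as np
from skimage import io, transform

def process_image_sketch(file_key):
    _, infile, outfile, T = file_key
    img = io.imread(infile)
    # warp() expects the inverse map (output coords -> input coords)
    warped = transform.warp(img,
                            transform.AffineTransform(matrix=np.linalg.inv(T)),
                            preserve_range=True).astype(img.dtype)
    io.imsave(outfile, warped)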