Code Example #1
    def move_section(self, stack, section_number, change):
        min_id = session.query(func.min(RawSection.section_number)).filter(RawSection.prep_id == stack)\
            .filter(RawSection.active == 1).scalar()
        max_id = session.query(func.max(RawSection.section_number)).filter(RawSection.prep_id == stack)\
            .filter(RawSection.active == 1).scalar()
        set_to = section_number
        """
        For both operations, you need to set the next/previous section to a dummy number,
        as there is a unique constraint on prep_id, section_number, channel.

        If we are moving a section to the left, set the preceding section number to the current section
        and the current section to the preceding one. Two updates.
        Likewise, if we are moving it to the right, set the next section number to the current section
        and the current section to the next one. Two updates.
        Only do either of these updates if there is an allowable active section number to move
        them to.
        """
        DUMMY_SECTION_NUMBER = 9999
        if change == -1:
            if section_number > min_id:
                set_to += change
                preceding_section = section_number - 1
                self.change_section_number(stack, preceding_section, DUMMY_SECTION_NUMBER)
                self.change_section_number(stack, section_number, set_to)
                self.change_section_number(stack, DUMMY_SECTION_NUMBER, section_number)
        else:
            if section_number < max_id:
                set_to += change
                next_section = section_number + 1
                self.change_section_number(stack, next_section, DUMMY_SECTION_NUMBER)
                self.change_section_number(stack, section_number, set_to)
                self.change_section_number(stack, DUMMY_SECTION_NUMBER, section_number)
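The dummy-number trick described in the string above only works because each update hits the database before the next one. Below is a minimal, self-contained sketch of the same three-step swap, using an in-memory SQLite database and a simplified, hypothetical Section table rather than the project's RawSection model; the local set_number helper stands in for change_section_number, which is not shown in this snippet.

# Minimal sketch of the dummy-number swap on a simplified, hypothetical table.
from sqlalchemy import Column, Integer, String, UniqueConstraint, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Section(Base):
    __tablename__ = 'section'
    id = Column(Integer, primary_key=True)
    prep_id = Column(String, nullable=False)
    section_number = Column(Integer, nullable=False)
    __table_args__ = (UniqueConstraint('prep_id', 'section_number'),)

DUMMY_SECTION_NUMBER = 9999

def swap_sections(session, prep_id, current, neighbor):
    """Swap two section numbers without ever violating the unique constraint."""
    def set_number(old, new):
        # query(...).update() issues the UPDATE immediately, so each step is
        # visible to the next one within the same transaction
        session.query(Section)\
            .filter(Section.prep_id == prep_id)\
            .filter(Section.section_number == old)\
            .update({Section.section_number: new}, synchronize_session=False)

    set_number(neighbor, DUMMY_SECTION_NUMBER)  # park the neighbor on the dummy number
    set_number(current, neighbor)               # move the current section into its slot
    set_number(DUMMY_SECTION_NUMBER, current)   # move the parked neighbor into the freed slot
    session.commit()

if __name__ == '__main__':
    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    with Session(engine) as session:
        # 'DKXX' is a placeholder prep_id, not real data
        session.add_all([Section(prep_id='DKXX', section_number=n) for n in (1, 2, 3)])
        session.commit()
        swap_sections(session, 'DKXX', 2, 3)  # sections 2 and 3 trade places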
Code Example #2
def fetch_and_run(prep_id, limit):
    try:
        animal = session.query(Animal).filter(Animal.prep_id == prep_id).one()
    except NoResultFound:
        print('No results found for prep_id: {}.'.format(prep_id))
        sys.exit()

    slide_processor = SlideProcessor(animal, session)
    slide_processor.process_czi_dir()
Code Example #3
def get_list_of_landmarks_in_prep(prepid):
    query_result = session.query(LayerData)\
            .filter(LayerData.active.is_(True))\
            .filter(LayerData.prep_id == prepid)\
            .filter(LayerData.input_type_id == 2)\
            .filter(LayerData.person_id == 2)\
            .all()
    landmarks = [entryi.structure.abbreviation for entryi in query_result]
    return landmarks
Code Example #4
    def generate_stack_metadata(self):
        """
        There must be an entry in both the animal and histology and task tables
        The task table is necessary as that is filled when the CZI dir is scanned.
        If there are no czi and tif files, there is no point in running the pipeline on that stack
        Returns:
            a dictionary of stack information

        """
        for a, h in session.query(Animal, Histology).filter(Animal.prep_id == Histology.prep_id).all():
            resolution = session.query(func.max(ScanRun.resolution)).filter(ScanRun.prep_id == a.prep_id).scalar()

            tif_dir = os.path.join(ROOT_DIR, a.prep_id, 'tif')
            if (os.path.exists(tif_dir) and len(os.listdir(tif_dir)) > 0):
                self.all_stacks.append(a.prep_id)
                self.stack_metadata[a.prep_id] = {'stain': h.counterstain,
                                                  'cutting_plane': h.orientation,
                                                  'resolution': resolution,
                                                  'section_thickness': h.section_thickness}
        return self.stack_metadata
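For orientation, the returned dictionary maps each prep_id to its stack information. The sketch below shows only its shape; the prep_id and every value are illustrative placeholders, not real data.

# Shape of the dictionary returned above; 'DKXX' and all values are placeholders.
stack_metadata = {
    'DKXX': {
        'stain': 'NTB',               # Histology.counterstain
        'cutting_plane': 'sagittal',  # Histology.orientation
        'resolution': 0.325,          # max ScanRun.resolution for this prep
        'section_thickness': 20,      # Histology.section_thickness
    },
}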
Code Example #5
def get_center_of_mass(brain, person_id=28, input_type_id=4):
    query_results = session.query(CenterOfMass)\
        .filter(CenterOfMass.active.is_(True))\
        .filter(CenterOfMass.prep_id == brain)\
        .filter(CenterOfMass.person_id == person_id)\
        .filter(CenterOfMass.input_type_id == input_type_id)\
        .all()
    center_of_mass = {}
    for row in query_results:
        structure = row.structure.abbreviation
        center_of_mass[structure] = np.array([row.x, row.y, row.section])
    return center_of_mass
Code Example #6
def query_brain_coms(brain, person_id=28, input_type_id=4):
    # default: person is bili, input_type is aligned
    rows = session.query(CenterOfMass)\
        .filter(CenterOfMass.active.is_(True))\
        .filter(CenterOfMass.prep_id == brain)\
        .filter(CenterOfMass.person_id == person_id)\
        .filter(CenterOfMass.input_type_id == input_type_id)\
        .all()
    row_dict = {}
    for row in rows:
        structure = row.structure.abbreviation
        row_dict[structure] = np.array([row.x, row.y, row.section])
    return row_dict
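Because these dictionaries are keyed by structure abbreviation, two brains can be lined up over the structures they share. A hypothetical usage sketch of the function above (the prep_ids are placeholders):

import numpy as np

fixed = query_brain_coms('DK39')    # placeholder prep_ids, for illustration only
moving = query_brain_coms('DK43')
common = sorted(set(fixed) & set(moving))            # structures present in both brains
fixed_points = np.array([fixed[s] for s in common])
moving_points = np.array([moving[s] for s in common])
# fixed_points and moving_points are now N x 3 arrays in matching row order,
# suitable as input to e.g. a point-set registration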
Code Example #7
def transform_and_add_dict(animal, person_id, row_dict, r=None, t=None):

    for abbrev, v in row_dict.items():
        x = v[0]
        y = v[1]
        section = v[2]
        structure = session.query(Structure).filter(
            Structure.abbreviation == func.binary(abbrev)).one()
        if r is not None:
            scan_run = session.query(ScanRun).filter(
                ScanRun.prep_id == animal).one()
            brain_coords = np.asarray([x, y, section])
            brain_scale = [scan_run.resolution, scan_run.resolution, 20]
            transformed = brain_to_atlas_transform(brain_coords,
                                                   r,
                                                   t,
                                                   brain_scale=brain_scale)
            x = transformed[0]
            y = transformed[1]
            section = transformed[2]

        print(animal, abbrev, x, y, section)
        add_center_of_mass(animal, structure, x, y, section, person_id)
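The call above passes a rotation r, a translation t, and a per-axis scale. The project's brain_to_atlas_transform is not shown in this snippet; the sketch below only illustrates the assumed convention for a transform of that shape (scale to physical units, then apply the affine map) and is not the project's implementation.

import numpy as np

def brain_to_atlas_transform_sketch(brain_coords, r, t, brain_scale):
    # Assumed convention only: scale pixel/section indices to physical units,
    # then apply the rotation matrix r and translation t.
    physical = np.asarray(brain_coords, dtype=float) * np.asarray(brain_scale, dtype=float)
    return r @ physical + np.asarray(t, dtype=float).ravel()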
Code Example #8
File: get_com.py  Project: eddyod/pipeline_utility
def get_com_dict(prep_id, person_id, input_type_id):
    query_results = session.query(LayerData)\
        .filter(LayerData.active.is_(True))\
        .filter(LayerData.prep_id == prep_id)\
        .filter(LayerData.person_id == person_id)\
        .filter(LayerData.input_type_id == input_type_id)\
        .filter(LayerData.layer == 'COM')\
        .all()
    center_of_mass = {}
    for row in query_results:
        structure = row.structure.abbreviation
        com = np.array([row.x, row.y, row.section])
        center_of_mass[structure] = com
    return center_of_mass
Code Example #9
def get_centers(animal, input_type_id):

    beth = 2
    rows = session.query(LayerData).filter(
        LayerData.active.is_(True))\
            .filter(LayerData.prep_id == animal)\
            .filter(LayerData.person_id == beth)\
            .filter(LayerData.layer == 'COM')\
            .filter(LayerData.input_type_id == input_type_id)\
            .all()
    row_dict = {}
    for row in rows:
        structure = row.structure.abbreviation
        row_dict[structure] = [row.x, row.y, row.section]
    return row_dict
Code Example #10
def make_meta(animal, remove):
    """
    Scans the czi dir to extract the meta information for each tif file
    Args:
        animal: the animal as primary key

    Returns: nothing
    """
    sqlController = SqlController(animal)
    fileLocationManager = FileLocationManager(animal)
    scan_id = sqlController.scan_run.id
    slides = session.query(Slide).filter(Slide.scan_run_id == scan_id).count()

    if slides > 0 and not remove:
        print(
            f'There are {slides} existing slides. You must manually delete the slides first.'
        )
        print(
            'Rerun this script as create_meta.py --animal DKXX --remove true')
        sys.exit()

    session.query(Slide).filter(Slide.scan_run_id == scan_id).delete(
        synchronize_session=False)
    session.commit()

    try:
        czi_files = sorted(os.listdir(fileLocationManager.czi))
    except OSError as e:
        print(e)
        sys.exit()

    section_number = 1
    for i, czi_file in enumerate(tqdm(czi_files)):
        extension = os.path.splitext(czi_file)[1]
        if extension.endswith('czi'):
            slide = Slide()
            slide.scan_run_id = scan_id
            slide.slide_physical_id = int(re.findall(r'\d+', czi_file)[1])
            slide.rescan_number = "1"
            slide.slide_status = 'Good'
            slide.processed = False
            slide.file_size = os.path.getsize(
                os.path.join(fileLocationManager.czi, czi_file))
            slide.file_name = czi_file
            slide.created = datetime.fromtimestamp(
                os.path.getmtime(
                    os.path.join(fileLocationManager.czi, czi_file)))

            # Get metadata from the czi file
            czi_file_path = os.path.join(fileLocationManager.czi, czi_file)
            metadata_dict = get_czi_metadata(czi_file_path)
            #print(metadata_dict)
            series = get_fullres_series_indices(metadata_dict)
            #print('series', series)
            slide.scenes = len(series)
            session.add(slide)
            session.flush()

            for j, series_index in enumerate(series):
                scene_number = j + 1
                channels = range(metadata_dict[series_index]['channels'])
                #print('channels range and dict', channels,metadata_dict[series_index]['channels'])
                channel_counter = 0
                width = metadata_dict[series_index]['width']
                height = metadata_dict[series_index]['height']
                for channel in channels:
                    tif = SlideCziTif()
                    tif.slide_id = slide.id
                    tif.scene_number = scene_number
                    tif.file_size = 0
                    tif.active = 1
                    tif.width = width
                    tif.height = height
                    tif.scene_index = series_index
                    channel_counter += 1
                    newtif = '{}_S{}_C{}.tif'.format(czi_file, scene_number,
                                                     channel_counter)
                    newtif = newtif.replace('.czi', '').replace('__', '_')
                    tif.file_name = newtif
                    tif.channel = channel_counter
                    tif.processing_duration = 0
                    tif.created = time.strftime('%Y-%m-%d %H:%M:%S')
                    session.add(tif)
                section_number += 1
        session.commit()
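The tif file names created above follow a fixed pattern derived from the CZI file name. A tiny standalone sketch of just that string handling (the example file name is made up):

def make_tif_name(czi_file, scene_number, channel):
    # mirrors the naming logic used inside make_meta above
    name = '{}_S{}_C{}.tif'.format(czi_file, scene_number, channel)
    return name.replace('.czi', '').replace('__', '_')

# make_tif_name('DKXX_slide060_2020_01_01.czi', 2, 1)
# -> 'DKXX_slide060_2020_01_01_S2_C1.tif'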
Code Example #11
def create_points(animal, section, layer, debug=False):

    fileLocationManager = FileLocationManager(animal)

    INPUT = os.path.join(fileLocationManager.prep, 'CH3', 'full_aligned')
    OUTPUT = os.path.join(fileLocationManager.prep, 'CH3', 'points', layer)
    os.makedirs(OUTPUT, exist_ok=True)

    sections = defaultdict(list)
    annotations = session.query(LayerData)\
        .filter(LayerData.layer == layer).filter(LayerData.prep_id == animal)

    for annotation in annotations:
        x = annotation.x
        y = annotation.y
        pts = [x, y]
        section = int(annotation.section)
        sections[section].append(pts)
        if debug:
            print(annotation.layer, x, y, section)

    for section, points in sections.items():
        if debug:
            print(section, len(points))
            continue
        if len(points) < 2:
            print(f'Section {section} has less than 2 points')
            continue
        pts = np.array(points)
        means = np.mean(pts, axis=0)
        mean_x = means[0]
        mean_y = means[1]
        D = pdist(pts)
        D = squareform(D)
        max_distance, [I_row, I_col] = np.nanmax(D), np.unravel_index(
            np.argmax(D), D.shape)

        if debug:
            print(f'means for section {section} {means}, pts {pts}')

        file = str(section).zfill(3) + '.tif'
        infile = os.path.join(INPUT, file)

        if not os.path.exists(infile) and not debug:
            print(infile, 'does not exist')
            continue

        outpath = os.path.join(OUTPUT, f'{section}.tif')

        if os.path.exists(outpath):
            print(outpath, 'exists')
            continue

        cmd = f'convert {infile} -normalize -auto-level {outpath}'
        if debug:
            print(cmd)
        else:
            proc = Popen(cmd, shell=True)
            proc.wait()

        cmd = f'convert {outpath} -fill transparent -stroke yellow'
        for point in points:
            endcircle = point[0] + (15 * 5)
            cmd += f' -draw "stroke-width 20 circle {point[0]},{point[1]},{endcircle},{point[1]}" '

        cmd += f' {outpath}'
        if debug:
            print(cmd)
        else:
            proc = Popen(cmd, shell=True)
            proc.wait()

        sizex = int(max_distance + 500)
        sizey = sizex
        offsetx = int(mean_x - max_distance / 2)
        offsety = int(mean_y - max_distance / 2)

        #cmd = f'convert {outpath} -gravity West -chop {chop}x0 {outpath}'
        cmd = f'convert {outpath} -crop {sizex}x{sizey}+{offsetx}+{offsety} {outpath}'
        if debug:
            print(cmd)
        else:
            proc = Popen(cmd, shell=True)
            proc.wait()

        pngfile = str(section).zfill(3) + '.png'
        pngpath = os.path.join(fileLocationManager.thumbnail_web, 'points',
                               layer)
        os.makedirs(pngpath, exist_ok=True)
        png = os.path.join(pngpath, pngfile)
        cmd = f'convert {outpath} -resize 12% {png}'
        if debug:
            print(cmd)
        else:
            proc = Popen(cmd, shell=True)
            proc.wait()
    if debug:
        print()
        print(sections)
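The crop size above is driven by the largest pairwise distance between the annotation points in a section. In isolation, that computation looks like this (the points are made up):

import numpy as np
from scipy.spatial.distance import pdist, squareform

pts = np.array([[100, 200], [400, 250], [150, 600]])    # made-up annotation points
D = squareform(pdist(pts))                              # square matrix of pairwise distances
max_distance = np.nanmax(D)                             # largest distance between any two points
I_row, I_col = np.unravel_index(np.argmax(D), D.shape)  # indices of that farthest pair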
Code Example #12
def get_active_prep_list():
    query_result = session.query(Animal.prep_id).filter(
        Animal.active.is_(True)).all()
    preps = [entryi[0] for entryi in query_result]
    preps.remove('Atlas')  # 'Atlas' lives in the Animal table but is not an actual prep
    return preps
Code Example #13
def get_users():
    # session.query(User) returns User instances (not row tuples), so no column indexing is needed
    query_results = session.query(User).all()
    users = list(query_results)
    return users
Code Example #14
def get_input_types():
    # session.query(ComType) returns ComType instances (not row tuples), so no column indexing is needed
    query_results = session.query(ComType).all()
    com_type = list(query_results)
    return com_type