Пример #1
0
    def findGaleries(self, tracklets):
        '''Collect the bounding box of every point of the given tracklets.

        Returns a dict mapping frame index -> list of
        (track id, rank of the point within its track, bounding box row),
        where rank preserves the chronological order of the track's points.
        '''
        if self.settings.new_h5:
            file_ = os.path.join(self.settings.hdf5Folder, self.plate, 'hdf5', "{}.ch5".format(self.well))
            # new layout: position number is encoded after the '_' in the well name
            well_name, position = self.well.split('_')[0], self.well.split('_')[1]
            path_objects = "/sample/0/plate/{}/experiment/{}/position/{}/object/primary__primary3".format(self.plate, well_name, position)
            path_boundingBox = "/sample/0/plate/{}/experiment/{}/position/{}/feature/primary__primary3/bounding_box".format(self.plate, well_name, position)
        else:
            file_ = os.path.join(self.settings.hdf5Folder, self.plate, 'hdf5', "{}.hdf5".format(self.well))
            # old layout: position is hard-coded to 1
            path_objects = "/sample/0/plate/{}/experiment/{}/position/1/object/primary__primary".format(self.plate, self.well.split('_')[0])
            path_boundingBox = "/sample/0/plate/{}/experiment/{}/position/1/feature/primary__primary/bounding_box".format(self.plate, self.well.split('_')[0])

        objects = vi.readHDF5(file_, path_objects)
        bounding_boxes = vi.readHDF5(file_, path_boundingBox)
        boxes = defaultdict(list)

        for track in tracklets:
            # points sorted by frame; enumerate replaces the manual image counter
            ordered_points = sorted(track.lstPoints.keys(), key=itemgetter(0))
            for rank, (frame, cell_id) in enumerate(ordered_points):
                hit = np.where((objects['time_idx'] == frame) & (objects['obj_label_id'] == cell_id))
                boxes[frame].append((track.id, rank, bounding_boxes[hit]))

        return boxes
Пример #2
0
 def _getFeatures(self, elements):
     '''Collect object feature rows for the cells listed in ``elements``.

     elements: nested dict plate -> well -> frame -> iterable of cell ids.
     Returns (feature matrix, tuple of (plate, image name) entries,
     object count of the first non-empty frame of the LAST processed well).
     Rows containing NaN features are skipped silently.
     '''
     if self.settings.new_h5:
         file_=os.path.join(self.settings.hdf5Folder, "{}", 'hdf5', "{}.ch5")
         path_objects="/sample/0/plate/{}/experiment/{}/position/1/object/primary__primary3"
         path_features="/sample/0/plate/{}/experiment/{}/position/1/feature/primary__primary3/object_features"
     else:
         file_=os.path.join(self.settings.hdf5Folder, "{}", 'hdf5', "{}.hdf5")
         path_objects="/sample/0/plate/{}/experiment/{}/position/1/object/primary__primary"
         path_features="/sample/0/plate/{}/experiment/{}/position/1/feature/primary__primary/object_features"
     result=None; image_list=[]
     for plate in sorted(elements):
         for well in sorted(elements[plate]):
             objects = vi.readHDF5(file_.format(plate, well), path_objects.format(plate, well.split('_')[0]))
             features=vi.readHDF5(file_.format(plate, well), path_features.format(plate, well.split('_')[0]))

             # count of objects in the first frame that contains any object
             # NOTE(review): loops forever if no frame ever has objects -- assumed not to happen
             object_initial=0; fr=0
             while object_initial==0:
                 object_initial = len(np.where(objects['time_idx']==fr)[0])
                 fr+=1

             for frame in sorted(elements[plate][well]):
                 for cell_id in sorted(elements[plate][well][frame]):
                     # previous bare "except: pdb.set_trace()" debug trap removed:
                     # np.where does not raise here, and real errors should propagate
                     line = np.where((objects['time_idx']==frame)&(objects['obj_label_id']==cell_id))[0]
                     if not np.any(np.isnan(features[line])):
                         result=features[line] if result is None else np.vstack((result, features[line]))
                         image_list.append((plate, self.settings.outputImage.format(plate, well.split('_')[0]," ", frame,  cell_id)))
     return result, tuple(image_list), object_initial
Пример #3
0
def test_writeAndReadImageHDF5():
    # skip the whole test when h5py is unavailable
    try:
        import h5py
    except:
        print("Warning: 'import h5py' failed, not executing HDF5 import/export tests")
        return

    # positive tests
    # round-trip a multiband image
    im.writeHDF5(image, "hdf5test.hd5", "group/subgroup/imgdata")
    restored = im.readHDF5("hdf5test.hd5", "group/subgroup/imgdata")
    checkEqualData(image, restored)
    # round-trip a scalar image (channel axis dropped after reading)
    im.writeHDF5(scalar_image, "hdf5test.hd5", "group/subgroup/imgdata")
    restored_scalar = im.readHDF5("hdf5test.hd5", "group/subgroup/imgdata").dropChannelAxis()
    checkEqualData(scalar_image, restored_scalar)
    # write several datasets into one file and read each back
    im.writeHDF5(image, "hdf5test.hd5", "group/subgroup/imgdata")
    im.writeHDF5(image, "hdf5test.hd5", "group/subgroup/imgdata2")
    first = im.readHDF5("hdf5test.hd5", "group/subgroup/imgdata")
    second = im.readHDF5("hdf5test.hd5", "group/subgroup/imgdata2")
    checkEqualData(image, first)
    checkEqualData(image, second)

    # negative tests
    # corrupt one pixel and verify the data no longer compares equal
    restored[1, 1, 1] = 100000
    checkUnequalData(image, restored)
    restored_scalar[1, 1] = 100000
    checkUnequalData(scalar_image, restored_scalar)
Пример #4
0
    def findObjects(self, splits, siblings,isplits, compute_boxes=False):
        '''Score mitosis tracks and compute their cell-cycle lengths.

        splits: complete tracks; isplits: tracks whose end is not observed
        (only processed when settings.not_ending_track is set).
        compute_boxes: when True, bounding boxes are loaded and collected.

        Returns (boxes, cell_cycle_lengths, score): boxes maps frame -> box
        info, cell_cycle_lengths maps track -> last_frame - first_frame + 1,
        and score maps track -> a two-element list filled in by the
        _findCompleteObjects/_findIncompleteObjects helpers (presumably
        start/end mitosis scores -- confirm in those helpers).
        '''
        # paths differ between the new CellH5 layout (.ch5, explicit position
        # parsed from the well name) and the old one (.hdf5, position fixed to 1)
        if self.settings.new_h5:
            file_=os.path.join(self.settings.hdf5Folder, self.plate, 'hdf5', "{}.ch5".format(self.well))
            path_objects="/sample/0/plate/{}/experiment/{}/position/{}/object/primary__primary3".format(self.plate, self.well.split('_')[0],
                                                                                            self.well.split('_')[1])
            path_boundingBox="/sample/0/plate/{}/experiment/{}/position/{}/feature/primary__primary3/bounding_box".format(self.plate, self.well.split('_')[0],
                                                                                            self.well.split('_')[1])
            path_classif="/sample/0/plate/{}/experiment/{}/position/{}/feature/primary__primary3/object_classification/prediction".format(self.plate, self.well.split('_')[0],
                                                                                            self.well.split('_')[1])
            
        else:
            file_=os.path.join(self.settings.hdf5Folder, self.plate, 'hdf5', "{}.hdf5".format(self.well))
            path_objects="/sample/0/plate/{}/experiment/{}/position/1/object/primary__primary".format(self.plate, self.well.split('_')[0])
            path_boundingBox="/sample/0/plate/{}/experiment/{}/position/1/feature/primary__primary/bounding_box".format(self.plate, self.well.split('_')[0])
            path_classif="/sample/0/plate/{}/experiment/{}/position/1/feature/primary__primary/object_classification/prediction".format(self.plate, self.well.split('_')[0])
            
        objects = vi.readHDF5(file_, path_objects)
        # bounding boxes are only read when the caller asked for them
        bounding_boxes = vi.readHDF5(file_, path_boundingBox) if compute_boxes else None
        classification = vi.readHDF5(file_, path_classif)
        
        boxes=defaultdict(list)
        score={t:[0,0] for t in splits}
        frames={t:[0,0] for t in splits}

        
        self._findCompleteObjects(splits, objects, classification, boxes, compute_boxes, bounding_boxes, siblings, score, frames)
        
        if self.settings.not_ending_track:
            # incomplete tracks: end score unknown (None), last frame = end of movie
            score.update({t:[0, None] for t in isplits})
            frames.update({t:[0, self.movie_length-1] for t in isplits})            
            self._findIncompleteObjects(isplits, objects, classification, score, frames)
        
        cell_cycle_lengths={el: frames[el][1]-frames[el][0]+1 for el in frames}

        return boxes, cell_cycle_lengths, score
Пример #5
0
 def findObjects(self, thrivisions):
     '''Collect bounding boxes for candidate triple-division events.

     thrivisions: dict frame -> list of entries where entry[0] is the track
     id and the remaining elements are cell ids; an entry whose last element
     is -1 denotes the mother cell. Returns a dict frame -> list of
     (track id, cell id or 0, bounding box), where the daughters' boxes are
     merged into a single enclosing box (presumably min/max over the
     left/right/top/bottom columns -- confirm the bounding_box field order).
     '''
     if self.settings.new_h5:
         file_=os.path.join(self.settings.hdf5Folder, self.plate, 'hdf5', "{}.ch5".format(self.well))
         path_objects="/sample/0/plate/{}/experiment/{}/position/1/object/primary__primary3".format(self.plate, self.well.split('_')[0])
         path_boundingBox="/sample/0/plate/{}/experiment/{}/position/1/feature/primary__primary3/bounding_box".format(self.plate, self.well.split('_')[0])
     else:
         file_=os.path.join(self.settings.hdf5Folder, self.plate, 'hdf5', "{}.hdf5".format(self.well))
         path_objects="/sample/0/plate/{}/experiment/{}/position/1/object/primary__primary".format(self.plate, self.well.split('_')[0])
         path_boundingBox="/sample/0/plate/{}/experiment/{}/position/1/feature/primary__primary/bounding_box".format(self.plate, self.well.split('_')[0])
         
     objects = vi.readHDF5(file_, path_objects)
     bounding_boxes = vi.readHDF5(file_, path_boundingBox)
     boxes=defaultdict(list)
     for im in thrivisions:
         # rows of the object table belonging to frame `im`
         where_=np.where(np.array(objects, dtype=int)==im)[0]
         for thrivision in thrivisions[im]:
             if thrivision[-1]==-1:#we're looking at the mother cell
                 for w in where_:
                     if objects[w][1]==thrivision[1]:
                         boxes[im].append((thrivision[0], thrivision[1], np.array(list(bounding_boxes[w]))))
                         
             else:
                 # gather the (up to 3) daughters' boxes, then merge them
                 local_box=np.zeros(shape=(3,4), dtype=int); k=0
                 for w in where_:
                     if np.any(thrivision[1:]==objects[w][1]):
                         local_box[k]=np.array(list(bounding_boxes[w]))
                         k+=1
                 boxes[im].append((thrivision[0],0, np.array([min(local_box[:,0]), max(local_box[:,1]), min(local_box[:,2]), max(local_box[:,3])]) ))
     return boxes
Пример #6
0
def test_writeAndReadVolumeHDF5():
    # skip silently when h5py is unavailable
    try:
        import h5py
    except:
        return

    # positive tests
    # round-trip an integer volume
    im.writeHDF5(volume256, "hdf5test.hd5", "group/subgroup/voldata")
    volume256_imp = im.readHDF5("hdf5test.hd5", "group/subgroup/voldata")
    checkEqualData(volume256, volume256_imp)
    # round-trip a float volume (compared in default axis order)
    im.writeHDF5(volumeFloat, "hdf5test.hd5", "group/subgroup/voldata")
    volumeFloat_imp = im.readHDF5("hdf5test.hd5", "group/subgroup/voldata")
    checkEqualData(volumeFloat.transposeToDefaultOrder(), volumeFloat_imp)
    # write several datasets into one file and read each back
    im.writeHDF5(volume256, "hdf5test.hd5", "group/subgroup/voldata")
    im.writeHDF5(volume256, "hdf5test.hd5", "group/subgroup/voldata2")
    first = im.readHDF5("hdf5test.hd5", "group/subgroup/voldata").dropChannelAxis()
    second = im.readHDF5("hdf5test.hd5", "group/subgroup/voldata2").dropChannelAxis()
    checkEqualData(volume256, first)
    checkEqualData(volume256, second)

    # negative tests
    # corrupt one voxel and verify the data no longer compares equal
    volume256_imp[1, 1, 1] = 100000
    checkUnequalData(volume256, volume256_imp)
    volumeFloat_imp[1, 1, 1] = 100000
    checkUnequalData(volumeFloat.transposeToDefaultOrder(), volumeFloat_imp)
Пример #7
0
 def classificationPerFrame(self):
     '''Per-frame classification histogram for this plate/well.

     Builds an array of shape (n_frames, 15): for each frame the count of
     objects per class label (18 raw classes; UndefinedCondensed is folded
     into Apoptosis and the last three columns are dropped). If a pickled
     result file already exists it is loaded and returned unchanged
     (NOTE: the cached object is whatever was pickled -- presumably the
     same 15-column array; confirm against the writer of outputFile).
     '''
     #if already computed I return it
     if self.settings.outputFile.format(self.plate[:10], self.well) in os.listdir(os.path.join(self.settings.outputFolder,self.plate)):
         f=open(os.path.join(self.settings.outputFolder,self.plate, self.settings.outputFile.format(self.plate[:10], self.well)))
         result = pickle.load(f); f.close()
         return result
     
     #if not I load it
     
     # position number is filled in per pos below (double braces keep a {} slot)
     path_classif="/sample/0/plate/{}/experiment/{:>05}/position/{{}}/feature/primary__test/object_classification/prediction".format(self.plate, self.well)
     
     result=None
     
     for pos in self.pos_list:
         classification = vi.readHDF5(self.file_.format(pos), path_classif.format(pos))
         objects=vi.readHDF5(self.file_.format(pos), self.path_objects.format(pos))
         
         frames = sorted(list(set(objects['time_idx'])))
         if result is None:
             # one row per frame, one column per class label (18 classes)
             result=np.zeros(shape=(np.max(frames)+1, 18), dtype=float)
             
         for frame in frames:
             # histogram of predicted labels for the objects of this frame
             result[frame]+= np.bincount(classification['label_idx'][np.where(objects['time_idx']==frame)], minlength=18)
                 
     #putting UndefinedCondensed with Apoptosis
     result[:,11]+=result[:,16]
     result[:,16]=result[:,17]
     
     r=result[:,:-3]
     
     print r.shape
     
     return r
Пример #8
0
    def getObjective(self,objective, lengths, scores, tracklets):
        '''
        This function extracts information dealing with complete tracks, ie starting with a mitosis and ending with a mitosis.
        
        One chooses which information by setting objective in the setting file. This should be under the form :
        objective = {'name':[name of the info to extract], 'function':[function taking as argument objective in case one wants to divide or do smt],
        'features':[list of feature names that are going to be used in objective['function']}

        objective may also simply be a feature name present in the hdf5 file.
        Returns {objective name: {track_id: extracted values}, 'length': lengths};
        tracks failing the score/length filter are deleted from lengths.
        '''
        if self.settings.new_h5:
            file_=os.path.join(self.settings.hdf5Folder, self.plate, 'hdf5', "{}.ch5".format(self.well))
            path_objects="/sample/0/plate/{}/experiment/{}/position/{}/object/primary__primary3".format(self.plate, self.well.split('_')[0],
                                                                                            self.well.split('_')[1])
            path_features="/sample/0/plate/{}/experiment/{}/position/{}/feature/primary__primary3/object_features".format(self.plate, self.well.split('_')[0],
                                                                                            self.well.split('_')[1])
            path_feature_names = "definition/feature/primary__primary3/object_features"

        else:
            file_=os.path.join(self.settings.hdf5Folder, self.plate, 'hdf5', "{}.hdf5".format(self.well))
            path_objects="/sample/0/plate/{}/experiment/{}/position/1/object/primary__primary".format(self.plate, self.well.split('_')[0])
            path_features="/sample/0/plate/{}/experiment/{}/position/1/feature/primary__primary/object_features".format(self.plate, self.well.split('_')[0])
            path_feature_names = "definition/feature/primary__primary/object_features"
            
        objects = vi.readHDF5(file_, path_objects)
        features =vi.readHDF5(file_, path_features)
        
        feature_names = vi.readHDF5(file_, path_feature_names)
        result = {}
        
        if objective in feature_names['name']:
            # objective is a plain feature name: select its column
            tab=features[:,np.where(feature_names['name']==objective)[0]]
        else:
            # objective is a dict: extract each listed feature column, then combine
            try:
                tabs={el: features[:,np.where(feature_names['name']==el)[0]] for el in objective['features']}
            except Exception:
                # narrowed from a bare except: any malformed objective maps to ValueError
                raise ValueError
            else:
                tab = objective['function'](tabs)
        
        for track_id in scores:
            # keep tracks whose mitosis scores are both >=1 (or start>=1 with an
            # unobserved end) and that last at least 5 frames
            # FIX: '== None' replaced by 'is None'
            if ((scores[track_id][1] is not None and scores[track_id][0]>=1 and scores[track_id][1]>=1) or (scores[track_id][0]>=1 and scores[track_id][1] is None))\
                    and lengths[track_id]>=5:
                # list(...) keeps this correct whether filter returns a list (py2)
                # or an iterator (py3); a missing trajectory now raises IndexError
                # instead of dropping into the previous pdb.set_trace() debug trap
                traj=list(filter(lambda x:x.id==track_id, tracklets.lstTraj))[0]
                result[track_id]=self._getObjective(objective, objects, tab, traj)
            else:
                del lengths[track_id]
        if isinstance(objective, dict):
            return {objective['name']: result, 'length':lengths}
        else:
            return {objective: result, 'length':lengths}
Пример #9
0
def distribute_folders(path, prefix, new_path, new_name):
    '''Copy each block file from `path` into its own block<number>/ folder under `new_path`.'''
    for entry in os.listdir(path):
        # the block number sits between the prefix and the ".h5" suffix
        number = entry[len(prefix):-3]
        data = readHDF5(path + entry, "data")
        writeHDF5(data, new_path + "block{}/".format(number) + new_name, "data")
Пример #10
0
    def getObjective(self,objectives, tracklets):
        '''
        This function extracts information dealing with tracklets.
        
        One chooses which information by setting objective in the setting file. This should be under the form :
        objective = [
            {'name':[name of the info to extract], 
            'channel':[name of the channel on which to extract the info as contained in the hdf5 file],
            'function':[function taking as argument objective in case one wants to divide or do smt],
            'features':[list of feature names that are going to be used in objective['function']}
            ]

        Returns a dict {objective name: {track id: values}}. The special
        'nuclear_pos_ratio' entry is not extracted directly: it is derived at
        the end as nuclear_pos_min / nuclear_pos_max per track.
        '''
        file_=os.path.join(self.settings.allDataFolder, self.plate, 'hdf5', "{}".format(self.well))
        # the remaining {{}} slot is filled per channel below
        path_objects="/sample/0/plate/{}/experiment/{}/position/1/object/{{}}".format(self.plate, self.well.split('_')[0])
        path_features="/sample/0/plate/{}/experiment/{}/position/1/feature/{{}}/object_features".format(self.plate, self.well.split('_')[0])
        path_feature_names = "definition/feature/primary__primary3/object_features"
        path_boundingBox="/sample/0/plate/{}/experiment/{}/position/1/feature/{{}}/bounding_box".format(self.plate, self.well.split('_')[0])
        path_centers="/sample/0/plate/{}/experiment/{}/position/1/feature/{{}}/center".format(self.plate, self.well.split('_')[0])

        feature_names = vi.readHDF5(file_, path_feature_names)
        result = defaultdict(dict)
        
        # nuclear_pos_ratio is skipped here and computed from min/max at the end
        for objective in filter(lambda x:x['name']!='nuclear_pos_ratio', objectives):
            if 'function' not in objective:
#simply a question of extracting the right table
                objects = vi.readHDF5(file_, path_objects.format(objective['channel']))
                features =vi.readHDF5(file_, path_features.format(objective['channel']))
                tab=features[:,np.where(feature_names['name']==objective['feature'])[0]]
                for track in tracklets:  
                    result[objective['name']][track.id]=self._getObjective(objects, tab, track, 
                                channel=objective['channel'])[:,0,0]
            elif objective['name'] in ['nuclear_pos_min', 'nuclear_pos_max']:
                tabs=defaultdict(dict)
#meaning we're going to actually do calculations on extracted tables
                for el in objective['feature']:
                    if el=='center':
                        # nucleus centers come from the secondary channel
                        objects = vi.readHDF5(file_, path_objects.format('secondary__primary3'))
                        center_=vi.readHDF5(file_, path_centers.format('secondary__primary3'))
                        
                        for track in tracklets:
                            tabs[track.id][el]=self._getObjective(objects, center_, track, channel='secondary__primary3')
                            
                    elif el=='bounding_box':
                        # cell bounding boxes come from the primary channel
                        objects = vi.readHDF5(file_, path_objects.format('primary__primary3'))
                        bb_=vi.readHDF5(file_, path_boundingBox.format('primary__primary3'))
                        
                        for track in tracklets:
                            tabs[track.id][el]=self._getObjective(objects, bb_, track, channel='primary__primary3')
                        
                    else:
                        raise ValueError
                
                result[objective['name']] = {el :objective['function'](tabs[el]) for el in sorted(tabs.keys())} 
        
        # derived objective: ratio of the two nuclear position measures per track
        for track_id in result['nuclear_pos_min']:
            result['nuclear_pos_ratio'][track_id]=result['nuclear_pos_min'][track_id]/result['nuclear_pos_max'][track_id]

        return result
Пример #11
0
    def _alreadyDone(self):
        '''Return True if the tracking table for this plate/well already exists in the ch5 file.'''
        path = self.TRACKING_PATH_NAME.format(self.plate, self.well)

        try:
            # value unused: only existence/readability of the dataset matters
            vi.readHDF5(self.ch5_file, path)
        except Exception:
            # narrowed from a bare except; dataset missing or unreadable
            return False
        else:
            return True
Пример #12
0
def relabelcons(path, new_path):
    '''Relabel every HDF5 block in `path` with consecutive labels and save it under `new_path`.'''
    for entry in os.listdir(path):
        volume = readHDF5(path + entry, "data").astype("uint32")
        relabeled, _, _ = vigra.analysis.relabelConsecutive(volume)
        writeHDF5(relabeled, new_path + entry, "data", compression="gzip")
Пример #13
0
    def _getObjectIds(self):
        '''Read and return the object-id table for this plate/well/channel from the ch5 file.'''
        path = self.OBJECT_PATH_NAME.format(self.plate, self.well, self.primary_channel_name)
        # the previous try/except only re-raised the same exception (and in py2
        # "raise e" loses the original traceback), so read directly
        return vi.readHDF5(self.ch5_file, path)
Пример #14
0
def rerun_connected_compts_in_folders(path,file_name,new_file_name):
    '''Recompute the connected-component labeling for block folders under `path`.

    Reads path/block<i>/file_name, labels it, and writes the result to
    path/block<i>/new_file_name (gzip-compressed).
    NOTE(review): the loop iterates over [1] only, i.e. just block1 --
    presumably narrowed for a one-off rerun; widen to process more blocks.
    '''

    for i in [1]:

        print i
        block=readHDF5(path+"block{}/".format(i)+file_name,"data" )
        # labelVolume expects an unsigned integer volume
        block=block.astype("uint32")
        labeled_volume = vigra.analysis.labelVolume(block)
        writeHDF5(labeled_volume,path+"block{}/".format(i)+new_file_name,"data",compression="gzip")
Пример #15
0
def save_comp(path,save_path):
    '''Re-save every HDF5 file in `path` into `save_path` with gzip compression.'''
    files = os.listdir(path)

    print files

    # the enumerate index is unused; kept for byte-compatibility
    for i,file in enumerate(files):

        print file

        block = readHDF5(path + file, "data")
        writeHDF5(block,save_path + file, "data", compression="gzip")
Пример #16
0
def findingCellsInContact(setting_file='bgeig/settings/settings_bgeig.py'):
    '''Map each nucleus to the cytoplasm whose bounding box contains its center.

    For every plate/well/frame found under settings.allDataFolder, returns
    result[plate][well][frame] = {nucleus obj_label_id: cytoplasm obj_label_id}.
    A nucleus whose center falls inside several cytoplasm boxes keeps the last
    match encountered (dict update). Nuclei come from the secondary channel,
    cytoplasms from the primary channel.
    '''
    settings=Settings(setting_file, globals())
    
    plates=os.listdir(settings.allDataFolder)
    result={}
    for plate in plates:
        wells=os.listdir(os.path.join(settings.allDataFolder, plate, 'hdf5'))
        result[plate]={}
        for well in wells:
            file_=os.path.join(settings.allDataFolder, plate, 'hdf5', well)
            # drop the file extension; position is the last character of the well name
            well=well.split('.')[0]
            result[plate][well]=defaultdict(dict)
            
            path_nuclei_object="/sample/0/plate/{}/experiment/{}/position/{}/object/secondary__primary3".format(plate,well.split('_')[0],
                                                                                            well[-1])
            path_nuclei_centers="/sample/0/plate/{}/experiment/{}/position/{}/feature/secondary__primary3/center".format(plate, well.split('_')[0],
                                                                                            well[-1])
            path_cytoplasmic_object="/sample/0/plate/{}/experiment/{}/position/{}/object/primary__primary3".format(plate,well.split('_')[0],
                                                                                            well[-1])
            path_cytoplasmic_bb="/sample/0/plate/{}/experiment/{}/position/{}/feature/primary__primary3/bounding_box".format(plate, well.split('_')[0],
                                                                                            well[-1])
            
            nuclei_object=vi.readHDF5(file_, path_nuclei_object)
            nuclei_centers = vi.readHDF5(file_, path_nuclei_centers)
            
            cytoplasmic_object=vi.readHDF5(file_, path_cytoplasmic_object)
            cytoplasmic_bb=vi.readHDF5(file_, path_cytoplasmic_bb)
            
            frames=sorted(list(set(nuclei_object['time_idx'])))
            
            for frame in frames:
                # cytoplasm boxes and ids restricted to the current frame
                cytoplasm_curr_bb = cytoplasmic_bb[np.where(cytoplasmic_object['time_idx']==frame)]
                cytoplasm_curr_id = cytoplasmic_object['obj_label_id'][np.where(cytoplasmic_object['time_idx']==frame)]
                
                for i,bb in enumerate(cytoplasm_curr_bb):
                    # nuclei of this frame whose center lies strictly inside this box
                    wh_=np.where((nuclei_object['time_idx']==frame) & (nuclei_centers['x']>bb['left']) & (nuclei_centers['x']<bb['right'])
                                    & (nuclei_centers['y']>bb['top']) & (nuclei_centers['y']<bb['bottom']))[0]
                    for nuclei_id in nuclei_object['obj_label_id'][wh_]:
                        result[plate][well][frame].update({nuclei_id:cytoplasm_curr_id[i]})
    return result
Пример #17
0
 def pheno_seq(self, tracklets,track_filter= (lambda x: x.fusion !=True and len(x.lstPoints)>11)):
     '''Build the sequence of classification labels along each track.

     Tracks failing track_filter (default: skip fused tracks and tracks of
     11 points or fewer) are ignored. Returns a list with one label array per
     kept track, points ordered by (frame, cell id); -1 marks points whose
     object lookup raised IndexError.
     NOTE(review): mixing -1 ints with record-array rows before the
     np.array(currR)['label_idx'] access looks fragile -- confirm the -1
     branch is actually exercised.
     '''
     file_=os.path.join(self.settings.hdf5Folder, self.plate, 'hdf5', "{}.hdf5".format(self.well))
     path_objects="/sample/0/plate/{}/experiment/{}/position/1/object/primary__primary".format(self.plate, self.well.split('_')[0])
     path_classif="/sample/0/plate/{}/experiment/{}/position/1/feature/primary__primary/object_classification/prediction".format(self.plate, self.well.split('_')[0])
         
     objects = vi.readHDF5(file_, path_objects)
     classif = vi.readHDF5(file_, path_classif)
     result=[]
     
     for tracklet in filter(track_filter, tracklets):
         # py2: keys() returns a list, sorted in place by (frame, cell id)
         t=tracklet.lstPoints.keys(); t.sort(); 
         currR=[]
         for fr, cell_id in t:
             try:
                 where_=np.where((objects['time_idx']==fr)&(objects["obj_label_id"]==cell_id))[0]
                 currR.append(classif[where_])
             except IndexError:
                 currR.append(-1)
         currR = np.array(currR)['label_idx'][:,0]
         result.append(currR)
         
     return result
Пример #18
0
def rerun_connected_compts(path,new_path):
    '''Relabel every HDF5 block in `path` and write the result under `new_path` (gzip).

    NOTE(review): despite the name, this calls vigra.analysis.relabelVolume,
    not a connected-components labeling pass -- confirm which was intended.
    '''

    block_files = os.listdir(path)

    print block_files
    for file in block_files:

        print file

        block=readHDF5(path+file,"data" )
        # relabelVolume expects an unsigned integer volume
        block=block.astype("uint32")
        labeled_volume = vigra.analysis.relabelVolume(block)
        writeHDF5(labeled_volume,new_path+file,"data",compression="gzip")
Пример #19
0
def compute_real_names_for_blocks(path_to_res, resolved=False):
    '''Copy each block result into finished_renamed/ under its coordinate-based name.

    resolved: when true, read result_resolved.h5 and prefix the output file
    with "result_resolved_"; otherwise read result.h5 and prefix "result_".
    '''
    dict_blocks = {
        "block1": "x_5000_5520_y_2000_2520_z_3000_3520",
        "block2": "x_5000_5520_y_2000_2520_z_3480_4000",
        "block3": "x_5000_5520_y_2480_3000_z_3000_3520",
        "block4": "x_5480_6000_y_2000_2520_z_3000_3520",
        "block5": "x_5480_6000_y_2480_3000_z_3000_3520",
        "block6": "x_5480_6000_y_2000_2520_z_3480_4000",
        "block7": "x_5000_5520_y_2480_3000_z_3480_4000",
        "block8": "x_5480_6000_y_2480_3000_z_3480_4000",
    }
    # choose source name and output prefix once instead of duplicating the loop
    # body; also replaces the non-idiomatic "resolved==True" test
    if resolved:
        src_name, out_prefix = "result_resolved.h5", "result_resolved_"
    else:
        src_name, out_prefix = "result.h5", "result_"

    for key, coords in dict_blocks.items():
        block = readHDF5(path_to_res + key + "/" + src_name, "data")
        writeHDF5(block, path_to_res + "finished_renamed/" + out_prefix + coords + ".h5", "data")
Пример #20
0
 def classificationConcatenation(self):
     '''Total per-class object counts over positions 1 and 2 for this plate/well.

     Returns an array of 17 counts: 18 class labels with UndefinedCondensed
     folded into Apoptosis and the last column dropped.
     '''
     path_classif="/sample/0/plate/{}/experiment/{}/position/{{}}/feature/primary__test/object_classification/prediction".format(self.plate, self.well)
     
     result=None
     
     for pos in [1,2]:
         classification = vi.readHDF5(self.file_.format(pos), path_classif.format(pos))
         counts = np.bincount(classification['label_idx'], minlength=18)
         # FIX: 'result == None' compares elementwise once result is a numpy
         # array (ambiguous / wrong); identity test 'is None' is intended
         result = counts if result is None else result + counts
                 
     #putting UndefinedCondensed with Apoptosis
     result[11]+=result[16]
     result[16]=result[17]
     
     return result[:-1]
Пример #21
0
 def _load(self):
     '''Load the object table for this plate/well and return the requested statistic.

     Only self.interest == 'nb_object_final' is supported: the number of
     objects present in the last frame. Any other value raises ValueError.
     '''
     if self.settings.new_h5:
         file_=os.path.join(self.settings.hdf5Folder, "{}", 'hdf5', "{}.ch5")
         path_objects="/sample/0/plate/{}/experiment/{}/position/1/object/primary__primary3"
     else:
         file_=os.path.join(self.settings.hdf5Folder, "{}", 'hdf5', "{}.hdf5")
         path_objects="/sample/0/plate/{}/experiment/{}/position/1/object/primary__primary"
     
     objects = vi.readHDF5(file_.format(self.plate, self.well), path_objects.format(self.plate, self.well.split('_')[0]))
     
     last_fr = np.max(objects['time_idx'])
     print "Last frame ", last_fr
     if self.interest == 'nb_object_final':
         return len(np.where(objects['time_idx']==last_fr)[0])
     raise ValueError
Пример #22
0
def compute_names(path, new_path, prefix_old, prefix_new):
    '''Re-save each numbered block file under its coordinate-based name.

    Reads path/<prefix_old><n>.h5 and writes it gzip-compressed to
    new_path/<prefix_new><coordinates>.h5.
    '''
    dict_blocks = {
    "1": "x_5000_5520_y_2000_2520_z_3000_3520",
    "2": "x_5000_5520_y_2000_2520_z_3480_4000",
    "3": "x_5000_5520_y_2480_3000_z_3000_3520",
    "4": "x_5480_6000_y_2000_2520_z_3000_3520",
    "5": "x_5480_6000_y_2480_3000_z_3000_3520",
    "6": "x_5480_6000_y_2000_2520_z_3480_4000",
    "7": "x_5000_5520_y_2480_3000_z_3480_4000",
    "8": "x_5480_6000_y_2480_3000_z_3480_4000",
    }

    for number, coords in dict_blocks.items():
        data = readHDF5(path + prefix_old + number + ".h5", "data")
        writeHDF5(data, new_path + prefix_new + coords + ".h5", "data", compression="gzip")
Пример #23
0
def compute_names_of_results_in_folders(path, new_path, name, new_name):
    '''Re-save each block-folder result under its coordinate-based name.

    Reads path/block<n>/<name>.h5 and writes it gzip-compressed to
    new_path/<new_name>_<coordinates>.h5.
    '''
    dict_blocks = {
    "block1": "x_5000_5520_y_2000_2520_z_3000_3520",
    "block2": "x_5000_5520_y_2000_2520_z_3480_4000",
    "block3": "x_5000_5520_y_2480_3000_z_3000_3520",
    "block4": "x_5480_6000_y_2000_2520_z_3000_3520",
    "block5": "x_5480_6000_y_2480_3000_z_3000_3520",
    "block6": "x_5480_6000_y_2000_2520_z_3480_4000",
    "block7": "x_5000_5520_y_2480_3000_z_3480_4000",
    "block8": "x_5480_6000_y_2480_3000_z_3480_4000",
    }

    for key, coords in dict_blocks.items():
        data = readHDF5(path + key + "/" + name + ".h5", "data")
        writeHDF5(data, new_path + new_name + "_" + coords + ".h5", "data", compression="gzip")
def orderHDF5(filename, plate, well):
    '''
    Returning the number of the frame following the frame number 0 in the hdf5 File
    
    Input:
    - filename: complete path to the hdf5 file as produced by CellCognition when extracting features from the raw data
    - plate, well: used to build the internal HDF5 path (the position is the
      last character of the well name)

    Returns the sentinel value 1000 when the file or dataset cannot be read.
    '''

    pathObjects = "/sample/0/plate/"+plate+"/experiment/"+well[:-3]+"/position/"+well[-1]+"/object/primary__primary"
    try:
        tabObjects = vi.readHDF5(filename, pathObjects)
    except:
        print 'No file'
        return 1000
    else:
        arr = np.array(tabObjects, dtype=int)
        # first strictly positive entry, i.e. the frame right after frame 0
        return arr[np.where(arr>0)][0]#otherwise we forget the last frame
def countingHDF5(filename, plate, well):
    '''
    Counting the number of images contained in an hdf5 file as a proxy for the number of images of the original experiment
    
    Input:
    - filename: complete path to the hdf5 file as produced by CellCognition when extracting features from the raw data
    - plate, well: used to build the internal HDF5 path (the position is the
      last character of the well name)

    Returns the sentinel value 1000 when the file or dataset cannot be read.
    '''

    pathObjects = "/sample/0/plate/"+plate+"/experiment/"+well[:-3]+"/position/"+well[-1]+"/object/primary__primary"
    try:
        tabObjects = vi.readHDF5(filename, pathObjects)
    except:
        print 'No file'
        return 1000
    else:
        #changed to take into account that for some movies frames are not in the chronological order in the hdf5 file
        return np.max(np.array(tabObjects, dtype=int))+1#otherwise we forget the last frame
Пример #26
0
def kidney_looker(h5Folder = '/share/data40T/aschoenauer/drug_screen/results_August_2016/mito_joint_classifier'):
    '''
    Identify whether certain siRNAs are characterized by a high fraction of
    "Kidney" nuclei (class label 17, the last of 18 classification bins).

    Input:
    - h5Folder: folder with one sub-folder per plate, each containing an
      'hdf5' directory of per-well .ch5 files

    Returns a pandas DataFrame with columns (Plate, Well, siRNA, KidneyPerc);
    scrambled siRNAs are relabelled 'CT' (control).
    '''
    
    # NOTE(review): the file handle is never closed; relies on interpreter cleanup.
    f=open('../data/mapping_2014/qc_export.txt', 'r')
    reader = csv.reader(f, delimiter='\t'); reader.next()  # skip header row (Python 2 iterator API)
    whatList = []  # accumulates (plate, well, siRNA, kidney fraction) records
    plateList = np.array(os.listdir(h5Folder))
    i=0  # counts processed wells, used only for progress reporting
    for el in reader:
        # Keep rows whose first column is a plate--well id (not a 'Valid'
        # marker), whose plate number is < 600, and which passed QC ("ok").
        # NOTE(review): the meaning of the <600 cutoff is not visible here — confirm.
        if 'Valid' not in el[0] and int(el[0][2:6])<600 and el[-1]=="ok":
            plate = el[0].split('--')[0]
            well = el[0].split('--')[1]
            
            # Resolve the QC plate id to the actual folder name on disk
            # (folder names contain the plate id as a substring).
            truePlate = plateList[np.where([plate in p for p in plateList])[0]][0]
            
            if 'hdf5' in os.listdir(os.path.join(h5Folder, truePlate)) and '00{}_01.ch5'.format(well) in os.listdir(os.path.join(h5Folder, truePlate, 'hdf5')):
                # pathClassification is a module-level template defined elsewhere in this file.
                pathClassif = pathClassification.format(truePlate, '00{}'.format(well))
                try:
                    tabClassification = np.array(vi.readHDF5(os.path.join(h5Folder, truePlate, 'hdf5', '00{}_01.ch5'.format(well)), 
                                                         pathClassif), dtype=int)
                except IOError:
                    print "Error ", truePlate, well
                    continue
                
                # Fraction of label-17 ("Kidney") nuclei among all nuclei of the well.
                kidneys = np.bincount(tabClassification, minlength=18)[-1]/float(tabClassification.shape[0])
                
                
                if el[2]=='scrambled':
                    whatList.append((truePlate, well, 'CT', kidneys))
                else:
                    whatList.append((truePlate, well, el[1], kidneys))
                i+=1
                if i%1000==0:
                    print "Done ", i
                    
    return pandas.DataFrame.from_records(whatList, columns = ('Plate', 'Well', 'siRNA', 'KidneyPerc'))
                    
            
Пример #27
0
 def DS_usable(self):
     '''
     Check whether this plate/well experiment is usable: reject wells that
     failed the intensity QC while still containing many cells (over-exposed),
     and wells with a low initial cell count (< 50 cells over positions 1 and
     2 at frame 0).

     Returns True if the experiment is usable, False otherwise.
     '''
     #i. Opening file to see qc results for the whole plate
     # NOTE(review): opened in default (text) mode then pickle-loaded — Python 2 style.
     f=open(os.path.join(self.settings.result_dir, 'processedDictResult_P{}.pkl'.format(self.plate)))
     d=pickle.load(f); f.close()
     
     # Wells that passed QC are usable without any further check.
     if int(self.well) not in d['FAILED QC']:
         return True
     
     # Failed QC despite a substantial cell count => intensity problem, not emptiness.
     if int(self.well) in d['FAILED QC'] and d[int(self.well)]['cell_count'][0]>50:
         print "Intensity QC failed"
         return False
     
     # Otherwise, count cells at the first frame (time_idx == 0) over both positions.
     c=0
     for pos in [1,2]:
         tab=vi.readHDF5(self.file_.format(pos), self.path_objects.format(pos))
         c+=np.where(tab['time_idx']==0)[0].shape[0]
         
     if c<50:
         return False
     return True
Пример #28
0
    def loadData(self, plateModel, wellList):
        '''
        Collect per-well phenotype percentage profiles for the given plate
        model and wells (intended for control wells).

        Input:
        - plateModel: substring identifying the plate replicates in self.plateList
        - wellList: wells to load (formatted into '00{}' file names)

        Returns a 2-D array (one row per usable well across all replicate
        plates) of per-class nucleus fractions: 18 classes are counted, then
        columns 15-17 (Out of focus / Artefact) are removed, leaving 15.
        NOTE(review): the original comment said 16 — confirm the expected width.
        Returns None on error.
        '''
        # All plates on disk whose name contains the plate model substring.
        plates = self.plateList[np.where([plateModel in p for p in self.plateList])[0]]
        res = None

        for plate in plates:
            for well in wellList:
                # Skip wells that failed quality control for this plate.
                if not well in self.QC[qc_trsf(plate)]:
                    continue
                if not 'hdf5' in os.listdir(os.path.join(self.raw_result_dir, plate))\
                            or not '00{}_01.ch5'.format(well) in os.listdir(os.path.join(self.raw_result_dir, plate, 'hdf5')):
                    print 'No H5 file ', plate, well
                    continue
                filename = os.path.join(self.raw_result_dir, plate, 'hdf5', '00{}_01.ch5'.format(well))
                    
                pathClassif = self.pathClassif.format(plate, '00{}'.format(well))
                try:
                    tabClassification = np.array(vi.readHDF5(filename, pathClassif), dtype=int)
                except ValueError:
                    # NOTE(review): aborts the whole collection on a single
                    # unreadable table — confirm this is intended.
                    return None
                
                # Per-class fraction of nuclei for this well (18 classes counted).
                r = np.bincount(tabClassification, minlength=18)/float(tabClassification.shape[0])
                res = r if res is None else np.vstack((res, r))
                
        #Deleting Out of focus and Artefact nuclei to do the tests
        try:
            res = np.delete(res, [15,16, 17], 1)
        except IndexError:
            # res is 1-D when only one well was loaded: promote to 2-D first.
            try:
                res=res[np.newaxis,:]
                res = np.delete(res, [15,16,17], 1)
            except:
                # res is None (nothing loaded) or otherwise unusable.
                return None
                
        return res
def arraySegmentation(plaque, puits, dir):
    '''
    Load the segmentation image-region table for one well from the hdf5 file
    written by CellCognition.

    Input:
    - plaque: plate identifier used inside the HDF5 path
    - puits: well identifier, e.g. '00001_01'
    - dir: result directory; its last 8 characters are stripped and replaced
      by the 'hdf5/' sub-folder to locate the file

    Returns the table read from the HDF5 file.
    '''
    experiment = puits[:-3]
    position = puits[-1]
    h5_file = dir[:-8]+"hdf5/"+puits+".hdf5"
    h5_path = "/sample/0/plate/"+plaque+"/experiment/"+experiment+"/position/"+position+"/image/region"
    return vi.readHDF5(h5_file, h5_path)
Пример #30
0
if __name__ == "__main__":


    # Re-label and copy four segmentation variants into the figure folder.
    path="/mnt/localdata1/amatskev/neuraldata/fib25_hdf5/PAPER/renamed_and_stiched/"
    new_path="/mnt/localdata1/amatskev/neuraldata/fib25_hdf5/for_figure/"
    path_names=["resolved_local_alba/","oracle_local_alba/","gt/","init_seg_alba/"]
    new_path_names=["res/","oracle/","gt/","init/"]

    for i,_ in enumerate(path_names):

        # NOTE(review): 'np' is rebound to a path string here; if numpy is
        # imported as np in this module, the alias is shadowed from this point on.
        np=new_path+new_path_names[i]
        op=path+path_names[i]
        relabelcons(op,np)  # relabelcons is defined elsewhere in the project


    # Ground-truth volume used for the figure.
    gt_big=readHDF5("/mnt/localdata1/amatskev/neuraldata/fib25_hdf5/gt/"
                    "gt_x_5000_6000_y_2000_3000_z_3000_4000.h5","data")

    # gt_big_relabeled=vigra.analysis.labelVolume(gt_big.astype("uint32"))

    # writeHDF5(gt_big_relabeled,"/mnt/localdata1/amatskev/neuraldata/fib25_hdf5/gt/"
    #                 "gt_x_5000_6000_y_2000_3000_z_3000_4000.h5","data",compression="gzip")




    # Source (block-wise) and destination (renamed/stitched) paths for the
    # albatross-resolved segmentation.
    path_resolved_alba="/mnt/localdata1/amatskev/neuraldata/fib25_hdf5/" \
                       "PAPER/blocks/fib25_FINAL_albatross_resolving/"

    path_renamed_resolved_alba="/mnt/localdata1/amatskev/neuraldata/fib25_hdf5/" \
                             "PAPER/renamed_and_stiched/resolved_local_alba/"