import os

import h5py
import numpy as np
import vigra
import vigra.impex as vi
from vigra.impex import readHDF5, writeHDF5


def load_subvolume_and_split_in_blocks(path_to_gt, save_path_blocks, prefix="prefix"):
    # Block names encode their bounding boxes as fixed-width coordinates:
    # x_<min>_<max>_y_<min>_<max>_z_<min>_<max>.
    dict_blocks = {
        "block1": "x_5000_5520_y_2000_2520_z_3000_3520",
        "block2": "x_5000_5520_y_2000_2520_z_3480_4000",
        "block3": "x_5000_5520_y_2480_3000_z_3000_3520",
        "block4": "x_5480_6000_y_2000_2520_z_3000_3520",
        "block5": "x_5480_6000_y_2480_3000_z_3000_3520",
        "block6": "x_5480_6000_y_2000_2520_z_3480_4000",
        "block7": "x_5000_5520_y_2480_3000_z_3480_4000",
        "block8": "x_5480_6000_y_2480_3000_z_3480_4000",
    }
    for key in dict_blocks:
        name = dict_blocks[key]
        with h5py.File(path_to_gt, "r") as f:
            # The fixed-width name format allows slicing the coordinates
            # directly out of the string.
            subvolume = f["segmentation"][int(name[2:6]):int(name[7:11]),
                                          int(name[14:18]):int(name[19:23]),
                                          int(name[26:30]):int(name[31:35])]
        labeled_volume = vigra.analysis.labelVolume(subvolume.astype("uint32"))
        writeHDF5(labeled_volume, save_path_blocks + prefix + key + ".h5",
                  "data", compression="gzip")
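# The fixed-width slicing above breaks as soon as a coordinate gains or loses
# a digit. A minimal sketch of a more robust alternative, assuming the same
# "x_<min>_<max>_y_<min>_<max>_z_<min>_<max>" naming scheme; the helper name
# parse_block_name is hypothetical, not part of the original code:
import re

def parse_block_name(name):
    # Extract the six coordinates and return them as a 3D slice tuple.
    x0, x1, y0, y1, z0, z1 = map(int, re.match(
        r"x_(\d+)_(\d+)_y_(\d+)_(\d+)_z_(\d+)_(\d+)", name).groups())
    return np.s_[x0:x1, y0:y1, z0:z1]

# Example: parse_block_name("x_5000_5520_y_2000_2520_z_3000_3520")
# == np.s_[5000:5520, 2000:2520, 3000:3520]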
def savedata(data, path):
    """
    Saves a volume as a .tiff or .h5 file in path.

    :type data: numpy.ndarray
    :param data: Volume to be saved.

    :type path: str
    :param path: Path to the file where the volume is to be saved.
                 Must end with .tiff, .tif or .h5.
    """
    if path.endswith(".tiff") or path.endswith(".tif"):
        try:
            from vigra.impex import writeVolume
        except ImportError:
            raise ImportError("Vigra is needed to read/write TIFF volumes, "
                              "but could not be imported.")
        writeVolume(data, path, '', dtype='UINT8')
    elif path.endswith(".h5"):
        # Prefer vigra for HDF5 output; fall back to plain h5py if it is
        # not installed.
        try:
            from vigra.impex import writeHDF5
            vigra_available = True
        except ImportError:
            vigra_available = False
            import h5py
        if vigra_available:
            writeHDF5(data, path, "/data")
        else:
            with h5py.File(path, mode='w') as hf:
                hf.create_dataset(name='data', data=data)
    else:
        raise NotImplementedError("Can't save: unsupported format. "
                                  "Supported formats are .tiff and .h5")
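# Minimal usage sketch for savedata; the array and paths below are
# placeholders, not data from the original code:
#
#   vol = np.zeros((64, 64, 64), dtype="uint8")
#   savedata(vol, "/tmp/volume.h5")     # HDF5, via vigra when available
#   savedata(vol, "/tmp/volume.tiff")   # TIFF stack, requires vigra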
def distribute_folders(path, prefix, new_path, new_name):
    block_files = os.listdir(path)
    for file in block_files:
        number = file[len(prefix):-3]
        block = readHDF5(path + file, "data")
        writeHDF5(block, new_path + "block{}/".format(number) + new_name, "data")
def _saveResult(self):
    # Method of a tracking class; relies on attributes set elsewhere
    # (TRACK_DTYPE, TRACKING_PATH_NAME, plate, well, ch5_file). The original
    # try/except only re-raised the exception, so it is dropped.
    self.track_result = np.array(self.track_result, dtype=self.TRACK_DTYPE)
    path = self.TRACKING_PATH_NAME.format(self.plate, self.well)
    vi.writeHDF5(self.track_result, self.ch5_file, path)
    return 1
def relabelcons(path, new_path):
    files_list = os.listdir(path)
    for file in files_list:
        block = readHDF5(path + file, "data")
        block = block.astype("uint32")
        # relabelConsecutive maps the existing labels onto a consecutive range
        # without recomputing connectivity; it returns the relabeled array,
        # the maximum label, and the mapping.
        labeled_volume, _, _ = vigra.analysis.relabelConsecutive(block)
        writeHDF5(labeled_volume, new_path + file, "data", compression="gzip")
def rerun_connected_compts_in_folders(path, file_name, new_file_name):
    # Note: only block 1 is processed here; extend the list to cover
    # further blocks.
    for i in [1]:
        print(i)
        block = readHDF5(path + "block{}/".format(i) + file_name, "data")
        block = block.astype("uint32")
        labeled_volume = vigra.analysis.labelVolume(block)
        writeHDF5(labeled_volume, path + "block{}/".format(i) + new_file_name,
                  "data", compression="gzip")
def save_comp(path, save_path):
    files = os.listdir(path)
    print(files)
    for file in files:
        print(file)
        block = readHDF5(path + file, "data")
        writeHDF5(block, save_path + file, "data", compression="gzip")
def rerun_connected_compts(path, new_path):
    block_files = os.listdir(path)
    print(block_files)
    for file in block_files:
        print(file)
        block = readHDF5(path + file, "data")
        block = block.astype("uint32")
        # vigra.analysis has no relabelVolume; labelVolume recomputes the
        # connected components, which matches this function's name.
        labeled_volume = vigra.analysis.labelVolume(block)
        writeHDF5(labeled_volume, new_path + file, "data", compression="gzip")
def test_writeAndReadVolumeHDF5():
    # im, the volume fixtures and the check helpers are module-level objects
    # of the surrounding test suite.
    try:
        import h5py
    except ImportError:
        print("Warning: 'import h5py' failed, not executing HDF5 import/export tests")
        return
    # positive tests
    # write and read volume
    im.writeHDF5(volume256, "hdf5test.hd5", "group/subgroup/voldata")
    volume256_imp = im.readHDF5("hdf5test.hd5", "group/subgroup/voldata")
    checkEqualData(volume256, volume256_imp)
    # write and read float volume
    im.writeHDF5(volumeFloat, "hdf5test.hd5", "group/subgroup/voldata")
    volumeFloat_imp = im.readHDF5("hdf5test.hd5", "group/subgroup/voldata")
    checkEqualData(volumeFloat.transposeToDefaultOrder(), volumeFloat_imp)
    # write multiple sets and check if they are all there afterwards
    im.writeHDF5(volume256, "hdf5test.hd5", "group/subgroup/voldata")
    im.writeHDF5(volume256, "hdf5test.hd5", "group/subgroup/voldata2")
    volume256_imp1 = im.readHDF5("hdf5test.hd5", "group/subgroup/voldata")
    volume256_imp1 = volume256_imp1.dropChannelAxis()
    volume256_imp2 = im.readHDF5("hdf5test.hd5", "group/subgroup/voldata2")
    volume256_imp2 = volume256_imp2.dropChannelAxis()
    checkEqualData(volume256, volume256_imp1)
    checkEqualData(volume256, volume256_imp2)
    # negative tests: modify the read-back data and check inequality
    volume256_imp[1, 1, 1] = 100000
    checkUnequalData(volume256, volume256_imp)
    volumeFloat_imp[1, 1, 1] = 100000
    checkUnequalData(volumeFloat.transposeToDefaultOrder(), volumeFloat_imp)
def test_writeAndReadImageHDF5():
    try:
        import h5py
    except ImportError:
        print("Warning: 'import h5py' failed, not executing HDF5 import/export tests")
        return
    # positive tests
    # write and read image
    im.writeHDF5(image, "hdf5test.hd5", "group/subgroup/imgdata")
    image_imp = im.readHDF5("hdf5test.hd5", "group/subgroup/imgdata")
    checkEqualData(image, image_imp)
    # write and read scalar image
    im.writeHDF5(scalar_image, "hdf5test.hd5", "group/subgroup/imgdata")
    scalar_image_imp = im.readHDF5("hdf5test.hd5", "group/subgroup/imgdata")
    scalar_image_imp = scalar_image_imp.dropChannelAxis()
    checkEqualData(scalar_image, scalar_image_imp)
    # write multiple sets and check if they are all there afterwards
    im.writeHDF5(image, "hdf5test.hd5", "group/subgroup/imgdata")
    im.writeHDF5(image, "hdf5test.hd5", "group/subgroup/imgdata2")
    image_imp1 = im.readHDF5("hdf5test.hd5", "group/subgroup/imgdata")
    image_imp2 = im.readHDF5("hdf5test.hd5", "group/subgroup/imgdata2")
    checkEqualData(image, image_imp1)
    checkEqualData(image, image_imp2)
    # negative tests: modify the read-back data and check inequality
    image_imp[1, 1, 1] = 100000
    checkUnequalData(image, image_imp)
    scalar_image_imp[1, 1] = 100000
    checkUnequalData(scalar_image, scalar_image_imp)
def compute_names_of_results_in_folders(path, new_path, name, new_name):
    dict_blocks = {
        "block1": "x_5000_5520_y_2000_2520_z_3000_3520",
        "block2": "x_5000_5520_y_2000_2520_z_3480_4000",
        "block3": "x_5000_5520_y_2480_3000_z_3000_3520",
        "block4": "x_5480_6000_y_2000_2520_z_3000_3520",
        "block5": "x_5480_6000_y_2480_3000_z_3000_3520",
        "block6": "x_5480_6000_y_2000_2520_z_3480_4000",
        "block7": "x_5000_5520_y_2480_3000_z_3480_4000",
        "block8": "x_5480_6000_y_2480_3000_z_3480_4000",
    }
    for key in dict_blocks:
        block = readHDF5(path + key + "/" + name + ".h5", "data")
        writeHDF5(block, new_path + new_name + "_" + dict_blocks[key] + ".h5",
                  "data", compression="gzip")
def compute_names(path, new_path, prefix_old, prefix_new):
    dict_blocks = {
        "1": "x_5000_5520_y_2000_2520_z_3000_3520",
        "2": "x_5000_5520_y_2000_2520_z_3480_4000",
        "3": "x_5000_5520_y_2480_3000_z_3000_3520",
        "4": "x_5480_6000_y_2000_2520_z_3000_3520",
        "5": "x_5480_6000_y_2480_3000_z_3000_3520",
        "6": "x_5480_6000_y_2000_2520_z_3480_4000",
        "7": "x_5000_5520_y_2480_3000_z_3480_4000",
        "8": "x_5480_6000_y_2480_3000_z_3480_4000",
    }
    for key in dict_blocks:
        block = readHDF5(path + prefix_old + key + ".h5", "data")
        writeHDF5(block, new_path + prefix_new + dict_blocks[key] + ".h5",
                  "data", compression="gzip")
def load_big_cut_out_subvolume(path, save_path,
                               boundaries=np.s_[5000:6000, 2000:3000, 3000:4000],
                               relabel=False):
    with h5py.File(path, "r") as f:
        subvolume = f["data"][boundaries]
    print("loading from", path)
    if relabel:
        subvolume = vigra.analysis.labelVolume(subvolume.astype("uint32"))
        print("--> RELABELED")
    print("saving subvolume to", save_path)
    writeHDF5(subvolume, save_path, "data", compression="gzip")
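# np.s_ turns slice syntax into slice objects, so callers can pass any cutout
# without building slices by hand. A usage sketch with placeholder paths:
#
#   load_big_cut_out_subvolume("raw.h5", "cutout.h5",
#                              boundaries=np.s_[0:512, 0:512, 0:512],
#                              relabel=True)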
def compute_real_names_for_blocks(path_to_res, resolved=False):
    dict_blocks = {
        "block1": "x_5000_5520_y_2000_2520_z_3000_3520",
        "block2": "x_5000_5520_y_2000_2520_z_3480_4000",
        "block3": "x_5000_5520_y_2480_3000_z_3000_3520",
        "block4": "x_5480_6000_y_2000_2520_z_3000_3520",
        "block5": "x_5480_6000_y_2480_3000_z_3000_3520",
        "block6": "x_5480_6000_y_2000_2520_z_3480_4000",
        "block7": "x_5000_5520_y_2480_3000_z_3480_4000",
        "block8": "x_5480_6000_y_2480_3000_z_3480_4000",
    }
    for key in dict_blocks:
        if resolved:
            block = readHDF5(path_to_res + key + "/result_resolved.h5", "data")
            writeHDF5(block, path_to_res + "finished_renamed/result_resolved_"
                      + dict_blocks[key] + ".h5", "data")
        else:
            block = readHDF5(path_to_res + key + "/result.h5", "data")
            writeHDF5(block, path_to_res + "finished_renamed/result_"
                      + dict_blocks[key] + ".h5", "data")
def load_train_subvolume_and_split_in_blocks(path, save_path_blocks,
                                             new_prefix="new_prefix_",
                                             relabel=False):
    """Split the training subvolume (4000:5000, 2000:3000, 4000:4500) into blocks."""
    train_blocks = {
        "fm_train_block1": "x_4000_4500_y_2000_2500_z_4000_4500",
        "fm_train_block2": "x_4000_4500_y_2500_3000_z_4000_4500",
        "fm_train_block3": "x_4500_5000_y_2000_2500_z_4000_4500",
        "fm_train_block4": "x_4500_5000_y_2500_3000_z_4000_4500",
    }
    new_path = os.path.join(save_path_blocks, new_prefix[:-1])
    if not os.path.exists(new_path):
        os.mkdir(new_path)
    for key in train_blocks:
        name = train_blocks[key]
        print(new_prefix + key)
        with h5py.File(path, "r") as f:
            subvolume = f["data"][int(name[2:6]):int(name[7:11]),
                                  int(name[14:18]):int(name[19:23]),
                                  int(name[26:30]):int(name[31:35])]
        if relabel:
            subvolume = vigra.analysis.labelVolume(subvolume.astype("uint32"))
            print("--> RELABELED")
        writeHDF5(subvolume, new_path + "/" + new_prefix + key + ".h5",
                  "data", compression="gzip")
def ecrireTraining(ensembleTraj, fileToSave, LENGTH):
    # Writes training events (move/merge/split/appear/disappear) extracted from
    # the trajectories to a text log and an HDF5 file. EVENTS and EVENTSSIZE
    # are expected as module-level constants.
    solutions = {}
    fileS = open(fileToSave, "w")
    fileSHDF5 = fileToSave[:-3] + "hdf5"
    going = {}
    coming = {}
    appearCandidates = {}
    disappearCandidates = {}
    for trajectoire in ensembleTraj.lstTraj:
        frameLabels = sorted(trajectoire.lstPoints.keys())
        lastLabels = []
        lastFrame = -1
        lastMoveSplit = [False for x in range(100)]
        for f in frameLabels:
            frame = int(f[0])
            nextFrame = frame + 1
            if nextFrame == LENGTH:
                break
            if lastLabels == []:
                lastLabels = trajectoire.findFrame(frame)
            if frame == lastFrame:
                continue
            nextLabels = trajectoire.findFrame(nextFrame)
            if frame not in solutions:
                solutions[frame] = {}
            l = len(lastLabels)
            nextL = len(nextLabels)
            if int(l) == int(nextL):
                if int(l) == 1 and int(lastLabels[0]) == -1:
                    appearing = nextLabels[0]
                    if nextFrame not in appearCandidates:
                        appearCandidates[nextFrame] = []
                    appearCandidates[nextFrame].append(appearing)
                elif int(l) == 1 and int(nextLabels[0]) == -2:
                    disappearing = lastLabels[0]
                    if frame not in disappearCandidates:
                        disappearCandidates[frame] = []
                    disappearCandidates[frame].append(disappearing)
                elif int(l) == 1:
                    # single source, single target: a move
                    if "move" not in solutions[frame]:
                        solutions[frame]["move"] = [[], []]
                    solutions[frame]["move"][0].append(lastLabels)
                    solutions[frame]["move"][1].append(nextLabels)
                    if nextFrame not in coming:
                        coming[nextFrame] = []
                    for label in nextLabels:
                        coming[nextFrame].append(label)
                    if frame not in going:
                        going[frame] = []
                    for label in lastLabels:
                        going[frame].append(label)
                else:
                    raise ValueError("problem: {} {} at frame {}".format(
                        lastLabels, nextLabels, frame))
            else:
                raise ValueError("problem at frame {}: {} {}".format(
                    frame, lastLabels, nextLabels))
            lastFrame = frame
            if not lastMoveSplit[frame]:
                lastLabels = nextLabels
            if int(lastLabels[0]) == -2:
                break

    # looking at merges: if two cells have the same target, it is not a move
    # but a merge
    for f in solutions:
        if "move" not in solutions[f]:
            continue
        listeLabelsTargetMove = solutions[f]["move"][1]
        listeLabelsSourceMove = solutions[f]["move"][0]
        nextFrame = int(f + 1)
        newSourceMove = []
        newTargetMove = []
        merges = []
        moves = []
        for t_el in listeLabelsTargetMove:
            if listeLabelsTargetMove.count(t_el) > 1:
                merges.append(t_el)
            else:
                moves.append(t_el)
        indexToDel = []
        for label1 in merges:
            nextLabels = label1
            lastLabels = []
            for i in range(len(listeLabelsTargetMove)):
                if listeLabelsTargetMove[i] == label1:
                    lastLabels.append(listeLabelsSourceMove[i][0])
                    indexToDel.append(i)
            if "merge" not in solutions[f]:
                solutions[f]["merge"] = [[], []]
            if nextLabels in solutions[f]["merge"][1]:
                continue
            solutions[f]["merge"][0].append(lastLabels)
            solutions[f]["merge"][1].append(nextLabels)
            if f not in going:
                going[f] = []
            if nextFrame not in coming:
                coming[nextFrame] = []
            for label in nextLabels:
                coming[nextFrame].append(label)
            for label in lastLabels:
                going[f].append(label)

        # looking at splits: if two cells have the same source, it is not a
        # move but a split
        splits = []
        moves = []
        for s_el in listeLabelsSourceMove:
            if listeLabelsSourceMove.count(s_el) > 1:
                splits.append(s_el)
            else:
                moves.append(s_el)
        for label1 in splits:
            lastLabels = label1
            nextLabels = []
            for i in range(len(listeLabelsSourceMove)):
                if listeLabelsSourceMove[i] == label1:
                    indexToDel.append(i)
                    nextLabels.append(listeLabelsTargetMove[i][0])
            if "split" not in solutions[f]:
                solutions[f]["split"] = [[], []]
            if lastLabels in solutions[f]["split"][0]:
                continue
            solutions[f]["split"][0].append(lastLabels)
            solutions[f]["split"][1].append(nextLabels)
            if f not in going:
                going[f] = []
            if nextFrame not in coming:
                coming[nextFrame] = []
            for label in nextLabels:
                coming[nextFrame].append(label)
            for label in lastLabels:
                going[f].append(label)
        # keep only the moves that were not reclassified as merges or splits
        for i in range(len(listeLabelsSourceMove)):
            if i not in indexToDel:
                newSourceMove.append(listeLabelsSourceMove[i])
                newTargetMove.append(listeLabelsTargetMove[i])
        solutions[f]["move"][1] = newTargetMove
        solutions[f]["move"][0] = newSourceMove

    # appear candidates
    for frame in appearCandidates.keys():
        appearlist = appearCandidates[frame]
        for label in appearlist:
            if frame not in coming or label not in coming[frame]:
                print("APPEAR " + str(label) + " on frame " + str(frame))
                if "appear" not in solutions[frame - 1]:
                    solutions[frame - 1]["appear"] = [[], []]
                solutions[frame - 1]["appear"][0].append([-1])
                solutions[frame - 1]["appear"][1].append(label)

    # disappear candidates
    for frame in disappearCandidates.keys():
        disappearlist = disappearCandidates[frame]
        for label in disappearlist:
            if frame not in going or label not in going[frame]:
                print("DISAPPEAR " + str(label) + " on frame " + str(frame))
                if "disappear" not in solutions[frame]:
                    solutions[frame]["disappear"] = [[], []]
                solutions[frame]["disappear"][0].append(label)
                solutions[frame]["disappear"][1].append([])

    count = {}
    out_merge = "\n MERGE \n"
    out_split = "\n SPLIT \n"
    for e in EVENTS:
        count[e] = 0
    for f in solutions:
        fileS.write("\n --------------------------FRAME " + str(f)
                    + "------------------------------------")
        for e in solutions[f]:
            s = len(solutions[f][e][0])
            # mind the axis order: 'cx' is (number of events, event size);
            # writeHDF5 restores numpy order anyway
            shapeS = (s, EVENTSSIZE[e][0])
            shapeT = (s, EVENTSSIZE[e][1])
            tabSource = vigra.VigraArray(shapeS, np.int32,
                                         axistags=vigra.VigraArray.defaultAxistags('cx'),
                                         init=True)
            tabTarget = vigra.VigraArray(shapeT, np.int32,
                                         axistags=vigra.VigraArray.defaultAxistags('cx'),
                                         init=True)
            tabSource = tabSource - 1
            tabTarget = tabTarget - 1
            path = "Training/{:0>6}/{}/".format(f, e)
            pathSource = path + "Source"
            pathTarget = path + "Target"
            fileS.write("\n EVENT " + str(e) + "*******\n SOURCES :\n")
            j = 0
            for label in solutions[f][e][0]:
                if isinstance(label, (int, np.uint16)):
                    length = 1
                else:
                    length = int(len(label))
                fileS.write("\n label" + str(label))
                if label == []:
                    continue
                diff = EVENTSSIZE[e][0] - length
                if diff == 0:
                    tabSource[j] = label
                else:
                    # pad with -1 up to the fixed event size; break on scalar
                    # labels instead of looping forever
                    while diff > 0:
                        try:
                            label.append(-1)
                        except AttributeError:
                            print("event " + str(e) + ": size mismatch between "
                                  "event size " + str(EVENTSSIZE[e][0])
                                  + " and training label of length " + str(length))
                            break
                        else:
                            diff -= 1
                    tabSource[j] = label
                j += 1
                count[e] += 1
            i = 0
            fileS.write("\n TARGETS :\n")
            for label in solutions[f][e][1]:
                if isinstance(label, (int, np.uint16)):
                    length = 1
                else:
                    length = int(len(label))
                fileS.write("\n label" + str(label))
                diff = EVENTSSIZE[e][1] - length
                if diff == 0:
                    tabTarget[i] = label
                else:
                    # pad with -1 up to the fixed event size; break on scalar
                    # labels instead of looping forever
                    while diff > 0:
                        try:
                            label.append(-1)
                        except AttributeError:
                            print("event " + str(e) + ": size mismatch between "
                                  "event size " + str(EVENTSSIZE[e][1])
                                  + " and training label of length " + str(length))
                            break
                        else:
                            diff -= 1
                    tabTarget[i] = label
                i += 1
            if i != j:
                print("problem: different lengths for Source and Target at "
                      "frame " + str(f) + " for event " + e)
            vi.writeHDF5(tabSource, fileSHDF5, pathSource)
            vi.writeHDF5(tabTarget, fileSHDF5, pathTarget)
            if e == "merge":
                out_merge += "\n" + str(solutions[f][e][0]) + " on frame " \
                             + str(f) + " to " + str(solutions[f][e][1])
            if e == "split":
                out_split += "\n" + str(solutions[f][e][0]) + " on frame " \
                             + str(f) + " to " + str(solutions[f][e][1])
    print(count, out_merge, out_split)
    fileS.close()
    return solutions
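# Toy illustration of the merge/split reclassification rule above, with
# hypothetical labels (not data from the original code):
#
#   sources = [[1], [2], [3]]      targets = [[7], [7], [8]]
#
# Target [7] occurs twice, so (1, 2) -> 7 is recorded as a merge rather than
# two moves; (3 -> 8) remains a plain move. Splits are detected symmetrically
# on duplicated sources.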