def run_onthisthread(self): """ Run the Zprojection This is not in run, so we can bypass run() if we don't want to start a new thread """ print 'zproject thread id', QtCore.QThread.currentThreadId() if os.path.isfile(self.zprojection_output) is False or self.force: print "Computing z-projection..." if len(self.imglist) < 1: self.emit(QtCore.SIGNAL('update(QString)'), "No recon images found!") reader = Imreader(self.imglist) try: im = reader.imread(self.imglist[0]) except HarpDataError as e: self.update.emit(e.message) # emit back to differnt thread. or... raise # reraise for autocrop, on same thread imdims = im.shape dtype = im.dtype # make a new list by removing every nth image sparse_filelist = sorted(self.imglist)[0::self.skip_num] print "performing z-projection on sparse file list" max_array = self.max_projection(sparse_filelist, imdims, dtype) imwrite(self.zprojection_output, max_array) self.emit(QtCore.SIGNAL('update(QString)'), "Z-projection finished")
def run_onthisthread(self): """ Run the Zprojection This is not in run, so we can bypass run() if we don't want to start a new thread """ print 'zproject thread id', QtCore.QThread.currentThreadId() if os.path.isfile(self.zprojection_output) is False or self.force: print "Computing z-projection..." if len(self.imglist) < 1: self.emit(QtCore.SIGNAL('update(QString)'), "No recon images found!") reader = Imreader(self.imglist) try: im = reader.imread(self.imglist[0]) except HarpDataError as e: self.update.emit( e.message) # emit back to differnt thread. or... raise # reraise for autocrop, on same thread imdims = im.shape dtype = im.dtype # make a new list by removing every nth image sparse_filelist = sorted(self.imglist)[0::self.skip_num] print "performing z-projection on sparse file list" max_array = self.max_projection(sparse_filelist, imdims, dtype) imwrite(self.zprojection_output, max_array) self.emit(QtCore.SIGNAL('update(QString)'), "Z-projection finished")
def bz2_nnrd(img_list, outfile, scan_name, update): """ """ reader = Imreader(img_list) first_image = reader.imread(img_list[0]) shape = list(first_image.shape) shape = [shape[1], shape[0]] shape.append(len(img_list)) print '++++==== bzp' tempnrrd = tempfile.TemporaryFile(mode="wb+") nrrd.write_nrrd_header(tempnrrd, shape, first_image.dtype, 3) compressor = bz2.BZ2Compressor() for i, f in enumerate(img_list): if i % 20 == 0: done = int((50.0 / len(img_list)) * i) update.emit('{} {}%'.format(scan_name, done)) img_arr = reader.imread(f) rawdata = img_arr.T.tostring(order='F') tempnrrd.write(rawdata) BLOCK_SIZE = 52428800 # Around 20 MB in memory at one time # TODO: Check its smaller than image size compressed_name = outfile + '.bz2' file_size = os.fstat(tempnrrd.fileno()).st_size tempnrrd.seek(0) bytes_read = 0 with open(compressed_name, 'wb') as fh_w: while True: block = tempnrrd.read(BLOCK_SIZE) bytes_read += BLOCK_SIZE done = int(50 + (50.0 / file_size) * bytes_read) if done >= 100: # The getsize might not be accurate? done = 99 update.emit('{} {}%'.format(scan_name, done)) if not block: break compressed = compressor.compress(block) if compressed: try: fh_w.write(compressed) except IOError: update.emit("Error in compression - job terminated") print('failed to write bzp chunk') return # Send any data being buffered by the compressor remaining = compressor.flush() while remaining: to_send = remaining[:BLOCK_SIZE] remaining = remaining[BLOCK_SIZE:] fh_w.write(to_send)
def auto_bounding_box(self, filelist):
    """
    Automatically determine a crop bounding box from a maximum-intensity
    z-projection of the recon stack.

    :param filelist: list of recon image paths
    :return: padded bounding box list [xmin, xmax, ymin, ymax]
    :raises HarpDataError: if the first image cannot be read
    """
    self.callback("Determining crop bounding box")
    z_proj_path = os.path.join(self.configOb.meta_path, "max_intensity_z.png")

    # Start with a z-projection
    zp = zproject.Zproject(filelist, z_proj_path, force=True)
    zp.update.connect(self.update_slot)
    zp.run_onthisthread()
    zp_im = ReadImage(z_proj_path)

    reader = Imreader(filelist)
    try:
        testimg = reader.imread(filelist[0])
    except IOError as e:
        raise HarpDataError('Failed to read {}. Is it corrupt'.format(filelist[0]))

    datatype = testimg.dtype
    # Bug fix: 'datatype is np.uint16' compared a dtype instance to the
    # numpy scalar type by identity, which is never true, so outval was
    # always 255 even for 16-bit images. Use equality instead.
    if datatype == np.uint16:
        outval = 65535
    else:
        outval = 255

    # Apply otsu threshold and remove all but largest component
    seg = OtsuThreshold(zp_im, 0, outval, 128)
    seg = ConnectedComponent(seg)  # label non-background pixels
    seg = RelabelComponent(seg)  # relabel components in order of ascending size
    # seg = seg == 1  # discard all but largest component

    # Get bounding box
    label_stats = LabelStatisticsImageFilter()
    label_stats.Execute(zp_im, seg)
    bbox = list(label_stats.GetBoundingBox(1))  # xmin, xmax, ymin, ymax (I think)

    # Pad by 4% of the mean image dimension
    self.imdims = testimg.shape
    padding = int(np.mean(self.imdims) * 0.04)
    bbox = self.pad_bounding_box(bbox, padding)

    # Callback!
    self.callback(tuple(bbox))

    # Crop the z-projection and write to metadata
    zp_arr = GetArrayFromImage(zp_im)
    zp_crop = GetImageFromArray(zp_arr[bbox[2]:bbox[3], bbox[0]:bbox[1]])
    WriteImage(zp_crop, os.path.join(self.configOb.meta_path, "crop_result.png"))
    return bbox
def auto_bounding_box(self, filelist):
    """
    Work out a crop bounding box automatically via a maximum-intensity
    z-projection followed by Otsu thresholding.

    :param filelist: list of recon image paths
    :return: padded bounding box list [xmin, xmax, ymin, ymax]
    :raises HarpDataError: if the first image cannot be read
    """
    self.callback("Determining crop bounding box")
    z_proj_path = os.path.join(self.configOb.meta_path, "max_intensity_z.png")

    # Start with a z-projection
    zp = zproject.Zproject(filelist, z_proj_path, force=True)
    zp.update.connect(self.update_slot)
    zp.run_onthisthread()
    zp_im = ReadImage(z_proj_path)

    reader = Imreader(filelist)
    try:
        testimg = reader.imread(filelist[0])
    except IOError as e:
        raise HarpDataError('Failed to read {}. Is it corrupt'.format(filelist[0]))

    # Bug fix: identity test 'is np.uint16' never matched a dtype object,
    # so the 16-bit branch was dead; compare with '==' instead
    if testimg.dtype == np.uint16:
        outval = 65535
    else:
        outval = 255

    # Apply otsu threshold and remove all but largest component
    seg = OtsuThreshold(zp_im, 0, outval, 128)
    seg = ConnectedComponent(seg)  # label non-background pixels
    seg = RelabelComponent(seg)  # relabel components in order of ascending size
    # seg = seg == 1  # discard all but largest component

    # Get bounding box
    label_stats = LabelStatisticsImageFilter()
    label_stats.Execute(zp_im, seg)
    bbox = list(label_stats.GetBoundingBox(1))  # xmin, xmax, ymin, ymax (I think)

    # Padding
    self.imdims = testimg.shape
    padding = int(np.mean(self.imdims) * 0.04)
    bbox = self.pad_bounding_box(bbox, padding)

    # Callback!
    self.callback(tuple(bbox))

    # Crop the z-projection and write to metadata
    zp_arr = GetArrayFromImage(zp_im)
    zp_crop = GetImageFromArray(zp_arr[bbox[2]:bbox[3], bbox[0]:bbox[1]])
    WriteImage(zp_crop, os.path.join(self.configOb.meta_path, "crop_result.png"))
    return bbox
def max_projection(self, filelist, imdims, bit_depth):
    """
    Compute a pixel-wise maximum-intensity projection over a list of slices.

    :param filelist: (sparse) list of slice image paths to project
    :param imdims: shape of each slice
    :param bit_depth: numpy dtype of the slices
    :return: numpy array of shape imdims holding the max projection
    :raises HarpDataError: re-raised if any slice fails to read
    """
    maxi = np.zeros(imdims, dtype=bit_depth)
    reader = Imreader(filelist)
    for count, file_ in enumerate(filelist):
        try:
            im_array = reader.imread(file_)
        except HarpDataError as e:
            self.update.emit(e.message)
            raise
        # Keep the running per-pixel maximum
        inds = im_array > maxi
        maxi[inds] = im_array[inds]
        # Bug fix: the skip factor was hard-coded as 10; filelist is the
        # full list thinned by self.skip_num, so scale by that instead
        status_str = "Z-project: " + str(count * self.skip_num) + "/" + \
                     str(len(self.imglist)) + " images processed"
        self.emit(QtCore.SIGNAL('update(QString)'), status_str)
        # Bug fix: force float division — under Python 2 (without future
        # division) count / len(filelist) truncated to 0 until the end
        self.callback("Determining crop box ({:.1%})".format(
            float(count) / len(filelist)))
    return maxi
def max_projection(self, filelist, imdims, bit_depth):
    """
    Build a maximum-intensity projection by folding each slice into a
    running element-wise maximum.

    :param filelist: (sparse) list of slice image paths to project
    :param imdims: shape of each slice
    :param bit_depth: numpy dtype of the slices
    :return: numpy array of shape imdims holding the max projection
    :raises HarpDataError: re-raised if any slice fails to read
    """
    maxi = np.zeros(imdims, dtype=bit_depth)
    reader = Imreader(filelist)
    for count, file_ in enumerate(filelist):
        try:
            im_array = reader.imread(file_)
        except HarpDataError as e:
            self.update.emit(e.message)
            raise
        inds = im_array > maxi
        maxi[inds] = im_array[inds]
        # Bug fix: was 'count * 10', silently assuming a skip of 10;
        # use the actual skip factor the sparse list was built with
        status_str = "Z-project: " + str(count * self.skip_num) + "/" + str(
            len(self.imglist)) + " images processed"
        self.emit(QtCore.SIGNAL('update(QString)'), status_str)
        # Bug fix: integer division made this fraction 0 under Python 2
        self.callback("Determining crop box ({:.1%})".format(
            float(count) / len(filelist)))
    return maxi
def run(self, auto=False): """ Perform a crop based on previously selected bounding box :return: """ # Get list of files #imglist = processing.getfilelist(self.in_dir) imglist = self.app_data.getfilelist(self.in_dir) if len(imglist) < 1: raise HarpDataError("no image files found in " + self.in_dir) # Get cropbox either automatically or manually cb = self.auto_bounding_box(imglist) if auto else self.calc_manual_crop() print cb # Rearrange dimensions for numpy slicing cropbox = (cb[2], cb[3], cb[0], cb[1]) print cropbox first = True outpathslist = [] reader = Imreader(imglist) for count, file_ in enumerate(imglist): try: im = reader.imread(file_) except IOError as e: raise HarpDataError("failed to read {}".format(file_)) else: if im.shape[0] < 1 or im.shape[1] < 1: raise HarpDataError('Cannot read file, {}'.format(file_)) if first: dimcheck = im.shape first = False else: if im.shape != dimcheck: raise HarpDataError("Input files have different shapes {} and {}". format(dimcheck, imglist[0], file_)) # try: # pass # #im[dimcheck] Check for indexing error as .shape is derived from header only if count % 20 == 0: if self.thread_terminate_flag.value == 1: raise HarpDataError('Cancelled') self.callback( "Cropping: {0}/{1} images".format(count, str(len(imglist)))) filename = os.path.basename(file_) crop_out = os.path.join(self.out_dir, filename) try: imcrop = im[cropbox[0]:cropbox[1], cropbox[2]: cropbox[3]] except IndexError as e: raise HarpDataError("Crop box out of range. Is {} corrupted?".format(filename)) if count < 1: # Set up the correct writer based on the first image to be written imwriter = Imwriter(crop_out) imwriter.imwrite(imcrop, crop_out) outpathslist.append(crop_out) self.callback("Success") return outpathslist
def resample(images, scale, outpath, scaleby_int, update_signal,
             thread_terminate_flag=Value('i', 0)):
    """
    Downscale an image stack in all three dimensions and write it as nrrd.

    Z-slices are scaled in xy first (staged in a temp file and memory
    mapped), then the stack is scaled along z by rescaling xz planes.

    :param images: iterable of image paths, or a directory containing images
    :param scale: int. Factor to scale by
    :param outpath: path including image file extension
    :param scaleby_int: bool. True -> scale by binning (pixel dropping first).
        False -> use cv2 interpolated scaling only
    :param update_signal: Qt signal with an emit(str) method for progress
    :param thread_terminate_flag: shared int flag; set to 1 to abort early
    :raises HarpDataError: on a bad 'images' argument or an empty image list
    """
    temp_xy = tempfile.TemporaryFile(mode='wb+')
    temp_xyz = tempfile.TemporaryFile(mode='wb+')

    # Check if we have a directory with images or a list with images
    if type(images) is str:
        if os.path.isdir(images):
            img_path_list = get_img_paths(images)
        else:
            # Bug fix: a string that is not a directory previously left
            # img_path_list unbound and crashed later with a NameError
            raise HarpDataError("HARP Resampler: resampler needs a direcory of images or a list of images")
    elif type(images) in [list, tuple]:
        img_path_list = images
    else:
        raise HarpDataError("HARP Resampler: resampler needs a direcory of images or a list of images")

    if len(img_path_list) < 1:
        raise HarpDataError("HARP Resampler: There are no images in the list or directory")

    # Get dimensions for the memory mapped raw xy file
    xy_scaled_dims = [len(img_path_list)]

    img_path_list = sorted(img_path_list)
    datatype = 'uint8'  # default
    first = True
    count = 0
    reader = Imreader(img_path_list)
    for img_path in img_path_list:
        count += 1
        if count % 50 == 0:
            if thread_terminate_flag.value == 1:
                return
            pcnt_done = int(((100 / len(img_path_list)) * count) / 2)
            update_signal.emit("rescaling by {}: {}% done".format(scale, pcnt_done))
        # Rescale the z slices
        z_slice_arr = reader.imread(img_path)
        # NOTE: this might slow things down by reassigning to the original
        # array. Maybe we just need a different view on it
        if scaleby_int:
            z_slice_arr = _droppixels(z_slice_arr, scale, scale)
        # NOTE(review): fx=1/scale relies on true division, so the module
        # presumably has 'from __future__ import division' — confirm
        z_slice_resized = cv2.resize(z_slice_arr, (0, 0), fx=1/scale, fy=1/scale,
                                     interpolation=cv2.INTER_AREA)
        if first:
            xy_scaled_dims.extend(z_slice_resized.shape)
            datatype = z_slice_resized.dtype
            first = False
        if windows:
            z_slice_resized.tofile(temp_xy.file)
        else:
            z_slice_resized.tofile(temp_xy)

    # create memory mapped version of the temporary xy scaled slices
    xy_scaled_mmap = np.memmap(temp_xy, dtype=datatype, mode='r',
                               shape=tuple(xy_scaled_dims))

    # Get dimensions for the memory mapped raw xyz file
    xyz_scaled_dims = []
    first = True

    # Scale in x_z plane
    count = 0
    for y in range(xy_scaled_mmap.shape[1]):
        count += 1
        if count % 50 == 0:
            if thread_terminate_flag.value == 1:
                return
            pcnt_done = int(((100 / xy_scaled_mmap.shape[1]) * count) / 2) + 50
            update_signal.emit("rescaling by {}: {}% done".format(scale, pcnt_done))
        xz_plane = xy_scaled_mmap[:, y, :]
        if scaleby_int:
            xz_plane = _droppixels(xz_plane, 1, scale)
        scaled_xz = cv2.resize(xz_plane, (0, 0), fx=1, fy=1/scale,
                               interpolation=cv2.INTER_AREA)
        if first:
            first = False
            xyz_scaled_dims.append(xy_scaled_mmap.shape[1])
            xyz_scaled_dims.append(scaled_xz.shape[0])
            xyz_scaled_dims.append(scaled_xz.shape[1])
        # Removed an unused 'final_scaled_slices' list that accumulated
        # every plane in memory without ever being read
        if windows:
            scaled_xz.tofile(temp_xyz.file)
        else:
            scaled_xz.tofile(temp_xyz)

    # create memory mapped version of the temporary xyz scaled slices
    xyz_scaled_mmap = np.memmap(temp_xyz, dtype=datatype, mode='r',
                                shape=tuple(xyz_scaled_dims))
    nrrd.write(outpath, np.swapaxes(xyz_scaled_mmap.T, 1, 2))

    # Bug fix: temp_xyz was closed twice and temp_xy was leaked
    temp_xy.close()   # deletes temp file
    temp_xyz.close()  # deletes temp file
def resample(images, scale, outpath, scaleby_int, update_signal,
             thread_terminate_flag=Value('i', 0), compress=False):
    """
    Downscale an image stack in all three dimensions and write it as nrrd.

    Z-slices are scaled in xy first (staged in a temp file and memory
    mapped), then the stack is scaled along z by rescaling xz planes.

    :param images: iterable of image paths, or a directory containing images
    :param scale: int. Factor to scale by
    :param outpath: path including image file extension
    :param scaleby_int: bool. True -> scale by binning (pixel dropping first).
        False -> use cv2 interpolated scaling only
    :param update_signal: Qt signal with an emit(str) method for progress
    :param thread_terminate_flag: shared int flag; set to 1 to abort early
    :param compress: currently unused; gzip encoding is disabled (see below)
    :raises HarpDataError: on a bad 'images' argument or an empty image list
    """
    temp_xy = tempfile.TemporaryFile(mode='wb+')
    temp_xyz = tempfile.TemporaryFile(mode='wb+')

    # Check if we have a directory with images or a list with images
    if type(images) is str:
        if os.path.isdir(images):
            img_path_list = get_img_paths(images)
        else:
            # Bug fix: a non-directory string previously left
            # img_path_list unbound, causing a NameError further down
            raise HarpDataError("HARP Resampler: resampler needs a direcory of images or a list of images")
    elif type(images) in [list, tuple]:
        img_path_list = images
    else:
        raise HarpDataError("HARP Resampler: resampler needs a direcory of images or a list of images")

    if len(img_path_list) < 1:
        raise HarpDataError("HARP Resampler: There are no images in the list or directory")

    # Get dimensions for the memory mapped raw xy file
    xy_scaled_dims = [len(img_path_list)]

    img_path_list = sorted(img_path_list)
    datatype = 'uint8'  # default
    first = True
    count = 0
    reader = Imreader(img_path_list)
    for img_path in img_path_list:
        # Removed per-file debug 'print img_path' — it spammed the console
        # and slowed the loop for large stacks
        count += 1
        if count % 50 == 0:
            if thread_terminate_flag.value == 1:
                return
            pcnt_done = int(((100 / len(img_path_list)) * count) / 2)
            update_signal.emit("rescaling by {}: {}% done".format(scale, pcnt_done))
        # Rescale the z slices
        z_slice_arr = reader.imread(img_path)
        # NOTE: this might slow things down by reassigning to the original
        # array. Maybe we just need a different view on it
        if scaleby_int:
            z_slice_arr = _droppixels(z_slice_arr, scale, scale)
        # NOTE(review): fx=1/scale relies on true division, so the module
        # presumably has 'from __future__ import division' — confirm
        z_slice_resized = cv2.resize(z_slice_arr, (0, 0), fx=1/scale, fy=1/scale,
                                     interpolation=cv2.INTER_AREA)
        if first:
            xy_scaled_dims.extend(z_slice_resized.shape)
            datatype = z_slice_resized.dtype
            first = False
        if windows:
            z_slice_resized.tofile(temp_xy.file)
        else:
            z_slice_resized.tofile(temp_xy)

    # create memory mapped version of the temporary xy scaled slices
    xy_scaled_mmap = np.memmap(temp_xy, dtype=datatype, mode='r',
                               shape=tuple(xy_scaled_dims))

    # Get dimensions for the memory mapped raw xyz file
    xyz_scaled_dims = []
    first = True

    # Scale in x_z plane
    count = 0
    for y in range(xy_scaled_mmap.shape[1]):
        count += 1
        if count % 50 == 0:
            if thread_terminate_flag.value == 1:
                return
            pcnt_done = int(((100 / xy_scaled_mmap.shape[1]) * count) / 2) + 50
            update_signal.emit("rescaling by {}: {}% done".format(scale, pcnt_done))
        xz_plane = xy_scaled_mmap[:, y, :]
        if scaleby_int:
            xz_plane = _droppixels(xz_plane, 1, scale)
        scaled_xz = cv2.resize(xz_plane, (0, 0), fx=1, fy=1/scale,
                               interpolation=cv2.INTER_AREA)
        if first:
            first = False
            xyz_scaled_dims.append(xy_scaled_mmap.shape[1])
            xyz_scaled_dims.append(scaled_xz.shape[0])
            xyz_scaled_dims.append(scaled_xz.shape[1])
        if windows:
            scaled_xz.tofile(temp_xyz.file)
        else:
            scaled_xz.tofile(temp_xyz)

    # create memory mapped version of the temporary xyz scaled slices
    xyz_scaled_mmap = np.memmap(temp_xyz, dtype=datatype, mode='r',
                                shape=tuple(xyz_scaled_dims))
    options = {}
    # if compress:
    #     options['encoding'] = 'gzip'  # Does not work. We get a header NRRD with no data
    nrrd.write(outpath, np.swapaxes(xyz_scaled_mmap.T, 1, 2), options)

    temp_xy.close()   # deletes temp file
    temp_xyz.close()  # deletes temp file
def run(self, auto=False):
    """
    Perform a crop based on a previously selected (or automatically
    determined) bounding box.

    :param auto: if True, derive the crop box with auto_bounding_box();
        otherwise use the manually chosen one
    :return: list of paths of the cropped images written out
    :raises HarpDataError: on missing/corrupt/mismatched input, or when
        the job is cancelled via thread_terminate_flag
    """
    # Get list of files
    #imglist = processing.getfilelist(self.in_dir)
    imglist = self.app_data.getfilelist(self.in_dir)
    if len(imglist) < 1:
        raise HarpDataError("no image files found in " + self.in_dir)

    if auto:
        cb = self.auto_bounding_box(imglist)
        # rearange as dims come in a differenbt order from the different methods
        cropbox = (cb[2], cb[3], cb[0], cb[1])
    else:
        cropbox = self.calc_manual_crop()
        cropbox = [cropbox[1], cropbox[3], cropbox[0], cropbox[2]]  # rearrange for numpy slicing

    first = True
    outpathslist = []
    reader = Imreader(imglist)
    for count, file_ in enumerate(imglist):
        try:
            im = reader.imread(file_)
        except IOError as e:
            raise HarpDataError("failed to read {}".format(file_))
        else:
            if im.shape[0] < 1 or im.shape[1] < 1:
                raise HarpDataError('Cannot read file, {}'.format(file_))
        if first:
            dimcheck = im.shape  # all subsequent slices must match this
            first = False
        else:
            if im.shape != dimcheck:
                # Bug fix: the old message had two placeholders but three
                # arguments, and printed a file *path* where a shape was
                # expected. Report both shapes and the offending file.
                raise HarpDataError(
                    "Input files have different shapes: {} is {} but {} is {}".format(
                        imglist[0], dimcheck, file_, im.shape))
        # try:
        #     pass
        #     #im[dimcheck] Check for indexing error as .shape is derived from header only
        if count % 20 == 0:
            # Periodically honour cancellation and report progress
            if self.thread_terminate_flag.value == 1:
                raise HarpDataError('Cancelled')
            self.callback("Cropping: {0}/{1} images".format(
                count, str(len(imglist))))
        filename = os.path.basename(file_)
        crop_out = os.path.join(self.out_dir, filename)
        try:
            imcrop = im[cropbox[0]:cropbox[1], cropbox[2]:cropbox[3]]
        except IndexError as e:
            raise HarpDataError(
                "Crop box out of range. Is {} corrupted?".format(filename))
        imwrite(crop_out, imcrop)
        outpathslist.append(crop_out)
    self.callback("success")
    return outpathslist