    def _can_read_file(self, path, channels_to_update=None):
        try:
            img = Img(path)
            try:
                print(img.has_c(), channels_to_update, img.shape, img.shape[-1])
                if img.has_c() and channels_to_update is not None:
                    # empty channel combo
                    channels_to_update.clear()
                    # add new channels to combo
                    for chan in range(img.shape[-1]):
                        channels_to_update.addItem(str(chan))
                    # deselect it
                    channels_to_update.setCurrentIndex(-1)
                    channels_to_update.update()
                if not img.has_c() and channels_to_update is not None:  # guard so a missing combo is not dereferenced
                    channels_to_update.clear()
                    channels_to_update.setCurrentIndex(-1)
                    channels_to_update.update()
            except Exception:
                import traceback
                import logging
                logging.error(traceback.format_exc())
            return img, True
        except Exception:
            print('could not read image ' + path)
            return None, False
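    # Hypothetical usage sketch (not part of the original code): `channels_to_update` is expected to
    # behave like a Qt QComboBox (clear/addItem/setCurrentIndex/update). A caller could do:
    #
    #     combo = QComboBox()
    #     img, ok = self._can_read_file('/path/to/image.tif', channels_to_update=combo)  # hypothetical path
    #     if ok and img.has_c():
    #         combo.setCurrentIndex(0)  # e.g. preselect the first channel
    #
    # The method returns (Img, True) on success and (None, False) if the file cannot be read.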
    def _toImg(self):
        # print(self.toBase64()) # this is ok
        if self.figure is not None:
            buf = self._toBuffer()
            if buf is None:
                return None
            buf.seek(0)
            im = Image.open(buf)
            # im.show()
            pix = np.array(im)
            # print(pix.shape)
            img = Img(pix, dimensions='hwc')
            # print(img.shape, pix.shape)
            buf.close()
            # should I get image width and height there ???
            # im.show()
            return img
        return None
    def process(self, input=None, mode=None, _DEBUG=False, _VISUAL_DEBUG=False, output_folder=tempfile.gettempdir(),
                output_name='handCorrection.tif', threshold=None, filter=None, correction_factor=2, **kwargs):
        if input is None:
            logger.error('no input image --> nothing to do')
            return
        # TODO test it with several images just to see if that works
        if isinstance(mode, str) and 'first' in mode:
            # return first channel only
            # shall I add a channel axis to it to avoid issues
            out = input[..., 0]
            # I do this to keep the ...hwc format...
            return out[..., np.newaxis]

        img_orig = input
        if not img_orig.has_c() or img_orig.shape[-1] != 7:
            # TODO in fact could do the fast mode still on a single image --> may be useful
            logger.error('image must have 7 channels to be used for post process')
            return img_orig

        if _DEBUG:
            Img(img_orig, dimensions='hwc').save(os.path.join(output_folder, 'raw_input.tif'))

        bckup_img_wshed = img_orig[..., 0].copy()

        if mode is not None and isinstance(mode, str):
            if 'ast' in mode:
                logger.debug('fast mode')
                img_orig[..., 0] += img_orig[..., 1]
                img_orig[..., 0] += img_orig[..., 2]
                img_orig = img_orig[..., 0] / 3
                img_orig = np.reshape(img_orig, (*img_orig.shape, 1))
            else:
                logger.debug('normal mode')
        else:
            logger.debug('normal mode')

        differing_bonds = np.zeros_like(img_orig)

        img_orig[..., 0] = segment_cells(img_orig[..., 0], min_threshold=0.02, min_unconnected_object_size=3)
        if img_orig.shape[-1] >= 5:
            img_orig[..., 1] = segment_cells(img_orig[..., 1], min_threshold=0.06, min_unconnected_object_size=6)
            img_orig[..., 2] = segment_cells(img_orig[..., 2], min_threshold=0.15, min_unconnected_object_size=12)
            img_orig[..., 3] = Img.invert(img_orig[..., 3])
            img_orig[..., 3] = segment_cells(img_orig[..., 3], min_threshold=0.06, min_unconnected_object_size=6)
            img_orig[..., 4] = Img.invert(img_orig[..., 4])
            img_orig[..., 4] = segment_cells(img_orig[..., 4], min_threshold=0.15, min_unconnected_object_size=12)
        if img_orig.shape[-1] == 7:
            img_orig[..., 5] = self.binarise(img_orig[..., 5], threshold=0.15)
            img_orig[..., 6] = Img.invert(img_orig[..., 6])
            img_orig[..., 6] = self.binarise(img_orig[..., 6], threshold=0.1)

        if _DEBUG:
            Img(img_orig, dimensions='hwc').save(os.path.join(output_folder, 'thresholded_masks.tif'))

        # get watershed mask for all images
        for i in range(img_orig.shape[-1]):
            if i < 5:
                final_seeds = label(Img.invert(img_orig[..., i]), connectivity=1, background=0)
            else:
                final_seeds = label(img_orig[..., i], connectivity=None, background=0)
            final_wshed = watershed(bckup_img_wshed, markers=final_seeds, watershed_line=True)
            final_wshed[final_wshed != 0] = 1
            final_wshed[final_wshed == 0] = 255
            final_wshed[final_wshed == 1] = 0
            differing_bonds[..., i] = final_wshed
            del final_seeds
            del final_wshed

        if _DEBUG:
            print(os.path.join(output_folder, 'differences.tif'))
            Img(differing_bonds, dimensions='hwc').save(os.path.join(output_folder, 'differences.tif'))
            Img(bckup_img_wshed, dimensions='hw').save(os.path.join(output_folder, 'orig_img.tif'))

        avg = np.mean(differing_bonds, axis=-1)
        avg = avg / avg.max()
        if _DEBUG:
            Img(avg, dimensions='hw').save(os.path.join(output_folder, output_name + str('avg.tif')))

        if threshold is None:
            threshold = self.autothreshold(avg)
        logger.debug('threshold used for producing the final mask=' + str(threshold))

        final_mask = avg.copy()
        final_mask = self.binarise(final_mask, threshold=threshold)
        if _DEBUG:
            Img(final_mask, dimensions='hw').save(os.path.join(output_folder, 'binarized.tif'))

        # close wshed mask to fill super tiny holes
        s = ndimage.generate_binary_structure(2, 1)
        final_mask = ndimage.grey_dilation(final_mask, footprint=s)

        # remove super tiny artificial cells (very small value cause already dilated)
        mask = label(Img.invert(final_mask), connectivity=1, background=0)
        for region in regionprops(mask):
            if region.area < 5:
                for coordinates in region.coords:
                    final_mask[coordinates[0], coordinates[1]] = 255
        del mask

        final_mask = label(Img.invert(final_mask), connectivity=1, background=0)
        final_mask = watershed(bckup_img_wshed, markers=final_mask, watershed_line=True)
        final_mask[final_mask != 0] = 1
        final_mask[final_mask == 0] = 255
        final_mask[final_mask == 1] = 0

        if filter is None or filter == 0:
            return final_mask.astype(np.uint8)
        else:
            logger.debug('Further filtering image')
            return FilterMask(bckup_img_wshed, final_mask, filter=filter, correction_factor=correction_factor)
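    # Hypothetical usage sketch (not part of the original code): assuming the enclosing post-processing
    # class can be instantiated directly and `input` is the raw 7-channel EPySeg prediction as an Img
    # with hwc dimensions, the method could be driven like this:
    #
    #     post = PostProcess()                        # hypothetical name for the enclosing class
    #     prediction = Img('raw_epyseg_output.tif')   # hypothetical path, must carry 7 channels
    #     mask = post.process(input=prediction, filter='local median', correction_factor=2)
    #     Img(mask, dimensions='hw').save('handCorrection.tif')
    #
    # filter=None (or 0) returns the raw watershed mask; a 'local median' filter delegates the size-based
    # cleanup to FilterMask().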
        return image

    return get_optimized_mask2(original, sauvola_mask=None, score_before_adding=True)  # --> True


if __name__ == '__main__':
    from timeit import default_timer as timer

    # image = Img('/D/final_folder_scoring/predict_hybrid/mini_test.tif')
    # image = Img('/D/final_folder_scoring/predict_hybrid/AVG_StackFocused_Endocad-GFP(6-12-13)#19_000.tif')
    # image = Img('/D/final_folder_scoring/predict_avg_hq_correction_ensemble_wshed/122.tif')[...,0]
    # image = Img('/D/final_folder_scoring/predict_avg_hq_correction_ensemble_wshed/image_plant_best-zoomed.tif')[...,0]
    image = Img('/D/final_folder_scoring/predict_avg_hq_correction_ensemble_wshed/5.tif')[..., 0]
    # image = Img('/D/final_folder_scoring/predict_hybrid/tmp11.png')
    # image = Img('/D/final_folder_scoring/predict_hybrid/11-1_nuclei_1.tif')

    start = timer()
    final_mask = segment_cells(image, __DEBUG=False, __VISUAL_DEBUG=False, stop_at_threshold_step=False)
    duration = timer() - start
    print(duration)

    plt.imshow(final_mask)
    plt.show()
            'y1': int(self.y1),
            'x2': int(self.x2),
            'y2': int(self.y2)
        }


if __name__ == '__main__':
    # just for a test
    app = QApplication(sys.argv)
    ex = crop_or_preview()
    # ex = crop_or_preview(preview_only=True)
    # img = Img('/home/aigouy/mon_prog/Python/Deep_learning/unet/data/membrane/test/11.png')
    # img = Img('/home/aigouy/mon_prog/Python/Deep_learning/unet/data/membrane/test/122.png')
    # img = Img('/home/aigouy/mon_prog/Python/data/3D_bicolor_ovipo.tif')
    # img = Img('/home/aigouy/mon_prog/Python/data/Image11.lsm')
    # img = Img('/home/aigouy/mon_prog/Python/data/lion.jpeg')
    # img = Img('/home/aigouy/mon_prog/Python/data/epi_test.png')
    img = Img('/home/aigouy/Bureau/201106_armGFP_49hAPF/tests_CARE_stack_foc/predict_raw_CARE_their_training_my_soft/200709_armGFP_suz_46hAPF_ON.lif - Series008.tif')
    ex.set_image(img)
    # test = QRectF(None, None, 128, 128)
    ex.setRoi(None, None, 128, 256)
    # ex.set_image(None)
    ex.show()
    app.exec_()
    print(ex.get_crop_parameters())
def process(self, input=None, output_folder=None, progress_callback=None, filter=None, correction_factor=2, cutoff_cell_fusion=None, restore_safe_cells=False, _DEBUG=False, _VISUAL_DEBUG=False, **kwargs): start = timer() # filename0 = path # filename0_without_path = os.path.basename(filename0) # filename0_without_ext = os.path.splitext(filename0_without_path)[0] # parent_dir_of_filename0 = os.path.dirname(filename0) # TA_output_filename = os.path.join(parent_dir_of_filename0, filename0_without_ext, # 'handCorrection.tif') # TODO allow custom names here to allow ensemble methods # non_TA_final_output_name = os.path.join(output_folder, filename0_without_ext + '.tif') # # filename_to_use_to_save = non_TA_final_output_name # if TA_mode: # filename_to_use_to_save = TA_output_filename # # if TA_mode: # # try also to change path input name # if os.path.exists( # os.path.join(parent_dir_of_filename0, filename0_without_ext, 'raw_epyseg_output.tif')): # path = os.path.join(parent_dir_of_filename0, filename0_without_ext, 'raw_epyseg_output.tif') # img_orig = Img(path) # print('analyzing', path, self.stop_now) # try: # if self.progress_callback is not None: # self.progress_callback.emit((iii / len(list_of_files)) * 100) # else: # logger.info(str((iii / len(list_of_files)) * 100) + '%') # except: # traceback.print_exc() # pass # DO A DILATION OF SEEDS THEN AN EROSION TO JOIN CLOSE BY SEEDS img_orig = input img_has_seeds = True # mask with several channels if img_orig.has_c(): if restore_safe_cells: img_seg = img_orig[..., 0].copy() seeds_1 = img_orig[..., img_orig.shape[-1] - 1] seeds_1 = Img.invert(seeds_1) # seeds_1[seeds_1 >= 0.5] = 255 # seeds_1[seeds_1 < 0.5] = 0 seeds_1[seeds_1 >= 0.2] = 255 # TODO maybe be more stringent here seeds_1[seeds_1 < 0.2] = 0 s = ndimage.generate_binary_structure(2, 1) seeds_1 = ndimage.grey_dilation(seeds_1, footprint=s) seeds_1 = ndimage.grey_dilation(seeds_1, footprint=s) seeds_1 = ndimage.grey_dilation(seeds_1, footprint=s) seeds_1 = ndimage.grey_erosion(seeds_1, footprint=s) seeds_1 = ndimage.grey_erosion(seeds_1, footprint=s) # seeds_1 = ndimage.grey_erosion(seeds_1, footprint=s) # seeds_1 = ndimage.grey_erosion(seeds_1, footprint=s) # for debug if _DEBUG: Img(seeds_1, dimensions='hw').save( os.path.join(output_folder, 'extras', 'wshed_seeds.tif')) # not bad lab_seeds = label(seeds_1.astype(np.uint8), connectivity=2, background=0) # for region in regionprops(lab_seeds): if region.area < 10: for coordinates in region.coords: lab_seeds[coordinates[0], coordinates[1]] = 0 if _DEBUG: Img(seeds_1, dimensions='hw').save( os.path.join(output_folder, 'extras', 'wshed_seeds_deblobed.tif')) img_orig[..., 3] = Img.invert(img_orig[..., 3]) img_orig[..., 4] = Img.invert(img_orig[..., 4]) # seems to work --> now need to do the projection for c in range(1, img_orig.shape[-1] - 2): img_orig[..., 0] += img_orig[..., 1] img_orig[..., 0] /= img_orig.shape[-1] - 2 img_orig = img_orig[..., 0] else: # mask with single channel img_has_seeds = False if restore_safe_cells: img_seg = img_orig.copy() if restore_safe_cells: if _DEBUG: print(os.path.join(output_folder, 'extras', 'img_seg.tif')) Img(img_seg, dimensions='hw').save( os.path.join(output_folder, 'extras', 'img_seg.tif')) # for debug if _DEBUG: Img(img_orig, dimensions='hw').save(os.path.join(output_folder, 'extras', 'avg.tif')) img_saturated = img_orig.copy() if img_has_seeds: img_saturated[img_saturated >= 0.5] = 255 img_saturated[img_saturated < 0.5] = 0 if restore_safe_cells: # TODO maybe do a safe image img_seg[img_seg >= 
0.3] = 255 img_seg[img_seg < 0.3] = 0 secure_mask = img_seg else: img_saturated[img_saturated >= 0.3] = 255 img_saturated[img_saturated < 0.3] = 0 if restore_safe_cells: img_seg[img_seg >= 0.95] = 255 img_seg[img_seg < 0.95] = 0 secure_mask = img_seg # convert it to seeds and make sure they are all present in there # if pixel is not labeled then read it if restore_safe_cells: labels_n_area_rescue_seeds = {} rescue_seeds = label(Img.invert(secure_mask), connectivity=1, background=0) for region in regionprops(rescue_seeds): labels_n_area_rescue_seeds[region.label] = region.area if _DEBUG: Img(secure_mask, dimensions='hw').save(os.path.join(output_folder, 'extras', 'secure_mask.tif')) # loop over those seeds to rescue # for debug if _DEBUG: Img(img_saturated, dimensions='hw').save( os.path.join(output_folder, 'extras', 'handCorrection.tif')) deblob = True if deblob: image_thresh = label(img_saturated, connectivity=2, background=0) # for debug if _DEBUG: Img(image_thresh, dimensions='hw').save( os.path.join(output_folder, 'extras', 'before_deblobed.tif')) # deblob min_size = 200 for region in regionprops(image_thresh): # take regions with large enough areas if region.area < min_size: for coordinates in region.coords: image_thresh[coordinates[0], coordinates[1]] = 0 image_thresh[image_thresh > 0] = 255 img_saturated = image_thresh # for debug if _DEBUG: Img(img_saturated, dimensions='hw').save( os.path.join(output_folder, 'extras', 'deblobed.tif')) del image_thresh # for debug if _DEBUG: Img(img_saturated, dimensions='hw').save( os.path.join(output_folder, 'extras', 'deblobed_out.tif')) extra_dilations = True if extra_dilations: # do a dilation of 2 to close bonds s = ndimage.generate_binary_structure(2, 1) dilated = ndimage.grey_dilation(img_saturated, footprint=s) dilated = ndimage.grey_dilation(dilated, footprint=s) # Img(dilated, dimensions='hw').save(os.path.join(os.path.splitext(path)[0], 'filled_one_px_holes.tif')) # other_seeds = label(invert(np.grey_dilation(dilated, footprint=s).astype(np.uint8)), connectivity=1, background=0) labs = label(Img.invert(img_saturated.astype(np.uint8)), connectivity=1, background=0) for region in regionprops(labs): seeds = [] # exclude tiny cells form dilation because they may end up completely closed if region.area >= 10 and region.area < 350: for coordinates in region.coords: dilated[coordinates[0], coordinates[1]] = 0 continue else: # pb when big cells around cause connections are not done # preserve cells at edges because they have to e naturally smaller because they are cut # put a size criterion too if region.area < 100 and ( region.bbox[0] <= 1 or region.bbox[1] <= 1 or region.bbox[2] >= labs.shape[-2] - 2 or region.bbox[ 3] >= \ labs.shape[-1] - 2): # edge cell detected --> removing dilation for coordinates in region.coords: dilated[coordinates[0], coordinates[1]] = 0 continue img_saturated = dilated # for debug if _DEBUG: Img(img_saturated, dimensions='hw').save( os.path.join(output_folder, 'extras', 'dilated_further.tif')) del dilated list_of_cells_to_dilate = [] labs = label(Img.invert(img_saturated.astype(np.uint8)), connectivity=1, background=0) # c'est cette correction qui fixe bcp de choses mais recree aussi des choses qui n'existent pas... --> voir à quoi sont dus ces lignes blobs # faudrait redeblober if img_has_seeds: for region in regionprops(labs, intensity_image=img_orig): seeds = [] if not extra_dilations and region.area < 10: continue # if small and no associated seeds --> remove it ??? 
maybe or not for coordinates in region.coords: id = lab_seeds[coordinates[0], coordinates[1]] if id != 0: seeds.append(id) seeds = set(seeds) if len(seeds) >= 2: # we may have found an undersegmented cell --> try segment it better list_of_cells_to_dilate.append(region.label) if len(list_of_cells_to_dilate) != 0: props = regionprops(labs, intensity_image=img_orig) for run in range(10): something_changed = False # early stop for region in props: if region.label not in list_of_cells_to_dilate: continue # TODO recheck those values and wether it makes sense threshold_values = [80 / 255, 60 / 255, 40 / 255, 30 / 255, 20 / 255, 10 / 255] # 160 / 255, 140 / 255, 120 / 255, 100 / 255, 1 / 255 , 2 / 255, , 5 / 255 try: for threshold in threshold_values: mask = region.image.copy() image = region.image.copy() image[region.intensity_image > threshold] = True image[region.intensity_image <= threshold] = False final = Img.invert(image.astype(np.uint8)) final[final < 255] = 0 final[mask == False] = 0 new_seeds = label(final, connectivity=1, background=0) props2 = regionprops(new_seeds) if len(props2) > 1: # cell was resplitted into smaller for r in props2: if r.area < 20: raise Exception region.image[mask == False] = False region.image[mask == True] = True region.image[new_seeds > 0] = False something_changed = True for coordinates in region.coords: img_saturated[coordinates[0], coordinates[1]] = 255 region.image[mask == False] = False region.image[mask == True] = True del final del new_seeds except: traceback.print_exc() pass if not something_changed: # print('no more changes anymore --> quitting') break # for debug if _DEBUG: Img(img_saturated, dimensions='hw').save( os.path.join(output_folder, 'extras', 'saturated_mask4.tif')) final_seeds = label(Img.invert(img_saturated), connectivity=1, background=0) # keep like that otherwise creates tiny cells with erroneous wshed # for debug if _DEBUG: Img(final_seeds, dimensions='hw').save( os.path.join(output_folder, 'extras', 'final_seeds_before.tif')) final_seeds = label(Img.invert(img_saturated), connectivity=2, background=0) # is that needed ??? 
# for debug if _DEBUG: Img(final_seeds, dimensions='hw').save( os.path.join(output_folder, 'extras', 'final_seeds_before2.tif')) final_seeds[img_saturated == 255] = 0 final_wshed = watershed(img_orig, markers=final_seeds, watershed_line=True) final_wshed[final_wshed != 0] = 1 # remove all seeds final_wshed[final_wshed == 0] = 255 # set wshed values to 255 final_wshed[final_wshed == 1] = 0 # set all other cell content to # filename0 = os.path.basename(path) # parent_path = os.path.dirname(os.path.dirname(path)) if filter is None or filter == 0: # TODO maybe offer the choice between saving wshed on predict or on orig # Img(final_wshed, dimensions='hw').save(os.path.join(output_folder, os.path.splitext(filename0)[ # 0]) + '.tif') # need put original name here TODO put image default name here # print('saving', filename_to_use_to_save) # Img(final_wshed.astype(np.uint8), dimensions='hw').save(filename_to_use_to_save) return final_wshed.astype(np.uint8) else: if isinstance(filter, int): filter_by_size = filter else: filter_by_size = None avg_area = 0 count = 0 if _DEBUG: Img(final_wshed, dimensions='hw').save(os.path.join(output_folder, 'extras', 'test_size_cells.tif')) final_seeds = Img.invert(final_wshed) final_seeds = label(final_seeds, connectivity=1, background=0) if _VISUAL_DEBUG: plt.imshow(final_seeds) plt.show() removed_seeds = [] keep_seeds = [] labels_n_bbox = {} labels_n_area = {} border_cells = [] ids_n_local_median = {} correspondance_between_cur_seeds_and_safe_ones = {} if isinstance(filter, str) and 'local' in filter: rps = regionprops(final_seeds) for region in rps: labels_n_bbox[region.label] = region.bbox labels_n_area[region.label] = region.area if (region.bbox[0] <= 3 or region.bbox[1] <= 3 or region.bbox[2] >= final_seeds.shape[-2] - 5 or region.bbox[ 3] >= \ final_seeds.shape[-1] - 5): border_cells.append(region.label) if restore_safe_cells: for coordinates in region.coords: if rescue_seeds[coordinates[0], coordinates[1]] != 0: # do r correspondance_between_cur_seeds_and_safe_ones[region.label] = rescue_seeds[ coordinates[0], coordinates[1]] break break _, tiles = Img.get_2D_tiles_with_overlap(final_seeds, overlap=64, dimension_h=-2, dimension_w=-1) for r in tiles: for tile in r: rps2 = regionprops(tile) for region in rps2: if self.stop_now: return if region.label in border_cells: continue if (region.bbox[0] <= 3 or region.bbox[1] <= 3 or region.bbox[2] >= final_seeds.shape[ -2] - 5 or region.bbox[ 3] >= \ final_seeds.shape[-1] - 5): continue area_of_neighboring_cells = [] for region2 in rps2: if region2.label == region.label: continue # find all cells with if self.rect_distance(region.bbox, region2.bbox) <= 1: area_of_neighboring_cells.append(labels_n_area[region2.label]) if area_of_neighboring_cells: median = statistics.median_low(area_of_neighboring_cells) ids_n_local_median[ region.label] = median / correction_factor if region.area <= median / correction_factor: removed_seeds.append(region.label) else: keep_seeds.append(region.label) removed_seeds = [x for x in removed_seeds if x not in keep_seeds] # TODO offer the things below as an option --> prevent removal of sure seeds or something like that if restore_safe_cells: removed_seeds_to_restore = [] for region in regionprops(final_seeds): if region.label in removed_seeds: first = True for coordinates in region.coords: if first and rescue_seeds[coordinates[0], coordinates[1]] != 0: percent_diff = min(labels_n_area[region.label], labels_n_area_rescue_seeds[ rescue_seeds[coordinates[0], coordinates[1]]]) / max( 
labels_n_area[region.label], labels_n_area_rescue_seeds[ rescue_seeds[coordinates[0], coordinates[1]]]) if (percent_diff >= 0.7 and percent_diff < 1.0) or ( labels_n_area[region.label] <= 200 and ( percent_diff >= 0.3 and percent_diff < 1.0)): if _DEBUG: print('0 finally not removing seed, safe seed', region.label, percent_diff, labels_n_area[region.label], labels_n_area_rescue_seeds[ rescue_seeds[coordinates[0], coordinates[1]]], labels_n_area[region.label] / labels_n_area_rescue_seeds[ rescue_seeds[coordinates[0], coordinates[1]]], region.centroid) removed_seeds_to_restore.append(region.label) break break removed_seeds = [x for x in removed_seeds if x not in removed_seeds_to_restore] else: areas = [] for region in regionprops(final_seeds): if (region.bbox[0] <= 3 or region.bbox[1] <= 3 or region.bbox[2] >= final_seeds.shape[-2] - 5 or region.bbox[3] >= final_seeds.shape[-1] - 5): continue avg_area += region.area count += 1 areas.append(region.area) avg_area /= count median = statistics.median_low(areas) if isinstance(filter, int): filter_by_size = filter elif 'avg' in filter: filter_by_size = avg_area / correction_factor elif 'median' in filter: filter_by_size = median / correction_factor # TODO maybe use stdev or alike to see if cell should really be removed if _DEBUG: print('filter cells below=', filter_by_size, 'avg cell area=', avg_area, 'median=', median) # , 'median', median if filter_by_size is not None and filter_by_size != 0: if _VISUAL_DEBUG: plt.imshow(final_seeds) plt.show() for region in regionprops(final_seeds): labels_n_bbox[region.label] = region.bbox labels_n_area[region.label] = region.area if region.area < filter_by_size: if (region.bbox[0] <= 2 or region.bbox[1] <= 2 or region.bbox[2] >= labs.shape[ -2] - 3 or region.bbox[ 3] >= \ labs.shape[ -1] - 3): continue removed_seeds.append(region.label) if cutoff_cell_fusion is not None and cutoff_cell_fusion > 1: cells_to_fuse = [] for idx, removed_seed in enumerate(removed_seeds): current_cells_to_fuse = set() closest_pair = None smallest_distance = None for idx2 in range(idx + 1, len(removed_seeds)): removed_seed2 = removed_seeds[idx2] if closest_pair is None: if self.rect_distance(labels_n_bbox[removed_seed], labels_n_bbox[removed_seed2]) <= 1: closest_pair = removed_seed2 smallest_distance = self.rect_distance(labels_n_bbox[removed_seed], labels_n_bbox[removed_seed2]) elif self.rect_distance(labels_n_bbox[removed_seed], labels_n_bbox[removed_seed2]) <= smallest_distance: closest_pair = removed_seed2 smallest_distance = self.rect_distance(labels_n_bbox[removed_seed], labels_n_bbox[removed_seed2]) if self.rect_distance(labels_n_bbox[removed_seed], labels_n_bbox[removed_seed2]) <= 1: current_cells_to_fuse.add(removed_seed) current_cells_to_fuse.add(removed_seed2) if current_cells_to_fuse: cells_to_fuse.append(current_cells_to_fuse) cells_to_fuse = [frozenset(i) for i in cells_to_fuse] cells_to_fuse = list(dict.fromkeys(cells_to_fuse)) cells_to_keep = [] if cutoff_cell_fusion is not None and cutoff_cell_fusion > 0: superfuse = [] copy_of_cells_to_fuse = cells_to_fuse.copy() for idx, fuse in enumerate(copy_of_cells_to_fuse): current_fusion = set(fuse.copy()) changed = True while changed: changed = False for idx2 in range(len(copy_of_cells_to_fuse) - 1, idx, -1): fuse2 = copy_of_cells_to_fuse[idx2] if idx2 == idx: continue if fuse2.intersection(current_fusion): current_fusion.update(fuse2) del copy_of_cells_to_fuse[idx2] changed = True superfuse.append(current_fusion) for sf in superfuse: if len(sf) > cutoff_cell_fusion: for 
val in sf: cells_to_keep.append(val) seeds_to_fuse = [] cells_to_fuse = sorted(cells_to_fuse, key=len) for fuse in cells_to_fuse: cumulative_area = 0 for _id in fuse: if _id in cells_to_keep: if _id in removed_seeds: removed_seeds.remove(_id) continue cumulative_area += labels_n_area[_id] if filter_by_size is not None: if cumulative_area >= filter_by_size: #: #1200: #filter_by_size: # need hack this to get local area seeds_to_fuse.append(fuse) for _id in fuse: if _id in removed_seeds: removed_seeds.remove(_id) else: if cumulative_area >= ids_n_local_median[_id]: seeds_to_fuse.append(fuse) for _id in fuse: if _id in removed_seeds: removed_seeds.remove(_id) # need recolor all the seeds in there with the new seed stuff for fuse in seeds_to_fuse: for _id in fuse: break for region in regionprops(final_seeds): if region.label in fuse: for coordinates in region.coords: final_seeds[coordinates[0], coordinates[1]] = _id if _VISUAL_DEBUG: plt.imshow(final_seeds) plt.show() for region in regionprops(final_seeds): if region.label in removed_seeds: for coordinates in region.coords: final_seeds[coordinates[0], coordinates[1]] = 0 if _VISUAL_DEBUG: plt.imshow(final_seeds) plt.show() if _VISUAL_DEBUG: plt.imshow(final_seeds) plt.show() final_wshed = watershed(img_orig, markers=final_seeds, watershed_line=True) final_wshed[final_wshed != 0] = 1 # remove all seeds final_wshed[final_wshed == 0] = 255 # set wshed values to 255 final_wshed[final_wshed == 1] = 0 # set all other cell content to if _VISUAL_DEBUG: plt.imshow(final_wshed) plt.show() # print('saving', filename_to_use_to_save) # Img(final_wshed.astype(np.uint8), dimensions='hw').save(filename_to_use_to_save) duration = timer() - start if _DEBUG: print('final duration wshed in secs', duration) return final_wshed.astype(np.uint8) # is indeed a 2D image
        except Exception:
            self.x1 = self.x2 = self.y1 = self.y2 = None

    def get_crop_parameters(self):
        self.update_ROI()
        if self.x1 is None:
            return None
        return {
            'x1': int(self.x1),
            'y1': int(self.y1),
            'x2': int(self.x2),
            'y2': int(self.y2)
        }


if __name__ == '__main__':
    # just for a test
    app = QApplication(sys.argv)
    ex = crop_or_preview()
    # ex = crop_or_preview(preview_only=True)
    # img = Img('/home/aigouy/mon_prog/Python/Deep_learning/unet/data/membrane/test/11.png')
    # img = Img('/home/aigouy/mon_prog/Python/Deep_learning/unet/data/membrane/test/122.png')
    # img = Img('/home/aigouy/mon_prog/Python/data/3D_bicolor_ovipo.tif')
    # img = Img('/home/aigouy/mon_prog/Python/data/Image11.lsm')
    # img = Img('/home/aigouy/mon_prog/Python/data/lion.jpeg')
    img = Img('/home/aigouy/mon_prog/Python/data/epi_test.png')
    ex.set_image(img)
    # ex.set_image(None)
    ex.show()
    app.exec_()
def FilterMask(img_orig, final_wshed, filter='local median', correction_factor=2, _DEBUG=False, _VISUAL_DEBUG=False, **kwargs):
    labs = label(Img.invert(final_wshed.astype(np.uint8)), connectivity=1, background=0)
    start = timer()
    output_folder = '/home/aigouy/Bureau/trash/test_new_seeds_seg_stuff/'

    if filter is None or filter == 0:
        return final_wshed.astype(np.uint8)
    else:
        if isinstance(filter, int):
            filter_by_size = filter
        else:
            filter_by_size = None
        avg_area = 0
        count = 0
        if _DEBUG:
            Img(final_wshed, dimensions='hw').save(os.path.join(output_folder, 'extras', 'test_size_cells.tif'))

        final_seeds = Img.invert(final_wshed)
        final_seeds = label(final_seeds, connectivity=1, background=0)
        if _VISUAL_DEBUG:
            plt.imshow(final_seeds)
            plt.show()

        removed_seeds = []
        keep_seeds = []
        labels_n_bbox = {}
        labels_n_area = {}
        border_cells = []
        ids_n_local_median = {}

        if isinstance(filter, str) and 'local' in filter:
            rps = regionprops(final_seeds)
            for region in rps:
                labels_n_bbox[region.label] = region.bbox
                labels_n_area[region.label] = region.area
                if (region.bbox[0] <= 3 or region.bbox[1] <= 3 or region.bbox[2] >= final_seeds.shape[-2] - 5
                        or region.bbox[3] >= final_seeds.shape[-1] - 5):
                    border_cells.append(region.label)

            _, tiles = Img.get_2D_tiles_with_overlap(final_seeds, overlap=64, dimension_h=-2, dimension_w=-1)
            for r in tiles:
                for tile in r:
                    rps2 = regionprops(tile)
                    for region in rps2:
                        if region.label in border_cells:
                            continue
                        if (region.bbox[0] <= 3 or region.bbox[1] <= 3 or region.bbox[2] >= final_seeds.shape[-2] - 5
                                or region.bbox[3] >= final_seeds.shape[-1] - 5):
                            continue
                        area_of_neighboring_cells = []
                        for region2 in rps2:
                            if region2.label == region.label:
                                continue
                            # find all cells with
                            if rect_distance(region.bbox, region2.bbox) <= 1:
                                area_of_neighboring_cells.append(labels_n_area[region2.label])
                        if area_of_neighboring_cells:
                            median = statistics.median_low(area_of_neighboring_cells)
                            ids_n_local_median[region.label] = median / correction_factor
                            if region.area <= median / correction_factor:
                                removed_seeds.append(region.label)
                            else:
                                keep_seeds.append(region.label)
            removed_seeds = [x for x in removed_seeds if x not in keep_seeds]
            # TODO offer the things below as an option --> prevent removal of sure seeds or something like that
        else:
            areas = []
            for region in regionprops(final_seeds):
                if (region.bbox[0] <= 3 or region.bbox[1] <= 3 or region.bbox[2] >= final_seeds.shape[-2] - 5
                        or region.bbox[3] >= final_seeds.shape[-1] - 5):
                    continue
                avg_area += region.area
                count += 1
                areas.append(region.area)
            avg_area /= count
            median = statistics.median_low(areas)
            if isinstance(filter, int):
                filter_by_size = filter
            elif 'avg' in filter:
                filter_by_size = avg_area / correction_factor
            elif 'median' in filter:
                filter_by_size = median / correction_factor
            # TODO maybe use stdev or alike to see if cell should really be removed
            if _DEBUG:
                print('filter cells below=', filter_by_size, 'avg cell area=', avg_area, 'median=', median)  # , 'median', median

        if filter_by_size is not None and filter_by_size != 0:
            if _VISUAL_DEBUG:
                plt.imshow(final_seeds)
                plt.show()
            for region in regionprops(final_seeds):
                labels_n_bbox[region.label] = region.bbox
                labels_n_area[region.label] = region.area
                if region.area < filter_by_size:
                    if (region.bbox[0] <= 2 or region.bbox[1] <= 2 or region.bbox[2] >= labs.shape[-2] - 3
                            or region.bbox[3] >= labs.shape[-1] - 3):
                        continue
                    removed_seeds.append(region.label)

        if _VISUAL_DEBUG:
            plt.imshow(final_seeds)
            plt.show()
        for region in regionprops(final_seeds):
            if region.label in removed_seeds:
                for coordinates in region.coords:
                    final_seeds[coordinates[0], coordinates[1]] = 0
        if _VISUAL_DEBUG:
            plt.imshow(final_seeds)
            plt.show()

        final_wshed = watershed(img_orig, markers=final_seeds, watershed_line=True)
        final_wshed[final_wshed != 0] = 1  # remove all seeds
        final_wshed[final_wshed == 0] = 255  # set wshed values to 255
        final_wshed[final_wshed == 1] = 0  # set all other cell content to 0
        if _VISUAL_DEBUG:
            plt.imshow(final_wshed)
            plt.show()

        duration = timer() - start
        if _DEBUG:
            print('final duration wshed in secs', duration)
        return final_wshed.astype(np.uint8)
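# Hypothetical usage sketch (not part of the original code): FilterMask expects the intensity image used
# for the watershed (img_orig) and a binary watershed mask (final_wshed, bonds at 255):
#
#     wshed = Img('handCorrection.tif')   # hypothetical path to an existing mask
#     intensity = Img('avg.tif')          # hypothetical path to the matching intensity image
#     cleaned = FilterMask(intensity, wshed, filter='local median', correction_factor=2)
#     Img(cleaned, dimensions='hw').save('handCorrection_filtered.tif')
#
# With an integer `filter`, cells below that absolute area become removal candidates instead of using the
# local-median criterion.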
def get_optimized_mask2(img, sauvola_mask=None, use_quick_shift=False, __VISUAL_DEBUG=False, __DEBUG=False, score_before_adding=False, return_seeds=False):
    final_image = None
    if use_quick_shift:
        rotations = [0, 2, 3]
        kernels = [1, 1.33, 1.66, 2]
        for kern in kernels:
            for rot in rotations:
                segments_quick = getQuickseg(img, nb_of_90_rotation=rot, kernel_size=kern)
                segments_quick = segments_quick.astype(np.uint8)
                if final_image is None:
                    final_image = segments_quick
                else:
                    final_image = final_image + segments_quick
        if __VISUAL_DEBUG:
            plt.imshow(final_image)
            plt.title("avg")
            plt.show()
        final_image[final_image < final_image.max()] = 0
        final_image[final_image >= final_image.max()] = 1

    if sauvola_mask is None:
        from epyseg.postprocess.edmshed import sauvola
        t = sauvola(img, min_threshold=0.1, window_size=25)
        img[img >= t] = 1
        img[img < t] = 0
    else:
        img = sauvola_mask
        img[img > 0] = 1
    if __DEBUG:
        Img(img, dimensions='hw').save('/home/aigouy/Bureau/trash/trash4/sauvola_mask.tif')
    raw_sauvola = img.copy()

    if use_quick_shift:
        final_image[raw_sauvola != 0] = 1
        if __VISUAL_DEBUG:
            plt.imshow(final_image)
            plt.show()
        if __DEBUG:
            Img(final_image.astype(np.uint8) * 255, dimensions='hw').save('/home/aigouy/Bureau/trash/trash4/corrected_stuff.tif')
        final_image = ~remove_small_objects(~final_image.astype(bool), min_size=5, connectivity=1)  # np.bool is removed in recent numpy
        final_image = skeletonize(final_image)
        if __DEBUG:
            Img(final_image.astype(np.uint8), dimensions='hw').save('/home/aigouy/Bureau/trash/trash4/skel_quick.tif')
        if __VISUAL_DEBUG:
            plt.imshow(final_image)
            plt.title("binary")
            plt.show()
        vertices_quick, cut_bonds_quick = split_into_vertices_and_bonds(final_image)
        if __DEBUG:
            Img(vertices_quick.astype(np.uint8), dimensions='hw').save('/home/aigouy/Bureau/trash/trash4/vertices_quick.tif')
            Img(cut_bonds_quick.astype(np.uint8), dimensions='hw').save('/home/aigouy/Bureau/trash/trash4/cut_bonds_quick.tif')

    img = skeletonize(img)
    if __DEBUG:
        Img(img, dimensions='hw').save('/home/aigouy/Bureau/trash/trash4/skel_sauvola_mask.tif')
    img = remove_small_objects(img, min_size=6, connectivity=2)
    if __DEBUG:
        Img(img, dimensions='hw').save('/home/aigouy/Bureau/trash/trash4/skel_sauvola_mask_deblobed.tif')
    if __VISUAL_DEBUG:
        plt.imshow(img)
        plt.show()

    image = img.copy()
    image = image.astype(np.uint8) * 255
    image = Img.invert(image)
    distance = distance_transform_edt(image)
    if __VISUAL_DEBUG:
        plt.imshow(distance)
        plt.show()
    local_maxi = peak_local_max(distance, indices=False, footprint=np.ones((8, 8)), labels=image)
    distance = -distance
    markers = ndimage.label(local_maxi, structure=generate_binary_structure(2, 2))[0]
    if __VISUAL_DEBUG:
        plt.imshow(markers)
        plt.show()
    if __DEBUG:
        Img(markers, dimensions='hw').save('/home/aigouy/Bureau/trash/trash4/markers_0.tif')

    labels = watershed(distance, markers, watershed_line=True)  # --> maybe implement that too
    labels[labels != 0] = 1
    labels[labels == 0] = 255
    labels[labels == 1] = 0
    labels[labels == 255] = 1
    if __VISUAL_DEBUG:
        plt.imshow(labels)
        plt.title('raw wshed')
        plt.show()
    if __DEBUG:
        Img(labels.astype(np.uint8), dimensions='hw').save('/home/aigouy/Bureau/trash/trash4/wshed_edm.tif')

    vertices_edm, cut_bonds_edm = split_into_vertices_and_bonds(labels)
    if __DEBUG:
        Img(vertices_edm.astype(np.uint8), dimensions='hw').save('/home/aigouy/Bureau/trash/trash4/vertices_edm.tif')
        Img(cut_bonds_edm.astype(np.uint8), dimensions='hw').save('/home/aigouy/Bureau/trash/trash4/cut_bonds_edm.tif')

    if use_quick_shift:
        if __DEBUG:
            Img(img, dimensions='hw').save('/home/aigouy/Bureau/trash/trash4/skel_sauvola_mask_deblobed.tif')
        unconnected = detect_unconnected_bonds(img)
        if __DEBUG:
            Img(unconnected, dimensions='hw').save('/home/aigouy/Bureau/trash/trash4/unconnected.tif')
        labels_quick = label(cut_bonds_quick, connectivity=2, background=0)
        labels_quick_vertices = label(vertices_quick, connectivity=2, background=0)
        props_labels_quick = regionprops(labels_quick)
        labels_pred = label(unconnected, connectivity=2, background=0)
        raw_sauvola = rescue_bonds(labels_pred, labels_quick, raw_sauvola, labels_quick_vertices, props_labels_quick, score_before_adding=score_before_adding)
        if __DEBUG:
            Img(raw_sauvola.astype(np.uint8), dimensions='hw').save('/home/aigouy/Bureau/trash/trash4/corrected_bonds_sauvola.tif')

    labels_edm = label(cut_bonds_edm, connectivity=2, background=0)
    labels_edm_vertices = label(vertices_edm, connectivity=2, background=0)
    props_labels_edm = regionprops(labels_edm)
    img = skeletonize(raw_sauvola)
    if __DEBUG:
        Img(raw_sauvola, dimensions='hw').save('/home/aigouy/Bureau/trash/trash4/skel_sauvola_mask2.tif')
    unconnected = detect_unconnected_bonds(img)
    if __DEBUG:
        Img(unconnected, dimensions='hw').save('/home/aigouy/Bureau/trash/trash4/unconnected.tif')
    labels_pred = label(unconnected, connectivity=2, background=0)
    raw_sauvola = rescue_bonds(labels_pred, labels_edm, raw_sauvola, labels_edm_vertices, props_labels_edm, score_before_adding=score_before_adding)
    raw_sauvola = connect_unconnected(labels_pred, labels_edm, raw_sauvola, props_labels_edm, labels_edm_vertices)

    if return_seeds:
        return markers, labels, raw_sauvola
    return raw_sauvola
# img = Img.invert(img)
# img = Img('D:/Dropbox/stuff_for_the_new_figure/old/predict_avg_hq_correction_ensemble_wshed/focused_Series010.tif')[...,0].astype(np.float)
# img = Img('D:/Dropbox/stuff_for_the_new_figure/old/predict_avg_hq_correction_ensemble_wshed/focused_Series194.tif')[...,0].astype(np.float)
# img = Img('D:/Dropbox/stuff_for_the_new_figure/old/predict_avg_hq_correction_ensemble_wshed/Series019.tif')[...,0].astype(np.float)
# img = Img('D:/Dropbox/stuff_for_the_new_figure/old/predict_avg_hq_correction_ensemble_wshed/image_plant_best-zoomed.tif')[...,0].astype(np.float)
# img = Img('D:/Dropbox/stuff_for_the_new_figure/old/predict_avg_hq_correction_ensemble_wshed/100708_png06.tif')[...,0].astype(np.float)
# img = Img('D:/Dropbox/stuff_for_the_new_figure/old/predict_avg_hq_correction_ensemble_wshed/MAX_160610_test_ocelli_ok_but_useless_cause_differs_a_lot_from_ommatidia.lif - test_visualization_head_ommatidia_32h_APF_ok_2.tif')[...,0].astype(np.float)
# img = Img('D:/Dropbox/stuff_for_the_new_figure/old/predict_avg_hq_correction_ensemble_wshed/proj0016.tif')[...,0].astype(np.float)
# img = Img('D:/Dropbox/mini_test.tif').astype(np.float)
# img = Img('/D/final_folder_scoring/predict_avg_hq_correction_ensemble_wshed/5.tif')[...,0].astype(np.float)
# img = Img('/D/final_folder_scoring/predict_avg_hq_correction_ensemble_wshed/5.tif')[...,1].astype(np.float)
# img = Img('/D/final_folder_scoring/predict_avg_hq_correction_ensemble_wshed/5.tif')[...,2].astype(np.float)
# img = Img('/D/final_folder_scoring/predict_avg_hq_correction_ensemble_wshed/122.tif')[...,0].astype(np.float)
# img = Img('/D/final_folder_scoring/predict_avg_hq_correction_ensemble_wshed/11.tif')[...,0].astype(np.float)
# img = Img('/D/final_folder_scoring/predict_avg_hq_correction_ensemble_wshed/11.tif')[...,1].astype(np.float)
# img = Img('/D/final_folder_scoring/predict_avg_hq_correction_ensemble_wshed/11.tif')[...,2].astype(np.float)
# img = Img('/D/final_folder_scoring/predict_avg_hq_correction_ensemble_wshed/cellpose_img22.tif')[...,0].astype(np.float)
# img = Img('/D/final_folder_scoring/predict_avg_hq_correction_ensemble_wshed/cellpose_img22_bg_subtracted_ij.tif')[...,0].astype(np.float)
# img = Img('/D/final_folder_scoring/predict_avg_hq_correction_ensemble_wshed/focused_Series010.tif')[...,0].astype(np.float)
# img = Img('/D/final_folder_scoring/predict_avg_hq_correction_ensemble_wshed/focused_Series194.tif')[..., 0].astype(np.float)
# img = Img('/D/final_folder_scoring/predict_avg_hq_correction_ensemble_wshed/focused_Series194.tif')[..., 1].astype(np.float)
# img = Img('/D/final_folder_scoring/predict_avg_hq_correction_ensemble_wshed/focused_Series194.tif')[..., 2].astype(np.float)
img = Img('/D/final_folder_scoring/predict/11.tif')[..., 0]

raw_sauvola = get_optimized_mask2(img, __VISUAL_DEBUG=True, __DEBUG=True)
Img(raw_sauvola.astype(np.uint8), dimensions='hw').save('/home/aigouy/Bureau/trash/trash4/corrected_bonds_sauvola2.tif')
print('total time', timer() - start)
class Image2D(Rect2D):

    def __init__(self, *args, x=None, y=None, width=None, height=None, data=None, dimensions=None, opacity=1., stroke=0.65, **kwargs):
        self.isSet = False
        self.annotation = []  # should contain the objects for annotating imaging --> shapes and texts
        self.letter = None  # when objects are swapped need change the letter
        if args:
            if len(args) == 1:
                self.filename = args[0]
        else:
            self.filename = None
        if x is None and y is None and width is not None and height is not None:
            super(Image2D, self).__init__(0, 0, width, height)
            self.isSet = True
        elif x is None and y is None and width is None and height is None:
            # print('in 0')
            self.img = Img(self.filename)
            self.qimage = self.img.getQimage()
            width = self.img.get_width()
            height = self.img.get_height()
            super(Image2D, self).__init__(0, 0, width, height)
            self.isSet = True
        elif x is not None and y is not None and width is not None and height is not None:
            self.img = None
            super(Image2D, self).__init__(x, y, width, height)
            self.isSet = True
        elif data is None:
            if self.filename is not None:
                self.img = Img(self.filename)
                self.qimage = self.img.getQimage()
                if x is None:
                    x = 0
                if y is None:
                    y = 0
                super(Image2D, self).__init__(x, y, self.img.get_width(), self.img.get_height())
                self.isSet = True
        elif data is not None:
            self.img = Img(data, dimensions=dimensions)  # need width and height so cannot really be only a numpy stuff --> cause no width or height by default --> or need tags such as image type for dimensions
            self.qimage = self.img.getQimage()
            # need Image dimensions id data is not of type IMG --> could check that
            if x is None:
                x = 0
            if y is None:
                y = 0
            super(Image2D, self).__init__(x, y, self.img.get_width(), self.img.get_height())
            self.isSet = True
        self.stroke = stroke  # DO I REALLY NEED STROKE
        self.opacity = opacity

    # @return the block incompressible width
    def getIncompressibleWidth(self):
        extra_space = 0  # can add some if boxes around to add text
        return extra_space

    # @return the block incompressible height
    def getIncompressibleHeight(self):
        extra_space = 0  # can add some if boxes around to add text
        return extra_space

    def setLetter(self, letter):
        self.letter = letter

    def draw(self, painter, draw=True):
        if draw:
            painter.save()
            painter.setOpacity(self.opacity)
        if draw:
            if self.img is not None:
                qsource = QRectF(0, 0, self.img.get_width(), self.img.get_height())
                painter.drawImage(self, self.qimage, qsource)  # , flags=QtCore.Qt.AutoColor
            else:
                painter.drawRect(self)
            painter.restore()
        # then need to draw the letter
        if self.letter is not None:
            self.letter.set_P1(self.get_P1())
            self.letter.draw(painter)
        if self.annotation is not None and self.annotation:
            for annot in self.annotation:
                annot.drawAndFill(draw=draw)

    def fill(self, painter, draw=True):
        if self.fill_color is None:
            return
        if draw:
            painter.save()
            painter.setOpacity(self.opacity)
        if draw:
            if self.img is not None:
                qsource = QRectF(0, 0, self.img.get_width(), self.img.get_height())
                painter.drawImage(self, self.qimage, qsource)
            else:
                painter.drawRect(self)
            painter.restore()

    def drawAndFill(self, painter):
        painter.save()
        if self.img is not None:
            qsource = QRectF(0, 0, self.img.get_width(), self.img.get_height())
            painter.drawImage(self, self.qimage, qsource)
        else:
            painter.drawRect(self)
        painter.restore()

    def __add__(self, other):
        from epyseg.figure.row import Row  # KEEP Really required to avoid circular imports
        return Row(self, other)

    # create a Fig with divide
    def __truediv__(self, other):
        from deprecated_demos.ezfig_tests.col import col  # KEEP Really required to avoid circular imports
        return col(self, other)

    # Force the montage width to equal 'width_in_px'
    def setToWidth(self, width_in_px):
        pure_image_width = self.width()
        ratio = width_in_px / pure_image_width
        self.setWidth(width_in_px)
        self.setHeight(self.height() * ratio)

    def setToHeight(self, height_in_px):
        pure_image_height = self.height()
        self.setHeight(height_in_px)
        ratio = height_in_px / pure_image_height
        self.setWidth(self.width() * ratio)
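    # Hypothetical usage sketch (not part of the original code): setToWidth()/setToHeight() rescale the
    # block while preserving its aspect ratio, e.g. to normalize panels before building a row:
    #
    #     panel = Image2D(x=0, y=0, width=512, height=256)
    #     panel.setToWidth(128)   # height becomes 64, keeping the 2:1 aspect ratio
    #     row = panel + Image2D(x=0, y=0, width=300, height=300)  # __add__ builds a Row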
class Image2D(Rect2D): TOP_LEFT = 0 TOP_RIGHT = 1 BOTTOM_LEFT = 2 BOTTOM_RIGHT = 3 CENTERED = 4 def __init__(self, *args, x=None, y=None, width=None, height=None, data=None, dimensions=None, opacity=1., **kwargs): self.isSet = False self.scale = 1 self.translation = QPointF() # crops self.__crop_left = 0 self.__crop_right = 0 self.__crop_top = 0 self.__crop_bottom = 0 self.img = None self.annotation = [ ] # should contain the objects for annotating imaging --> shapes and texts self.letter = None # when objects are swapped need change the letter self.top_left_objects = [] self.top_right_objects = [] self.bottom_right_objects = [] self.bottom_left_objects = [] self.centered_objects = [] # if the image is inserted as an inset then draw it as a fraction of parent width # inset parameters self.fraction_of_parent_image_width_if_image_is_inset = 0.25 self.border_size = None # no border by default self.border_color = 0xFFFFFF # white border by default if args: if len(args) == 1: if isinstance(args[0], str): self.filename = args[0] elif isinstance(args[0], Img): self.filename = None self.img = args[0] self.qimage = self.img.getQimage() if x is None: x = 0 if y is None: y = 0 super(Image2D, self).__init__(x, y, self.img.get_width(), self.img.get_height()) self.isSet = True else: self.filename = None if x is None and y is None and width is not None and height is not None: super(Image2D, self).__init__(0, 0, width, height) self.isSet = True elif x is None and y is None and width is None and height is None and self.filename is not None: # print('in 0') try: self.img = Img(self.filename) except: logger.error('could not load image ' + str(self.filename)) return self.qimage = self.img.getQimage() width = self.img.get_width() height = self.img.get_height() super(Image2D, self).__init__(0, 0, width, height) self.isSet = True elif x is not None and y is not None and width is not None and height is not None and self.img is None: self.img = None super(Image2D, self).__init__(x, y, width, height) self.isSet = True elif data is None: if self.filename is not None: self.img = Img(self.filename) self.qimage = self.img.getQimage() if x is None: x = 0 if y is None: y = 0 super(Image2D, self).__init__(x, y, self.img.get_width(), self.img.get_height()) self.isSet = True elif data is not None: self.img = Img( data, dimensions=dimensions ) # need width and height so cannot really be only a numpy stuff --> cause no width or height by default --> or need tags such as image type for dimensions self.qimage = self.img.getQimage() # need Image dimensions id data is not of type IMG --> could check that if x is None: x = 0 if y is None: y = 0 super(Image2D, self).__init__(x, y, self.img.get_width(), self.img.get_height()) self.isSet = True self.opacity = opacity # @return the block incompressible width def getIncompressibleWidth(self): extra_space = 0 # can add some if boxes around to add text return extra_space # @return the block incompressible height def getIncompressibleHeight(self): extra_space = 0 # can add some if boxes around to add text return extra_space def add_object(self, object, position): if position == Image2D.TOP_LEFT: self.top_left_objects.append(object) elif position == Image2D.BOTTOM_RIGHT: self.bottom_right_objects.append(object) elif position == Image2D.BOTTOM_LEFT: self.bottom_left_objects.append(object) elif position == Image2D.CENTERED: self.centered_objects.append(object) else: self.top_right_objects.append(object) # TODO --> check if contains it def remove_object(self, object, position): if position == 
Image2D.TOP_LEFT: self.top_left_objects.remove(object) elif position == Image2D.BOTTOM_RIGHT: self.bottom_right_objects.remove(object) elif position == Image2D.BOTTOM_LEFT: self.bottom_left_objects.remove(object) elif position == Image2D.CENTERED: self.centered_objects.remove(object) else: self.top_right_objects.remove(object) def remove_all_objects(self, position): if position == Image2D.TOP_LEFT: del self.top_left_objects self.top_left_objects = [] elif position == Image2D.BOTTOM_RIGHT: del self.bottom_right_objects self.bottom_right_objects = [] elif position == Image2D.BOTTOM_LEFT: del self.bottom_left_objects self.bottom_left_objects = [] elif position == Image2D.CENTERED: del self.centered_objects self.centered_objects = [] else: del self.top_right_objects self.top_right_objects = [] def setLettering(self, letter): if isinstance(letter, TAText2D): self.letter = letter elif isinstance(letter, str): if letter.strip() == '': self.letter = None else: self.letter = TAText2D(letter) # def getRect2D(self): # # self.__class__ = Rect2D # # return super() # # TODO ideally I'd like to get the Rect2D parent but I should think what the best way is to get it... # return self def draw(self, painter, draw=True): if draw: painter.save() painter.setOpacity(self.opacity) # painter.setClipRect(self) # only draw in self --> very useful for inset borders # pb clip rect does not work for svg --> remove for now users can add it manually if desired or I can add it if people really want it and then I should draw relevant lines or shifted rects --> do that later # prevents drawing outside from the image rect_to_plot = self.boundingRect( scaled=True ) #scaled=True #self.adjusted(self.__crop_left, self.__crop_top, self.__crop_right, self.__crop_bottom) # need remove the crops with that # self.scale = 1 # if self.scale is not None and self.scale != 1: # # # TODO KEEP THE ORDER THIS MUST BE DONE THIS WAY OR IT WILL GENERATE PLENTY OF BUGS... # new_width = rect_to_plot.width() * self.scale # new_height = rect_to_plot.height() * self.scale # # # print(rect_to_plot.width(), rect_to_plot.height()) # here ok # # # setX changes width --> why is that # # # # # TODO BE EXTREMELY CAREFUL AS SETX AND SETY CAN CHANGE WIDTH AND HEIGHT --> ALWAYS TAKE SIZE BEFORE OTHERWISE THERE WILL BE A PB AND ALWAYS RESET THE SIZE WHEN SETX IS CALLED!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # # # Sets the left edge of the rectangle to the given x coordinate. May change the width, but will never change the right edge of the rectangle. --> NO CLUE WHY SHOULD CHANGE WIDTH THOUGH BUT BE CAREFUL!!! 
# # rect_to_plot.setX(rect_to_plot.x() * self.scale) # # rect_to_plot.setY(rect_to_plot.y() * self.scale) # # # maybe to avoid bugs I should use translate instead rather that set x but ok anyways # # # print(rect_to_plot.width(), rect_to_plot.height())# bug here --> too big # # # # # print(new_height, new_height, self.width(), self.scale, self.scale* self.width()) # rect_to_plot.setWidth(new_width) # rect_to_plot.setHeight(new_height) if self.img is not None: x = 0 y = 0 w = self.img.get_width() h = self.img.get_height() if self.__crop_top is not None: y = self.__crop_top h -= self.__crop_top if self.__crop_left is not None: x = self.__crop_left w -= self.__crop_left if self.__crop_right is not None: w -= self.__crop_right if self.__crop_bottom is not None: h -= self.__crop_bottom # pb here --> see how to really crop qsource = QRectF(x, y, w, h) painter.drawImage(rect_to_plot, self.qimage, qsource) # , flags=QtCore.Qt.AutoColor else: painter.drawRect(rect_to_plot) # letter is good extra_space = 3 # draw annotations first if self.annotation is not None and self.annotation: # need clone the object then set its P1 with respect to position or need a trick to keep original ref and have an updated one just for display but then need renew it all the time --> see how I can do that... # maybe clone is not smart as it duplicates resources without a need for it # but then need clone the original rect and draw with respect to that # and I indeed need scale the shape --> TODO too # indeed thanks to cloning I always preserve original info --> not bad # annot position is good # TODO see how to do that cause not so easy --> think carefully and take inspiration from EZF and improve it for annot in self.annotation: # always empty --> why is that # print('init',annot.get_P1()) # always assume everything is done at 0,0 then do translation # annot.set_P1(self.get_P1().x() + annot.get_P1().x(), self.get_P1().y() + annot.get_P1().y()) # always relative to the parent image # annot.set_P1(self.get_P1()) # always relative to the parent image # print(annot.get_P1()) # print('init', self.get_P1(), 'scale', self.get_scale()) annot.set_to_translation(rect_to_plot.topLeft()) annot.set_to_scale( self.scale) # will f**k the stuff but ok for a test # print('scaled',annot.get_P1()) annot.draw(painter=painter) # print('tranbs', annot.translation) # and indeed I need also to take crop into account in order not to misposition things... 
if self.letter is not None: self.letter.set_P1(rect_to_plot.topLeft().x() + extra_space, rect_to_plot.topLeft().y() + extra_space) # then draw text and insets --> on top of annotations # TODO need align insets differently than others and need align its bounding box also differently --> TODO but almost there if len(self.top_right_objects) != 0 or len( self.top_left_objects) != 0 or len( self.bottom_left_objects) != 0 or len( self.bottom_right_objects) != 0 or len( self.centered_objects) != 0: # align a scale bar to various positions # maybe if there is a letter first point should be place below stuff # top_left = Point2D(self.get_P1()) top_left_shifted = Point2D(rect_to_plot.topLeft()) # top_left_shifted.setX(top_left_shifted.x() )# + extra_space # top_left_shifted.setY(top_left_shifted.y() )#+ extra_space # print('before', top_left) # if self.letter is not None: # packY(extra_space, self.letter, top_left_shifted) # print('after', top_left) # insets should be aligned to unshifted values # whereas texts should be aligned to shifted ones # what if I try all unshifted # cause in a way it's simpler # top_right = Point2D(self.get_P1()) top_right_shifted = Point2D(rect_to_plot.topLeft()) top_right_shifted.setX(top_right_shifted.x() + rect_to_plot.width()) #- extra_space top_right_shifted.setY(top_right_shifted.y()) #+ extra_space # bottom_left = Point2D(self.get_P1()) bottom_left_shifted = Point2D(rect_to_plot.topLeft()) bottom_left_shifted.setX( bottom_left_shifted.x()) #+ extra_space bottom_left_shifted.setY( bottom_left_shifted.y() + rect_to_plot.height() ) #- extra_space # should align right then pack on top of that --> may need a direction in packing--> TODO bottom_right = Point2D(rect_to_plot.topLeft()) bottom_right_shifted = Point2D(rect_to_plot.topLeft()) bottom_right_shifted.setX( bottom_right_shifted.x() + rect_to_plot.width()) # - extra_space bottom_right_shifted.setY( bottom_right_shifted.y() + rect_to_plot.height()) #- extra_space center = Point2D(rect_to_plot.topLeft()) center.setX(center.x() + rect_to_plot.width() / 2) center.setY(center.y() + rect_to_plot.height() / 2) if len(self.top_left_objects) != 0: # change inset size first for obj in self.top_left_objects: if isinstance(obj, Image2D): obj.setToWidth( rect_to_plot.width() * obj. fraction_of_parent_image_width_if_image_is_inset ) # if letter exists align with respect to it alignTop(top_left_shifted, *self.top_left_objects) alignLeft(top_left_shifted, *self.top_left_objects) if self.letter is not None: # packY(extra_space, self.letter, top_left_shifted) top_left_shifted = self.letter # in fact images really need be aligned left of the image but the others need be aligned with the letter that has an extra space --> TODO --> change some day packY(extra_space, top_left_shifted, *self.top_left_objects) # all images need be shifted back??? to be aligned left for obj in self.top_left_objects: # for drawing of inset borders # if isinstance(obj, Image2D): # # make it draw a border and align it # # painter.save() # img_bounds = Rect2D(obj) # img_bounds.stroke = 3 # # img_bounds.translate(-img_bounds.stroke / 2, -img_bounds.stroke / 2) # img_bounds.color = 0xFFFF00 # img_bounds.fill_color = 0xFFFF00 # img_bounds.draw(painter=painter) # # print(img_bounds) # # painter.restore() obj.draw(painter=painter) if len(self.top_right_objects) != 0: # change inset size first for obj in self.top_right_objects: if isinstance(obj, Image2D): obj.setToWidth( rect_to_plot.width() * obj. 
                alignRight(top_right_shifted, *self.top_right_objects)
                alignTop(top_right_shifted, *self.top_right_objects)
                packY(extra_space, top_right_shifted, *self.top_right_objects)
                for obj in self.top_right_objects:
                    # # for drawing of inset borders
                    # if isinstance(obj, Image2D):
                    #     # make it draw a border and align it
                    #     # painter.save()
                    #     img_bounds = Rect2D(obj)
                    #     img_bounds.stroke = 3
                    #     # img_bounds.translate(img_bounds.stroke / 2, -img_bounds.stroke / 2)
                    #     img_bounds.color = 0xFFFF00
                    #     img_bounds.fill_color = 0xFFFF00
                    #     img_bounds.draw(painter=painter)
                    #     # print(img_bounds)
                    #     # painter.restore()
                    obj.draw(painter=painter)

            if len(self.bottom_right_objects) != 0:
                # change the inset size first
                for obj in self.bottom_right_objects:
                    if isinstance(obj, Image2D):
                        obj.setToWidth(rect_to_plot.width() * obj.fraction_of_parent_image_width_if_image_is_inset)
                alignRight(bottom_right_shifted, *self.bottom_right_objects)
                alignBottom(bottom_right_shifted, *self.bottom_right_objects)
                packYreverse(extra_space, bottom_right_shifted, *self.bottom_right_objects)
                # packY(3, top_right, *self.top_right_objects)  # I do need to invert the packing order
                for obj in self.bottom_right_objects:
                    # # for drawing of inset borders
                    # if isinstance(obj, Image2D):
                    #     # make it draw a border and align it
                    #     # painter.save()
                    #     img_bounds = Rect2D(obj)
                    #     img_bounds.stroke = 3
                    #     # img_bounds.translate(-img_bounds.stroke / 2, img_bounds.stroke / 2)
                    #     # should I clip it to the image size --> maybe it's the best
                    #     img_bounds.color = 0xFFFF00
                    #     img_bounds.fill_color = 0xFFFF00
                    #     img_bounds.draw(painter=painter)
                    #     # print(img_bounds)
                    #     # painter.restore()
                    obj.draw(painter=painter)

            if len(self.bottom_left_objects) != 0:
                # change the inset size first
                for obj in self.bottom_left_objects:
                    if isinstance(obj, Image2D):
                        obj.setToWidth(rect_to_plot.width() * obj.fraction_of_parent_image_width_if_image_is_inset)
                alignLeft(bottom_left_shifted, *self.bottom_left_objects)
                alignBottom(bottom_left_shifted, *self.bottom_left_objects)
                packYreverse(extra_space, bottom_left_shifted, *self.bottom_left_objects)
                for obj in self.bottom_left_objects:
                    # # for drawing of inset borders
                    # if isinstance(obj, Image2D):
                    #     # make it draw a border and align it
                    #     # painter.save()
                    #     img_bounds = Rect2D(obj)
                    #     img_bounds.stroke = 3
                    #     # img_bounds.translate(-img_bounds.stroke / 2, img_bounds.stroke / 2)
                    #     img_bounds.color = 0xFFFF00
                    #     img_bounds.fill_color = 0xFFFF00
                    #     img_bounds.draw(painter=painter)
                    #     # print(img_bounds)
                    #     # painter.restore()
                    obj.draw(painter=painter)

            if len(self.centered_objects) != 0:
                # change the inset size first
                for obj in self.centered_objects:
                    if isinstance(obj, Image2D):
                        obj.setToWidth(rect_to_plot.width() * obj.fraction_of_parent_image_width_if_image_is_inset)
                alignCenterH(center, *self.centered_objects)
                alignCenterV(center, *self.centered_objects)
                for obj in self.centered_objects:
                    # # for drawing of inset borders
                    # if isinstance(obj, Image2D):
                    #     # make it draw a border and align it
                    #     # painter.save()
                    #     img_bounds = Rect2D(obj)
                    #     img_bounds.stroke = 3
                    #     img_bounds.color = 0xFFFF00
                    #     img_bounds.fill_color = 0xFFFF00
                    #     img_bounds.draw(painter=painter)
                    #     # print(img_bounds)
                    #     # painter.restore()
                    obj.draw(painter=painter)

        # then draw the letter last so that it is always on top
        if self.letter is not None:
            self.letter.draw(painter)

        painter.restore()

        # # TOP left 2
        # scale_bar = ScaleBar(30, '<font color="#FF00FF">10µm</font>')
        # scale_bar.set_scale(self.get_scale())
        # # scale_bar.set_P1(self.get_P1().x() + extra_space, self.get_P1().y() + extra_space)
        # scale_bar.set_P1(self.get_P1())
        # alignLeft(top_left, scale_bar)
        # alignTop(top_left, scale_bar)
        # # scale_bar.set_P1(scale_bar.get_P1().x() - extra_space, scale_bar.get_P1().x() + extra_space)
        # scale_bar.drawAndFill(painter=painter)

        # # TOP right 2
        # scale_bar = ScaleBar(30, '<font color="#FF00FF">10µm</font>')
        # scale_bar.set_scale(self.get_scale())
        # # scale_bar.set_P1(self.get_P1().x() + extra_space, self.get_P1().y() + extra_space)
        # scale_bar.set_P1(self.get_P1())
        # alignRight(top_right, scale_bar)
        # alignTop(top_right, scale_bar)
        # # scale_bar.set_P1(scale_bar.get_P1().x() - extra_space, scale_bar.get_P1().x() + extra_space)
        # scale_bar.drawAndFill(painter=painter)

        # # bottom left 2
        # # big bug in scale --> the size of the stuff isn't respected
        # # 288 is the size of image 0
        # scale_bar = ScaleBar(288, '<font color="#FF00FF">10µm</font>')
        # scale_bar.set_scale(self.get_scale())
        # # scale_bar.set_P1(self.get_P1().x() + extra_space, self.get_P1().y() + extra_space)
        # scale_bar.set_P1(self.get_P1())
        # alignLeft(bottom_left, scale_bar)
        # alignBottom(bottom_left, scale_bar)
        # # scale_bar.set_P1(scale_bar.get_P1().x() - extra_space, scale_bar.get_P1().x() + extra_space)
        # scale_bar.drawAndFill(painter=painter)

        # # bottom right 2
        # scale_bar = ScaleBar(30, '<font color="#FF00FF">10µm</font>')
        # scale_bar.set_scale(self.get_scale())
        # # scale_bar.set_P1(self.get_P1().x() + extra_space, self.get_P1().y() + extra_space)
        # scale_bar.set_P1(self.get_P1())
        # alignRight(bottom_right, scale_bar)
        # alignBottom(bottom_right, scale_bar)
        # # scale_bar.set_P1(scale_bar.get_P1().x() - extra_space, scale_bar.get_P1().x() + extra_space)
        # scale_bar.drawAndFill(painter=painter)

        # add a bunch of inner objects that should be packed left if they exist, and some right and some top, etc.
        # so that these objects are packed
        # maybe loop over them
        # could have as many text labels as desired
        # only one letter
        # as many insets as needed

        # # center 2
        # scale_bar = ScaleBar(411 / 2, '<font color="#FFFFFF">10µm</font>')
        # scale_bar.set_scale(self.get_scale())
        # # scale_bar.set_P1(self.get_P1().x() + extra_space, self.get_P1().y() + extra_space)
        # scale_bar.set_P1(self.get_P1())
        # alignCenterH(center, scale_bar)
        # alignCenterV(center, scale_bar)
        # # scale_bar.set_P1(scale_bar.get_P1().x() - extra_space, scale_bar.get_P1().x() + extra_space)
        # scale_bar.drawAndFill(painter=painter)

        # TODO create 5 reference points for each object and align to those
        # loop over all extra objects that need to be added

    # def fill(self, painter, draw=True):
    #     if self.fill_color is None:
    #         return
    #     if draw:
    #         painter.save()
    #     painter.setOpacity(self.opacity)
    #     if draw:
    #         if self.img is not None:
    #             qsource = QRectF(0, 0, self.img.get_width(), self.img.get_height())
    #             painter.drawImage(self, self.qimage, qsource)
    #         else:
    #             painter.drawRect(self)
    #         painter.restore()
    #     self.draw(painter=painter, draw=draw)

    # def drawAndFill(self, painter):
    #     painter.save()
    #     if self.img is not None:
    #         qsource = QRectF(0, 0, self.img.get_width(), self.img.get_height())
    #         painter.drawImage(self, self.qimage, qsource)
    #     else:
    #         painter.drawRect(self)
    #     painter.restore()
    #     self.draw(painter=painter)
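    # Usage sketch: how the decoration lists used in draw() above are presumably populated
    # before drawing. The attribute names come from the code above; that the *_objects
    # attributes are plain lists (supporting append) is an assumption, and the ScaleBar call
    # mirrors the commented-out examples in draw().
    #
    # img = Image2D('/path/to/panel.png')
    # img.crop(left=10, right=20, top=5, bottom=15)   # draw() then samples QRectF(10, 5, w - 30, h - 20) from the source image
    # img.bottom_right_objects.append(ScaleBar(30, '<font color="#FF00FF">10µm</font>'))
    # inset = Image2D('/path/to/inset.png')
    # inset.fraction_of_parent_image_width_if_image_is_inset = 0.25   # inset is resized to 25% of the parent width
    # img.top_left_objects.append(inset)
    # img.draw(painter=painter)                        # painter is a QPainter provided by the enclosing paint code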
    # def __add__(self, other):
    def __or__(self, other):
        from epyseg.figure.row import Row  # KEEP Really required to avoid circular imports
        return Row(self, other)

    # create a Fig with divide
    # def __truediv__(self, other):
    def __truediv__(self, other):
        from epyseg.figure.column import Column  # KEEP Really required to avoid circular imports
        return Column(self, other)

    def __floordiv__(self, other):
        return self.__truediv__(other=other)

    # do I really need this? in fact the ratio is enough, and I also need to take the crop into account otherwise there will be errors
    # Force the montage width to equal 'width_in_px'
    def setToWidth(self, width_in_px):
        # pure_image_width = self.width()
        # ratio = width_in_px / pure_image_width
        # self.setWidth(width_in_px)
        # self.setHeight(self.height() * ratio)
        # # we recompute the scale for the scale bar  # TODO BE CAREFUL IF EXTRAS ARE ADDED TO THE OBJECT AS THIS WOULD PERTURB THE COMPUTATIONS
        # self.update_scale()
        pure_image_width = self.width(scaled=False)  # need the original width and height in fact
        # if self.__crop_left is not None:
        #     pure_image_width -= self.__crop_left
        # if self.__crop_right is not None:
        #     pure_image_width -= self.__crop_right
        scale = width_in_px / pure_image_width
        self.scale = scale

    def setToHeight(self, height_in_px):
        # pure_image_height = self.height()
        # self.setHeight(height_in_px)
        # ratio = height_in_px / pure_image_height
        # self.setWidth(self.width() * ratio)
        # # we recompute the scale for the scale bar  # TODO BE CAREFUL IF EXTRAS ARE ADDED TO THE OBJECT AS THIS WOULD PERTURB THE COMPUTATIONS
        # self.update_scale()
        pure_image_height = self.height(scaled=False)
        # if self.__crop_top is not None:
        #     pure_image_height -= self.__crop_top
        # if self.__crop_bottom is not None:
        #     pure_image_height -= self.__crop_bottom
        scale = height_in_px / pure_image_height
        self.scale = scale

    # def update_scale(self):
    #     # we recompute the scale for the scale bar  # TODO BE CAREFUL IF EXTRAS ARE ADDED TO THE OBJECT AS THIS WOULD PERTURB THE COMPUTATIONS
    #     self.scale = self.get_scale()

    # def get_scale(self):
    #     # we recompute the scale for the scale bar  # TODO BE CAREFUL IF EXTRAS ARE ADDED TO THE OBJECT AS THIS WOULD PERTURB THE COMPUTATIONS
    #     return self.width() / self.img.get_width()

    def crop(self, left=None, right=None, top=None, bottom=None, all=None):
        # print(self.boundingRect())
        if left is not None:
            self.__crop_left = left
            # self.setWidth(self.img.get_width() - self.__crop_left)
        if right is not None:
            self.__crop_right = right
            # self.setWidth(self.img.get_width() - self.__crop_right)
        if top is not None:
            self.__crop_top = top
            # self.setHeight(self.img.get_height() - self.__crop_top)
        if bottom is not None:
            self.__crop_bottom = bottom
            # self.setHeight(self.img.get_height() - self.__crop_bottom)
        if all is not None:
            self.__crop_left = all
            self.__crop_right = all
            self.__crop_top = all
            self.__crop_bottom = all
            # self.setWidth(self.img.get_width() - self.__crop_left)
            # self.setWidth(self.img.get_width() - self.__crop_right)
            # self.setHeight(self.img.get_height() - self.__crop_top)
            # self.setHeight(self.img.get_height() - self.__crop_bottom)
        # see how to crop actually because I need to create a qimage
        # self.qimage = self.img.crop()
        # print(self.boundingRect())
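    # Composition and sizing sketch: the operator overloads above suggest panels are combined
    # with '|' (rows) and '/' (columns); the nesting shown below is an assumption, and
    # setToWidth()/setToHeight() only store a scale factor rather than resizing pixels.
    #
    # row = Image2D('a.png') | Image2D('b.png')                  # -> epyseg.figure.row.Row
    # col = Image2D('a.png') / Image2D('b.png')                  # -> epyseg.figure.column.Column
    # panel = (Image2D('a.png') | Image2D('b.png')) / Image2D('c.png')
    #
    # img = Image2D('a.png')      # say the uncropped image is 1024 px wide
    # img.setToWidth(512)         # stores self.scale = 512 / 1024 = 0.5
    # img.width(), img.height()   # both report half the original size (aspect ratio preserved)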
    # def set_to_scale(self, factor):
    #     self.scale = factor

    def set_to_translation(self, translation):
        self.translation = translation

    def boundingRect(self, scaled=True):
        # actually not good, need to take the crops and the scale into account
        # rect_to_plot = self.adjusted(self.__crop_left, self.__crop_top, -self.__crop_right, -self.__crop_bottom)
        # NB: assumes the __crop_* values are numeric here (draw() above treats None as 'no crop')
        rect_to_plot = self.adjusted(0, 0, -self.__crop_right - self.__crop_left,
                                     -self.__crop_bottom - self.__crop_top)
        # rect_to_plot = self.adjusted(-self.__crop_left, -self.__crop_top, -self.__crop_right, -self.__crop_bottom)
        # rect_to_plot = self.adjusted(0, 0, 0, 0)
        # print('begin rect_to_plot', rect_to_plot, self.scale)
        # if kwargs['draw'] == True or kwargs['fill'] == True:
        # if self.scale is None or self.scale == 1:
        #     painter.drawRect(self)
        # else:
        # clone the rect
        if self.scale is not None and self.scale != 1 and scaled:
            # TODO KEEP THE ORDER, THIS MUST BE DONE THIS WAY OR IT WILL GENERATE PLENTY OF BUGS...
            new_width = rect_to_plot.width() * self.scale
            new_height = rect_to_plot.height() * self.scale
            # print(rect_to_plot.width(), rect_to_plot.height())  # here ok
            # setX changes width --> why is that
            # TODO BE EXTREMELY CAREFUL AS SETX AND SETY CAN CHANGE WIDTH AND HEIGHT --> ALWAYS TAKE THE SIZE BEFORE, OTHERWISE THERE WILL BE A PB, AND ALWAYS RESET THE SIZE WHEN SETX IS CALLED!!!
            # "Sets the left edge of the rectangle to the given x coordinate. May change the width, but will never change the right edge of the rectangle." --> NO CLUE WHY IT SHOULD CHANGE THE WIDTH THOUGH, BUT BE CAREFUL!!!
            # rect_to_plot.setX(rect_to_plot.x() * self.scale)
            # rect_to_plot.setY(rect_to_plot.y() * self.scale)
            # maybe to avoid bugs I should use translate instead rather than setX but ok anyway
            # print(rect_to_plot.width(), rect_to_plot.height())  # bug here --> too big
            # print(new_height, new_height, self.width(), self.scale, self.scale * self.width())
            rect_to_plot.setWidth(new_width)
            rect_to_plot.setHeight(new_height)
        return rect_to_plot

    # def set_P1(self, *args):
    #     if not args:
    #         logger.error("no coordinate set...")
    #         return
    #     if len(args) == 1:
    #         self.moveTo(args[0].x(), args[0].y())
    #     else:
    #         self.moveTo(QPointF(args[0], args[1]))

    def get_P1(self):
        return self.boundingRect().topLeft()

    def width(self, scaled=True):
        return self.boundingRect(scaled=scaled).width()

    def height(self, scaled=True):
        return self.boundingRect(scaled=scaled).height()
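    # Worked example for the crop/scale interplay above (illustrative numbers):
    # with an underlying rect of 1000 x 800 and crop(left=10, right=20, top=5, bottom=15),
    # boundingRect(scaled=False) is 970 x 780; setToWidth(485) then sets self.scale to
    # 485 / 970 = 0.5, so boundingRect() reports 485 x 390. get_P1(), width() and height()
    # all go through boundingRect(), so they reflect both the crop and the current scale.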