def export_new_dataset(self, map, tile_size, step, basename):

    # create a black canvas of the same size as the map
    w = map.width()
    h = map.height()

    labelimg = QImage(w, h, QImage.Format_RGB32)
    labelimg.fill(qRgb(0, 0, 0))

    # CREATE LABEL IMAGE: paint the mask of every visible blob with its class color
    for i, blob in enumerate(self.seg_blobs):
        if blob.qpath_gitem.isVisible():
            if blob.class_name == "Empty":
                rgb = qRgb(255, 255, 255)
            else:
                rgb = qRgb(blob.class_color[0], blob.class_color[1], blob.class_color[2])

            blob_mask = blob.getMask()
            for x in range(blob_mask.shape[1]):
                for y in range(blob_mask.shape[0]):
                    if blob_mask[y, x] == 1:
                        # bbox is stored as (top, left, width, height)
                        labelimg.setPixel(x + blob.bbox[1], y + blob.bbox[0], rgb)

    # number of tiles per side; adjacent tiles overlap when step < tile_size
    tile_cols = int((w - tile_size) / step)
    tile_rows = int((h - tile_size) / step)

    deltaW = int(tile_size / 2) + 1
    deltaH = int(tile_size / 2) + 1

    for row in range(tile_rows):
        for col in range(tile_cols):
            top = deltaH + row * step
            left = deltaW + col * step

            cropimg = utils.cropQImage(map, [top, left, tile_size, tile_size])
            croplabel = utils.cropQImage(labelimg, [top, left, tile_size, tile_size])

            filenameRGB = basename + "_RGB_" + "{0:02d}".format(row) + "_" + "{0:02d}".format(col) + ".png"
            filenameLabel = basename + "_L_" + "{0:02d}".format(row) + "_" + "{0:02d}".format(col) + ".png"

            cropimg.save(filenameRGB)
            croplabel.save(filenameLabel)
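# Usage sketch (assumption: 'ann' is the annotation object exposing
# export_new_dataset() and 'qimg_map' is the QImage of the map; both names
# are hypothetical). With tile_size=1024 and step=512 adjacent tiles overlap
# by 50%, and each tile is saved as a <basename>_RGB_rr_cc.png /
# <basename>_L_rr_cc.png pair:
#
#   ann.export_new_dataset(qimg_map, tile_size=1024, step=512, basename="out/dataset")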
def splitBlob(self, map, blob, seeds):

    seeds = np.asarray(seeds)
    seeds = seeds.astype(int)
    mask = blob.getMask()
    box = blob.bbox
    cropimg = utils.cropQImage(map, box)
    cropimgnp = rgb2gray(utils.qimageToNumpyArray(cropimg))

    edges = sobel(cropimgnp)

    # seeds are given as (x, y) map coordinates
    seeds_matrix = np.zeros_like(mask)

    size = 40
    for i in range(0, seeds.shape[0]):
        # index as (y, x), relative to the blob bounding box
        seeds_matrix[seeds[i, 1] - box[0] - (size - 1): seeds[i, 1] - box[0] + (size - 1),
                     seeds[i, 0] - box[1] - (size - 1): seeds[i, 0] - box[1] + (size - 1)] = 1

    distance = ndi.distance_transform_edt(mask)
    # distance = ndi.distance_transform_edt(cropimg)
    seeds_matrix = seeds_matrix > 0.5
    markers = ndi.label(seeds_matrix)[0]
    # labels = watershed(-distance, markers, mask=mask)

    # combine the distance transform with the image edges so the split
    # follows both the blob shape and the visible boundaries
    labels = watershed((-distance + 100 * edges) / 2, markers, mask=mask)

    created_blobs = []
    for region in measure.regionprops(labels):
        b = Blob(region, box[1], box[0], self.progressive_id)
        self.progressive_id += 1
        b.class_color = blob.class_color
        b.class_name = blob.class_name
        created_blobs.append(b)

    return created_blobs
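# Standalone sketch of the marker-driven watershed used by splitBlob above
# (assumption: illustrative only, not part of this class; requires scipy and
# scikit-image). Two seeds split a single binary disk into two labelled regions.
import numpy as np
from scipy import ndimage as ndi
from skimage.segmentation import watershed

def _watershed_split_demo():
    yy, xx = np.mgrid[0:64, 0:64]
    mask = ((yy - 32) ** 2 + (xx - 32) ** 2 < 28 ** 2).astype(np.uint8)  # one blob

    seeds = np.zeros_like(mask)
    seeds[32, 16] = 1   # left seed
    seeds[32, 48] = 1   # right seed
    markers = ndi.label(seeds)[0]

    distance = ndi.distance_transform_edt(mask)
    labels = watershed(-distance, markers, mask=mask)  # regions labelled 1 and 2
    return labels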
def export_tiles(self, basename, tilename, labels_info):
    """
    Exports the tiles INSIDE the given areas (val_area and test_area are stored as (top, left, width, height)).
    The training tiles are the ones of the entire map minus the ones inside the validation and test areas.
    """

    ##### VALIDATION AREA

    basenameVim = os.path.join(basename, os.path.join("validation", "images"))
    os.makedirs(basenameVim, exist_ok=True)

    basenameVlab = os.path.join(basename, os.path.join("validation", "labels"))
    os.makedirs(basenameVlab, exist_ok=True)

    half_tile_size = self.tile_size / 2

    for i, sample in enumerate(self.validation_tiles):
        # each sample stores the center (cx, cy) of the tile
        cx = sample[0]
        cy = sample[1]
        top = cy - half_tile_size
        left = cx - half_tile_size

        cropimg = utils.cropQImage(self.orthoimage, [top, left, self.tile_size, self.tile_size])
        croplabel = utils.cropQImage(self.label_image, [top, left, self.tile_size, self.tile_size])

        filenameRGB = os.path.join(basenameVim, tilename + "_{0:04d}".format(i) + ".png")
        filenameLabel = os.path.join(basenameVlab, tilename + "_{0:04d}".format(i) + ".png")

        cropimg.save(filenameRGB)
        croplabel.save(filenameLabel)

    ##### TEST AREA

    basenameTestIm = os.path.join(basename, os.path.join("test", "images"))
    os.makedirs(basenameTestIm, exist_ok=True)

    basenameTestLab = os.path.join(basename, os.path.join("test", "labels"))
    os.makedirs(basenameTestLab, exist_ok=True)

    for i, sample in enumerate(self.test_tiles):
        cx = sample[0]
        cy = sample[1]
        top = cy - half_tile_size
        left = cx - half_tile_size

        cropimg = utils.cropQImage(self.orthoimage, [top, left, self.tile_size, self.tile_size])
        croplabel = utils.cropQImage(self.label_image, [top, left, self.tile_size, self.tile_size])

        filenameRGB = os.path.join(basenameTestIm, tilename + "_{0:04d}".format(i) + ".png")
        filenameLabel = os.path.join(basenameTestLab, tilename + "_{0:04d}".format(i) + ".png")

        cropimg.save(filenameRGB)
        croplabel.save(filenameLabel)

    ##### TRAINING AREA = ENTIRE MAP / (VALIDATION AREA U TEST AREA)

    basenameTrainIm = os.path.join(basename, os.path.join("training", "images"))
    os.makedirs(basenameTrainIm, exist_ok=True)

    basenameTrainLab = os.path.join(basename, os.path.join("training", "labels"))
    os.makedirs(basenameTrainLab, exist_ok=True)

    for i, sample in enumerate(self.training_tiles):
        cx = sample[0]
        cy = sample[1]
        top = cy - half_tile_size
        left = cx - half_tile_size

        cropimg = utils.cropQImage(self.orthoimage, [top, left, self.tile_size, self.tile_size])
        croplabel = utils.cropQImage(self.label_image, [top, left, self.tile_size, self.tile_size])

        filenameRGB = os.path.join(basenameTrainIm, tilename + "_{0:04d}".format(i) + ".png")
        filenameLabel = os.path.join(basenameTrainLab, tilename + "_{0:04d}".format(i) + ".png")

        cropimg.save(filenameRGB)
        croplabel.save(filenameLabel)
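# Usage sketch (assumption: 'exporter' already holds orthoimage, label_image,
# tile_size and the validation/test/training tile centers; all names besides
# the method itself are hypothetical). The call below produces:
#
#   <basename>/validation/images, <basename>/validation/labels
#   <basename>/test/images,       <basename>/test/labels
#   <basename>/training/images,   <basename>/training/labels
#
#   exporter.export_tiles("datasets/reef01", "tile", labels_info)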
def export_new_dataset(self, map, tile_size, step, output_folder):

    # if the dataset folder already has the DL subfolders, delete them
    output_folder_training = os.path.join(output_folder, "training")
    output_folder_validation = os.path.join(output_folder, "validation")
    output_folder_test = os.path.join(output_folder, "test")

    if os.path.exists(output_folder_training):
        shutil.rmtree(output_folder_training, ignore_errors=True)

    if os.path.exists(output_folder_validation):
        shutil.rmtree(output_folder_validation, ignore_errors=True)

    if os.path.exists(output_folder_test):
        shutil.rmtree(output_folder_test, ignore_errors=True)

    # create the DL folders
    os.mkdir(output_folder_training)
    output_images_training = os.path.join(output_folder_training, "images")
    output_labels_training = os.path.join(output_folder_training, "labels")
    os.mkdir(output_images_training)
    os.mkdir(output_labels_training)

    os.mkdir(output_folder_validation)
    output_images_validation = os.path.join(output_folder_validation, "images")
    output_labels_validation = os.path.join(output_folder_validation, "labels")
    os.mkdir(output_images_validation)
    os.mkdir(output_labels_validation)

    os.mkdir(output_folder_test)
    output_images_test = os.path.join(output_folder_test, "images")
    output_labels_test = os.path.join(output_folder_test, "labels")
    os.mkdir(output_images_test)
    os.mkdir(output_labels_test)

    ##### CREATE LABEL IMAGE

    # create a black canvas of the same size as the map
    w = map.width()
    h = map.height()

    labelimg = QImage(w, h, QImage.Format_RGB32)
    labelimg.fill(qRgb(0, 0, 0))

    painter = QPainter(labelimg)

    for i, blob in enumerate(self.seg_blobs):
        if blob.qpath_gitem.isVisible():
            if blob.class_name == "Empty":
                rgb = qRgb(255, 255, 255)
            else:
                class_color = self.labels_info[blob.class_name]
                rgb = qRgb(class_color[0], class_color[1], class_color[2])

            painter.setBrush(QBrush(QColor(rgb)))
            painter.drawPath(blob.qpath_gitem.path())

    painter.end()

    ##### TILING

    h1 = h * 0.65
    h2 = h * 0.85

    # tiles within the height [0..h1] are used for the training
    # tiles within the height [h1..h2] are used for the validation
    # the other tiles are used for the test

    tile_cols = int((w + tile_size) / step)
    tile_rows = int((h + tile_size) / step)

    deltaW = int(tile_size / 2) + 1
    deltaH = int(tile_size / 2) + 1

    for row in range(tile_rows):
        for col in range(tile_cols):

            top = row * step - deltaH
            left = col * step - deltaW

            cropimg = utils.cropQImage(map, [top, left, tile_size, tile_size])
            croplabel = utils.cropQImage(labelimg, [top, left, tile_size, tile_size])

            filenameRGB = ""

            if top + tile_size < h1 - step:
                filenameRGB = os.path.join(output_images_training,
                                           "tile_" + "{0:02d}".format(row) + "_" + "{0:02d}".format(col) + ".png")
                filenameLabel = os.path.join(output_labels_training,
                                             "tile_" + "{0:02d}".format(row) + "_" + "{0:02d}".format(col) + ".png")
            elif top > h2 + step:
                filenameRGB = os.path.join(output_images_test,
                                           "tile_" + "{0:02d}".format(row) + "_" + "{0:02d}".format(col) + ".png")
                filenameLabel = os.path.join(output_labels_test,
                                             "tile_" + "{0:02d}".format(row) + "_" + "{0:02d}".format(col) + ".png")
            elif top + tile_size >= h1 + step and top <= h2 - step:
                filenameRGB = os.path.join(output_images_validation,
                                           "tile_" + "{0:02d}".format(row) + "_" + "{0:02d}".format(col) + ".png")
                filenameLabel = os.path.join(output_labels_validation,
                                             "tile_" + "{0:02d}".format(row) + "_" + "{0:02d}".format(col) + ".png")

            print(filenameRGB)
            print(filenameLabel)

            # tiles straddling a split boundary are skipped
            if filenameRGB != "":
                cropimg.save(filenameRGB)
                croplabel.save(filenameLabel)
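# Minimal sketch mirroring the train/validation/test branching above
# (assumption: standalone helper for illustration, not used by the exporter).
# Tiles straddling the h1 = 0.65*h or h2 = 0.85*h boundaries fall through
# every branch and are skipped, exactly as in the loop above.
def _split_for_tile(top, tile_size, step, h):
    h1 = h * 0.65
    h2 = h * 0.85
    if top + tile_size < h1 - step:
        return "training"
    elif top > h2 + step:
        return "test"
    elif top + tile_size >= h1 + step and top <= h2 - step:
        return "validation"
    return None  # boundary tile: not exported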
def __init__(self, map, annotations, blob, x, y, parent=None):
    super(QtCrackWidget, self).__init__(parent)

    self.setStyleSheet("background-color: rgb(60,60,65); color: white")

    self.qimg_cropped = utils.cropQImage(map, blob.bbox)
    arr = utils.qimageToNumpyArray(self.qimg_cropped)
    self.input_arr = rgb2gray(arr) * 255
    self.tolerance = 20
    self.annotations = annotations
    self.blob = blob
    self.xmap = x
    self.ymap = y
    self.qimg_crack = QImage(self.qimg_cropped.width(), self.qimg_cropped.height(), QImage.Format_RGB32)
    self.qimg_crack.fill(qRgb(0, 0, 0))

    self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
    self.setFixedWidth(400)
    self.setFixedHeight(400)

    SLIDER_WIDTH = 200
    IMAGEVIEWER_SIZE = 300  # SIZE x SIZE

    self.sliderTolerance = QSlider(Qt.Horizontal)
    self.sliderTolerance.setFocusPolicy(Qt.StrongFocus)
    self.sliderTolerance.setMinimumWidth(SLIDER_WIDTH)
    self.sliderTolerance.setMinimum(1)
    self.sliderTolerance.setMaximum(100)
    self.sliderTolerance.setValue(self.tolerance)
    self.sliderTolerance.setTickInterval(5)
    self.sliderTolerance.setAutoFillBackground(True)
    self.sliderTolerance.valueChanged.connect(self.sliderToleranceChanged)

    self.lblTolerance = QLabel("Tolerance: 20")
    self.lblTolerance.setAutoFillBackground(True)
    txt = "Tolerance: {}".format(self.tolerance)  # do not shadow the builtin 'str'
    self.lblTolerance.setText(txt)

    layoutTolerance = QHBoxLayout()
    layoutTolerance.addWidget(self.lblTolerance)
    layoutTolerance.addWidget(self.sliderTolerance)

    self.viewerplus = QtImageViewerPlus()
    self.viewerplus.disableScrollBars()
    self.viewerplus.setFixedWidth(IMAGEVIEWER_SIZE)
    self.viewerplus.setFixedHeight(IMAGEVIEWER_SIZE)

    self.btnCancel = QPushButton("Cancel")
    self.btnCancel.setAutoFillBackground(True)
    self.btnApply = QPushButton("Apply")
    self.btnApply.setAutoFillBackground(True)

    layoutButtons = QHBoxLayout()
    layoutButtons.addWidget(self.btnCancel)
    layoutButtons.addWidget(self.btnApply)

    layoutV = QVBoxLayout()
    layoutV.addLayout(layoutTolerance)
    layoutV.addWidget(self.viewerplus)
    layoutV.addLayout(layoutButtons)
    layoutV.setSpacing(10)
    self.setLayout(layoutV)

    self.viewerplus.setImage(self.qimg_cropped)
    self.preview()

    self.setAutoFillBackground(True)

    self.setWindowTitle("Crack")
    self.setWindowFlags(Qt.Window | Qt.CustomizeWindowHint | Qt.WindowTitleHint)
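# Usage sketch (assumption: 'qimg_map' is the map QImage, 'ann' the annotations
# object and 'blob' the blob under the cursor; all hypothetical names). The
# Apply/Cancel buttons are exposed so the caller can wire them:
#
#   crack_widget = QtCrackWidget(qimg_map, ann, blob, x, y, parent=main_window)
#   crack_widget.btnCancel.clicked.connect(crack_widget.close)
#   crack_widget.show()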
def run(self, img_map, TILE_SIZE, AGGREGATION_WINDOW_SIZE, AGGREGATION_STEP):
    """
    :param TILE_SIZE: Base tile. This corresponds to the INPUT SIZE of the network.
    :param AGGREGATION_WINDOW_SIZE: Size of the sub-windows to consider for the aggregation.
    :param AGGREGATION_STEP: Step, in pixels, to calculate the different scores.
    :return:
    """

    # create a temporary folder to store the processing
    temp_dir = "temp"
    if not os.path.exists(temp_dir):
        os.mkdir(temp_dir)

    # prepare for running..
    STEP_SIZE = AGGREGATION_WINDOW_SIZE

    W = img_map.width()
    H = img_map.height()

    # top, left, width, height
    working_area = [0, 0, W, H]

    wa_top = working_area[0]
    wa_left = working_area[1]
    wa_width = working_area[2]
    wa_height = working_area[3]

    # shrink the working area so that every jittered crop stays inside the map
    if wa_top < AGGREGATION_STEP:
        wa_top = AGGREGATION_STEP

    if wa_left < AGGREGATION_STEP:
        wa_left = AGGREGATION_STEP

    if wa_left + wa_width >= W - AGGREGATION_STEP:
        wa_width = W - AGGREGATION_STEP - wa_left - 1

    if wa_top + wa_height >= H - AGGREGATION_STEP:
        wa_height = H - AGGREGATION_STEP - wa_top - 1

    tile_cols = int(wa_width / AGGREGATION_WINDOW_SIZE) + 1
    tile_rows = int(wa_height / AGGREGATION_WINDOW_SIZE) + 1

    if torch.cuda.is_available():
        device = torch.device("cuda")
        self.net.to(device)
        torch.cuda.synchronize()

    self.net.eval()

    # classification (per-tiles)
    tiles_number = tile_rows * tile_cols

    self.processing_step = 0
    self.total_processing_steps = 19 * tiles_number

    for row in range(tile_rows):

        if self.flagStopProcessing is True:
            break

        for col in range(tile_cols):

            if self.flagStopProcessing is True:
                break

            # nine score maps per tile: one per (i, j) jitter offset
            scores = np.zeros((9, self.nclasses, TILE_SIZE, TILE_SIZE))

            k = 0
            for i in range(-1, 2):
                for j in range(-1, 2):

                    top = wa_top - AGGREGATION_STEP + row * STEP_SIZE + i * AGGREGATION_STEP
                    left = wa_left - AGGREGATION_STEP + col * STEP_SIZE + j * AGGREGATION_STEP
                    cropimg = utils.cropQImage(img_map, [top, left, TILE_SIZE, TILE_SIZE])
                    img_np = utils.qimageToNumpyArray(cropimg)

                    img_np = img_np.astype(np.float32)
                    img_np = img_np / 255.0

                    # H x W x C --> C x H x W
                    img_np = img_np.transpose(2, 0, 1)

                    # Normalization (average subtraction)
                    img_np[0] = img_np[0] - self.average_norm[0]
                    img_np[1] = img_np[1] - self.average_norm[1]
                    img_np[2] = img_np[2] - self.average_norm[2]

                    with torch.no_grad():

                        img_tensor = torch.from_numpy(img_np)
                        input = img_tensor.unsqueeze(0)

                        if torch.cuda.is_available():
                            input = input.to(device)

                        outputs = self.net(input)

                        scores[k] = outputs[0].cpu().numpy()
                        k = k + 1

                    self.processing_step += 1
                    self.updateProgress.emit((100.0 * self.processing_step) / self.total_processing_steps)
                    QCoreApplication.processEvents()

                    if self.flagStopProcessing is True:
                        break

            # preds_avg, preds_bayesian = self.aggregateScores(scores, tile_sz=TILE_SIZE,
            #                                                  center_window_size=AGGREGATION_WINDOW_SIZE, step=AGGREGATION_STEP)

            preds_avg = self.aggregateScores(scores, tile_sz=TILE_SIZE,
                                             center_window_size=AGGREGATION_WINDOW_SIZE, step=AGGREGATION_STEP)

            values_t, predictions_t = torch.max(torch.from_numpy(preds_avg), 0)

            preds = predictions_t.cpu().numpy()

            resimg = np.zeros((preds.shape[0], preds.shape[1], 3), dtype='uint8')
            for label_index in range(self.nclasses):
                resimg[preds == label_index, :] = self.label_colors[label_index]

            tilename = str(row) + "_" + str(col) + ".png"
            filename = os.path.join(temp_dir, tilename)
            utils.rgbToQImage(resimg).save(filename)

            self.processing_step += 1
            self.updateProgress.emit((100.0 * self.processing_step) / self.total_processing_steps)
            QCoreApplication.processEvents()

    # put the tiles together
    qimglabel = QImage(W, H, QImage.Format_RGB32)

    xoffset = 0
    yoffset = 0

    painter = QPainter(qimglabel)

    for r in range(tile_rows):
        for c in range(tile_cols):
            tilename = str(r) + "_" + str(c) + ".png"
            filename = os.path.join(temp_dir, tilename)
            qimg = QImage(filename)

            xoffset = wa_left + c * AGGREGATION_WINDOW_SIZE
            yoffset = wa_top + r * AGGREGATION_WINDOW_SIZE

            cut = False
            W_prime = wa_width
            H_prime = wa_height

            if xoffset + AGGREGATION_WINDOW_SIZE > wa_left + wa_width - 1:
                W_prime = wa_width + wa_left - xoffset - 1
                cut = True

            if yoffset + AGGREGATION_WINDOW_SIZE > wa_top + wa_height - 1:
                H_prime = wa_height + wa_top - yoffset - 1
                cut = True

            if cut is True:
                qimg2 = qimg.copy(0, 0, W_prime, H_prime)
                painter.drawImage(xoffset, yoffset, qimg2)
            else:
                painter.drawImage(xoffset, yoffset, qimg)

    # end the painter before the QImage is freed, otherwise Qt crashes when the memory is released
    painter.end()

    labelfile = os.path.join(temp_dir, "labelmap.png")
    qimglabel.save(labelfile)

    torch.cuda.empty_cache()
    del self.net
    self.net = None
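# Minimal sketch of the score fusion performed by aggregateScores() above
# (assumption: simple per-pixel averaging of the nine jittered predictions;
# the actual aggregation may weight or crop the maps differently).
import numpy as np

def _average_fusion(scores):
    # scores: (9, nclasses, H, W) -> (nclasses, H, W)
    return scores.mean(axis=0)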
def run(self, TILE_SIZE, AGGREGATION_WINDOW_SIZE, AGGREGATION_STEP, save_scores=False):
    """
    :param TILE_SIZE: Base tile. This corresponds to the INPUT SIZE of the network.
    :param AGGREGATION_WINDOW_SIZE: Size of the center window considered for the aggregation.
    :param AGGREGATION_STEP: Step, in pixels, to calculate the different scores.
    :return:
    """

    # create a temporary folder to store the processing
    if not os.path.exists(self.temp_dir):
        os.mkdir(self.temp_dir)

    # prepare for running..
    DELTA_CROP = int((TILE_SIZE - AGGREGATION_WINDOW_SIZE) / 2)

    tile_cols = int(self.wa_width / AGGREGATION_WINDOW_SIZE) + 1
    tile_rows = int(self.wa_height / AGGREGATION_WINDOW_SIZE) + 1

    if torch.cuda.is_available():
        device = torch.device("cuda")
        self.net.to(device)
        torch.cuda.synchronize()

    self.net.eval()

    # classification (per-tiles)
    tiles_number = tile_rows * tile_cols

    self.processing_step = 0
    self.total_processing_steps = 19 * tiles_number

    for row in range(tile_rows):

        if self.flagStopProcessing is True:
            break

        for col in range(tile_cols):

            if self.flagStopProcessing is True:
                break

            scores = np.zeros((9, self.nclasses, TILE_SIZE, TILE_SIZE))

            k = 0
            for i in range(-1, 2):
                for j in range(-1, 2):

                    top = self.wa_top - DELTA_CROP + row * AGGREGATION_WINDOW_SIZE + i * AGGREGATION_STEP
                    left = self.wa_left - DELTA_CROP + col * AGGREGATION_WINDOW_SIZE + j * AGGREGATION_STEP
                    tileimg = utils.cropQImage(self.input_image, [top, left, TILE_SIZE, TILE_SIZE])
                    img_np = utils.qimageToNumpyArray(tileimg)

                    img_np = img_np.astype(np.float32)
                    img_np = img_np / 255.0

                    # H x W x C --> C x H x W
                    img_np = img_np.transpose(2, 0, 1)

                    # Normalization (average subtraction)
                    img_np[0] = img_np[0] - self.average_norm[0]
                    img_np[1] = img_np[1] - self.average_norm[1]
                    img_np[2] = img_np[2] - self.average_norm[2]

                    with torch.no_grad():

                        img_tensor = torch.from_numpy(img_np)
                        input = img_tensor.unsqueeze(0)

                        if torch.cuda.is_available():
                            input = input.to(device)

                        outputs = self.net(input)

                        scores[k] = outputs[0].cpu().numpy()
                        k = k + 1

                    self.processing_step += 1
                    self.updateProgress.emit((100.0 * self.processing_step) / self.total_processing_steps)
                    QCoreApplication.processEvents()

                    if self.flagStopProcessing is True:
                        break

            preds_avg = self.aggregateScores(scores, tile_sz=TILE_SIZE,
                                             center_window_size=AGGREGATION_WINDOW_SIZE, step=AGGREGATION_STEP)

            values_t, predictions_t = torch.max(torch.from_numpy(preds_avg), 0)

            preds = predictions_t.cpu().numpy()

            resimg = np.zeros((preds.shape[0], preds.shape[1], 3), dtype='uint8')
            for label_index in range(self.nclasses):
                resimg[preds == label_index, :] = self.label_colors[label_index]

            tilename = str(row) + "_" + str(col) + ".png"
            filename = os.path.join(self.temp_dir, tilename)
            utils.rgbToQImage(resimg).save(filename)

            if save_scores is True:
                tilename = str(row) + "_" + str(col) + ".dat"
                filename = os.path.join(self.temp_dir, tilename)
                with open(filename, 'wb') as fileobject:
                    pkl.dump(preds_avg, fileobject)

            self.processing_step += 1
            self.updateProgress.emit((100.0 * self.processing_step) / self.total_processing_steps)
            QCoreApplication.processEvents()

    self.assembleTiles(tile_rows, tile_cols, AGGREGATION_WINDOW_SIZE, ass_scores=save_scores)

    torch.cuda.empty_cache()
    del self.net
    self.net = None
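# Sketch: reading back one of the per-tile score files written above when
# save_scores=True (assumption: hypothetical helper; 'temp_dir', 'row' and
# 'col' must match the values used during the run).
import os
import pickle as pkl

def _load_tile_scores(temp_dir, row, col):
    filename = os.path.join(temp_dir, str(row) + "_" + str(col) + ".dat")
    with open(filename, 'rb') as f:
        return pkl.load(f)  # the (nclasses, H, W) averaged scores of the tile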
def segmentation(self):

    # compute the bounding box of the scribbles (working area)
    bboxes = []
    for i, curve in enumerate(self.scribbles.points):
        bbox = Mask.pointsBox(curve, int(self.scribbles.size[i] / 2))
        bboxes.append(bbox)
    working_area = Mask.jointBox(bboxes)

    # clamp the working area to the map
    if working_area[0] < 0:
        working_area[0] = 0

    if working_area[1] < 0:
        working_area[1] = 0

    if working_area[0] + working_area[3] > self.viewerplus.img_map.height() - 1:
        working_area[3] = self.viewerplus.img_map.height() - 1 - working_area[0]

    if working_area[1] + working_area[2] > self.viewerplus.img_map.width() - 1:
        working_area[2] = self.viewerplus.img_map.width() - 1 - working_area[1]

    crop_img = utils.cropQImage(self.viewerplus.img_map, working_area)
    crop_imgnp = utils.qimageToNumpyArray(crop_img)

    # create the markers by rasterizing the scribbles with their label color
    mask = np.zeros((working_area[3], working_area[2], 3), dtype=np.int32)

    color_codes = dict()
    counter = 1
    for i, curve in enumerate(self.scribbles.points):

        col = self.scribbles.label[i].fill
        b = col[2]
        g = col[1]
        r = col[0]
        color = (b, g, r)

        color_code = b + 256 * g + 65536 * r
        color_key = str(color_code)
        if color_codes.get(color_key) is None:
            name = self.scribbles.label[i].name
            color_codes[color_key] = (counter, name)
            counter = counter + 1

        curve = np.int32(curve)
        curve[:, 0] = curve[:, 0] - working_area[1]
        curve[:, 1] = curve[:, 1] - working_area[0]
        curve = curve.reshape((-1, 1, 2))
        mask = cv2.polylines(mask, pts=[curve], isClosed=False, color=color,
                             thickness=self.scribbles.size[i], lineType=cv2.LINE_8)

    mask = np.uint8(mask)

    markers = np.zeros((working_area[3], working_area[2]), dtype='int32')
    for label in self.scribbles.label:
        col = label.fill
        b = col[2]
        g = col[1]
        r = col[0]
        color_code = b + 256 * g + 65536 * r
        color_key = str(color_code)

        idx = np.where((mask[:, :, 0] == b) & (mask[:, :, 1] == g) & (mask[:, :, 2] == r))
        (value, name) = color_codes[color_key]
        markers[idx] = value

    # markers = np.int32(255*rgb2gray(mask))
    # markersprint = 255*rgb2gray(mask)
    markersprint = markers
    cv2.imwrite('mask.png', markersprint)  # debug output

    # watershed segmentation
    segmentation = cv2.watershed(crop_imgnp, markers)
    segmentation = filters.median(segmentation, disk(5), mode="mirror")
    cv2.imwrite('segmentation.png', segmentation)  # debug output

    # the result of the segmentation must be converted into labels again
    lbls = measure.label(segmentation)

    blobs = []
    for region in measure.regionprops(lbls):
        blob = Blob(region, working_area[1], working_area[0], self.viewerplus.annotations.getFreeId())

        # recover the class color and name from the marker value of the region
        color_index = segmentation[region.coords[0][0], region.coords[0][1]]
        data = list(color_codes.items())
        for i in range(len(data)):
            (color_code, t) = data[i]
            if t[0] == color_index:
                color_code = int(color_code)
                r = int(color_code / 65536)
                g = int(int(color_code - r * 65536) / 256)
                b = int(color_code - r * 65536 - g * 256)
                color = [r, g, b]
                name = t[1]
                break

        blob.class_color = color
        blob.class_name = name
        blobs.append(blob)

    return blobs
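# Standalone sketch of the color <-> integer packing used by segmentation()
# above (assumption: illustrative helpers, not part of this class).
def _pack_color(r, g, b):
    return b + 256 * g + 65536 * r

def _unpack_color(code):
    r = code // 65536
    g = (code - r * 65536) // 256
    b = code - r * 65536 - g * 256
    return r, g, b

assert _unpack_color(_pack_color(10, 20, 30)) == (10, 20, 30)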