Ejemplo n.º 1
0
def main():
    """Build a pix2pix training dataset for a single tissue type.

    Reads paired image/mask ``*.tif`` files from
    ``<inputFolder>/<tissue>/images`` and ``.../masks``, augments each pair
    with flips (no deformation), registers the annotated objects and writes
    the pix2pix image pairs via ``Tools.createPix2pixDataset``.
    """
    parser = argparse.ArgumentParser(description='Train model.')
    parser.add_argument('--tissue',
                        help='select tissue to train.',
                        default=None)
    parser.add_argument('--inputFolder',
                        help='Select input folder.',
                        default=None)
    parser.add_argument('--outputFolder',
                        help='select output folder',
                        default=None)
    parser.add_argument('--scale', help='select output folder', default=None)
    args = parser.parse_args()
    config = Config
    if args.tissue:
        config.diagnosis = [args.tissue]

    if args.outputFolder:
        config.outputFolder = args.outputFolder

    if args.scale:
        config.scale = int(args.scale)

    # Ganglioneuroma gets fewer frequencies than the other tissues.
    if args.tissue == 'Ganglioneuroma':
        n_freq = 20
    else:
        n_freq = 30
    # BUG FIX: "Scale: " + args.scale raised TypeError when --scale was
    # omitted (args.scale is None); convert explicitly.
    print("Scale: " + str(args.scale))

    print(config.diagnosis)
    print(config.outputFolder)
    tools = Tools()

    annotated_nuclei = AnnotatedObjectSet()
    # BUG FIX: glob returns files in arbitrary (filesystem-dependent) order;
    # sort both lists so the i-th image is paired with the i-th mask.
    ids_images = sorted(glob.glob(
        os.path.join(args.inputFolder, config.diagnosis[0], 'images',
                     '*.tif')))
    ids_masks = sorted(glob.glob(
        os.path.join(args.inputFolder, config.diagnosis[0], 'masks',
                     '*.tif')))

    # Create dataset for training the pix2pix-network based on image pairs
    for img_path, mask_path in zip(ids_images, ids_masks):
        test = AnnotatedImage()
        test.readFromPath(img_path, mask_path, type='uint16')
        enhanced_images = tools.enhanceImage(test,
                                             flip_left_right=True,
                                             flip_up_down=True,
                                             deform=False)
        # note: distinct loop variable — the original shadowed the outer index
        for img in enhanced_images:
            annotated_nuclei.addObjectImage(
                img, useBorderObjects=config.useBorderObjects)

    # Create the image pairs
    tools.createPix2pixDataset(annotated_nuclei,
                               config,
                               n_freq=n_freq,
                               tissue=args.tissue)
Ejemplo n.º 2
0
def main():
    """Create and save image tiles for one tissue type.

    Reads paired image/mask ``*.tif`` files from
    ``<inputFolder>/<tissue>/images`` and ``.../masks`` and delegates the
    tiling to ``Tools.createAndSaveTiles``.
    """
    parser = argparse.ArgumentParser(description='Train model.')
    parser.add_argument('--tissue',
                        help='select tissue to train.',
                        default=None)
    parser.add_argument('--inputFolder',
                        help='Select input folder.',
                        default=None)
    parser.add_argument('--outputFolder',
                        help='select output folder',
                        default=None)
    parser.add_argument('--scale', help='select output folder', default=None)
    parser.add_argument('--mode', help='select output folder', default='train')
    parser.add_argument('--resultsfile',
                        help='select output folder',
                        default=None)
    parser.add_argument('--overlap', help='select output folder', default=None)
    args = parser.parse_args()
    config = Config
    if args.tissue:
        config.diagnosis = [args.tissue]

    if args.outputFolder:
        config.outputFolder = args.outputFolder
    if args.scale == '1':
        config.scale = True
    if args.mode:
        config.mode = args.mode
    if args.resultsfile:
        config.resultsfile = args.resultsfile
    if args.overlap:
        config.overlap = int(args.overlap)

    print(config.diagnosis)
    print(config.outputFolder)
    print(config.scale)
    tools = Tools()

    annotated_nuclei = AnnotatedObjectSet()
    # BUG FIX: glob order is filesystem-dependent; sort both lists so each
    # image is paired with its own mask when iterated by index.
    ids_images = sorted(glob.glob(
        os.path.join(args.inputFolder, config.diagnosis[0], 'images',
                     '*.tif')))
    ids_masks = sorted(glob.glob(
        os.path.join(args.inputFolder, config.diagnosis[0], 'masks',
                     '*.tif')))

    # Load every image/mask pair and register its annotated objects
    for img_path, mask_path in zip(ids_images, ids_masks):
        test = AnnotatedImage()
        test.readFromPath(img_path, mask_path, type='uint16')
        annotated_nuclei.addObjectImage(
            test,
            useBorderObjects=config.useBorderObjects,
            path_to_img=img_path)
    # Create and save the image tiles
    tools.createAndSaveTiles(annotated_nuclei, config)
Ejemplo n.º 3
0
    def createPix2pixDataset(self, annotated_nuclei, config):
        """Write side-by-side (natural | artificial) pix2pix training pairs.

        For every rescaled/tiled image, an artificial counterpart is
        synthesized from its mask, and both halves are written next to each
        other into one grayscale JPEG under ``config.outputFolder``.

        NOTE(review): another caller in this project passes extra
        ``n_freq``/``tissue`` keyword arguments to a method of this name;
        this variant does not accept them — confirm which version is live.
        """
        images = []
        masks = []

        for i in range(0, annotated_nuclei.images.__len__()):
            images.append(annotated_nuclei.images[i].getRaw())
            masks.append(annotated_nuclei.images[i].getMask())

        # Get scales from masks
        print("Calculate mean object size ...")
        #scales_for_conv = self.getNormalizedScales(masks)
        scales_for_conv = self.getNormalizedScales(annotated_nuclei.images)

        # Rescale and Tile
        print("Rescale and tile images and masks ...")
        # The trailing return values are discarded by binding them all to the
        # throwaway name `t`.
        [images, masks, t, t, t] = self.rescaleAndTile(images=images,
                                                       masks=masks,
                                                       scales=scales_for_conv,
                                                       overlap=20,
                                                       rescale=config.scale)

        # Create artificial dataset
        if (config.diagnosis.__len__() > 1):
            img_name = 'combined'
        else:
            img_name = config.diagnosis[0]

        print("Create artificial dataset ...")
        # NOTE(review): `images.__len__() - 1` skips the last tile — confirm
        # whether this off-by-one is intentional.
        for i in range(0, images.__len__() - 1):
            img_nat = AnnotatedImage()
            img_nat.createWithArguments(images[i], masks[i])
            img_art = ArtificialAnnotatedImage
            img_art = img_art.transformToArtificialImage(
                img_nat, useBorderObjects=config.useBorderObjects)
            # Natural image fills the left half, artificial the right half;
            # assumes every tile is INPUT_SHAPE[1] pixels wide — TODO confirm.
            img_combined = np.zeros(
                (images[0].shape[0], images[0].shape[1] * 2), np.float32)
            img_combined[:, 0:INPUT_SHAPE[1]] = img_nat.getRaw()
            img_combined[:,
                         INPUT_SHAPE[1]:INPUT_SHAPE[1] * 2] = img_art.getRaw()
            plt.imshow(img_combined, cmap='gray')
            # Replicate the grayscale pair into 3 channels for JPEG output.
            img_to_sav = np.zeros(
                (img_combined.shape[0], img_combined.shape[1], 3), np.float32)
            img_to_sav[:, :, 0] = img_combined
            img_to_sav[:, :, 1] = img_combined
            img_to_sav[:, :, 2] = img_combined
            #scipy.misc.toimage(img_to_sav, cmin=0.0, cmax=1.0).save(config.outputPath + config.outputFolder + '\\Img_' + str(i) + '.jpg')

            # NOTE(review): scipy.misc.toimage was removed in SciPy 1.2, and
            # the '\\' separator is Windows-only — confirm the target runtime.
            scipy.misc.toimage(img_to_sav, cmin=0.0,
                               cmax=1.0).save(config.outputFolder + '\\Img_' +
                                              img_name + '_' + str(i) + '.jpg')
            e = 1
Ejemplo n.º 4
0
def main():
    """Tile raw sample images (no masks) for segmentation inference.

    Collects all ``*.<ending>`` files from ``--inputFolder`` (sorted by
    basename), wraps each one in an ``AnnotatedImage`` and writes scaled
    tiles via ``Tools.createAndSaveTilesForSampleSegmentation``.
    """
    parser = argparse.ArgumentParser(description='Train model.')
    parser.add_argument('--tissue', help='select tissue to train.', default="Ganglioneuroma")
    parser.add_argument('--inputFolder', help='Select input folder.', default=None)
    parser.add_argument('--outputFolder', help='select output folder', default=None)
    parser.add_argument('--scale', help='select output folder', default=None)
    parser.add_argument('--mode', help='select output folder', default='train')
    parser.add_argument('--resultsfile', help='select output folder', default=None)
    parser.add_argument('--overlap', help='select output folder', default=None)
    parser.add_argument('--ending', help='select output folder', default=None)
    parser.add_argument('--scalesize', help='select output folder', default=None)
    args = parser.parse_args()

    config = Config
    if args.tissue:
        config.diagnosis = [args.tissue]

    if args.outputFolder:
        config.outputFolder = args.outputFolder
    if args.scale == '1':
        config.scale = True
    if args.mode:
        config.mode = args.mode
    if args.resultsfile:
        config.resultsfile = args.resultsfile
    if args.overlap:
        config.overlap = int(args.overlap)

    print(config.diagnosis)
    print(config.outputFolder)
    print(config.scale)
    tools = Tools()

    def takeSecond(elem):
        # sort key: compare files by basename only
        return os.path.basename(elem)

    annotated_nuclei = AnnotatedObjectSet()
    print("Input folder: " + args.inputFolder)
    ids_images = glob.glob(os.path.join(args.inputFolder, '*.' + args.ending))
    ids_images.sort(key=takeSecond)

    # Register every raw image (no ground-truth masks in this pipeline)
    for img_path in ids_images:
        print(img_path)
        test = AnnotatedImage()
        test.readFromPathOnlyImage(img_path)
        annotated_nuclei.addObjectImage(
            test,
            useBorderObjects=config.useBorderObjects,
            path_to_img=img_path)
    # BUG FIX: the debug snapshot was written to the same path once per
    # iteration AND once after the loop; write it once, and only when at
    # least one image was read (the original raised NameError on an empty
    # folder because `test` was undefined).
    if ids_images:
        cv2.imwrite(r"/root/flo/tmp/test_before_tiling.jpg", test.getRaw())
    # Create and save the image tiles
    tools.createAndSaveTilesForSampleSegmentation(annotated_nuclei, config,
                                                  float(args.scalesize))
Ejemplo n.º 5
0
def main():
    """Export raw JPEGs and single-layer SVG overlays for a folder of images.

    For every ``*.<ending>`` file in ``--inputFolder`` with a sibling
    ``*_mask.TIF``, writes ``<name>_raw.jpg`` and an SVG overlay
    ``<name>_svg.svg`` next to the source file.
    """
    parser = argparse.ArgumentParser(description='Train model.')
    parser.add_argument('--tissue', help='select tissue to train.', default=None)
    parser.add_argument('--inputFolder', help='Select input folder.', default=None)
    parser.add_argument('--outputFolder', help='select output folder', default=None)
    parser.add_argument('--nr_images', help='select number of images to create', default=None)
    parser.add_argument('--overlapProbability', help='select overlapProbability', default=None)
    parser.add_argument('--samplingrate', help='how fine the contour shall be sampled', default=None)
    parser.add_argument('--ending', help='how fine the contour shall be sampled', default=None)
    args = parser.parse_args()

    config = Config
    if args.tissue:
        config.diagnosis = [args.tissue]

    if args.outputFolder:
        config.outputFolder = args.outputFolder

    #print(config.diagnosis)
    print(args.ending)
    print(args.inputFolder)
    tools = Tools()
    # NOTE(review): int() raises TypeError when --samplingrate is omitted —
    # presumably the argument is mandatory in practice; confirm.
    svg_tools = SVGTools(samplingrate=int(args.samplingrate))
    folder=args.inputFolder #=r"\\chubaka\home\florian.kromp\settings\desktop\nucleusanalyzer\FFG COIN VISIOMICS\Ongoing\EvaluationMetrics\HaCat_grown\10"
    print(os.path.join(folder,"*[!mask]."+args.ending))
    #images = glob.glob(os.path.join(folder,"*[!mask]."+args.ending))
    images = glob.glob(os.path.join(folder,"*."+args.ending))

    # Derive the mask path per image: '<name>.<ext>' -> '<name>_mask.TIF'.
    # NOTE(review): assumes exactly one dot in each basename — confirm.
    masks = [x.replace('.' + os.path.basename(x).split('.')[1],"_mask.TIF") for x in images] 
    print (images)
    print(masks)
    for index,elem in enumerate(images):

        img = AnnotatedImage()
        img.readFromPath(images[index], masks[index])
        # Save the raw image as an 8-bit JPEG next to the source file.
        cv2.imwrite(os.path.join(folder, os.path.basename(elem).replace('.'+os.path.basename(elem).split('.')[1],'_raw.jpg')),(img.getRaw() * 255.0).astype(np.uint8))
        svg_tools.openSVG(img.getRaw().shape[0],img.getRaw().shape[1])
        svg_tools.addRawImage(name='Raw image', img_path=(os.path.basename(elem).replace('.'+os.path.basename(elem).split('.')[1],'_raw.jpg')))
        svg_tools.addMaskLayer(img.getMask(),'Single nuclei','#00FF00',0.5)
        svg_tools.closeSVG()


        print("Path to SVG: " + os.path.join(folder, os.path.basename(elem).replace('.' + os.path.basename(elem).split('.')[1],'_svg.svg')))
        svg_tools.writeToPath(os.path.join(folder, os.path.basename(elem).replace('.' + os.path.basename(elem).split('.')[1],'_svg.svg')))
def main():
    """Generate an artificial annotated dataset from real image/mask pairs.

    Loads all image/mask pairs for one tissue, optionally rescales them to a
    normalized object size, then composes ``--nr_images`` artificial 256x256
    images by placing randomly enhanced nuclei on a grid. Each artificial
    image is saved as a 3-channel JPEG (the grayscale image duplicated into
    both halves) plus a uint8 mask TIFF.
    """
    parser = argparse.ArgumentParser(description='Train model.')
    parser.add_argument('--tissue',
                        help='select tissue to train.',
                        default=None)
    parser.add_argument('--inputFolder',
                        help='Select input folder.',
                        default=None)
    parser.add_argument('--outputFolder',
                        help='select output folder',
                        default=None)
    parser.add_argument('--nr_images',
                        help='select number of images to create',
                        default=None)
    parser.add_argument('--overlapProbability',
                        help='select overlapProbability',
                        default=None)
    parser.add_argument('--scale', help='select output folder', default=None)

    args = parser.parse_args()
    tisquant = TisQuantExtract()
    config = Config
    if args.tissue:
        config.diagnosis = [args.tissue]

    if args.outputFolder:
        config.outputFolder = args.outputFolder

    # Overlap probability defaults to 0.5 when not given.
    if args.overlapProbability:
        args.overlapProbability = float(args.overlapProbability)
    else:
        args.overlapProbability = 0.5
    if args.scale == '1':
        config.scale = True

    print(config.diagnosis)
    tools = Tools()

    annotated_nuclei = AnnotatedObjectSet()
    annotated_images = []
    # BUG FIX: sort glob results so images and masks pair up by index
    # (glob order is filesystem-dependent).
    ids_images = sorted(glob.glob(
        os.path.join(args.inputFolder, config.diagnosis[0], 'images',
                     '*.tif')))
    ids_masks = sorted(glob.glob(
        os.path.join(args.inputFolder, config.diagnosis[0], 'masks',
                     '*.tif')))

    for img_path, mask_path in zip(ids_images, ids_masks):
        test = AnnotatedImage()
        test.readFromPath(img_path, mask_path)
        annotated_images.append(test)

    # Create artificial new dataset: normalize mean object size per image
    scales = tools.getNormalizedScales(annotated_images)

    for index, img in enumerate(annotated_images):
        test = AnnotatedImage()
        if config.scale:
            test.createWithArguments(
                tools.rescale_image(img.getRaw(),
                                    (scales[index], scales[index])),
                tools.rescale_mask(img.getMask(),
                                   (scales[index], scales[index]),
                                   make_labels=True))
        else:
            test.createWithArguments(img.getRaw(), img.getMask())
        annotated_nuclei.addObjectImage(
            test, useBorderObjects=config.useBorderObjects)

    # Candidate nuclei counts per artificial image (square numbers => the
    # grid is completely filled).
    if config.scale == 0:
        if args.tissue == 'Ganglioneuroma':
            possible_numbers = [9, 16, 25, 36, 49]
        else:
            possible_numbers = [4, 4, 9]
    else:
        possible_numbers = [9, 16, 25, 36, 49]

    # How many images? (default 10)
    if not args.nr_images:
        args.nr_images = 10
    else:
        args.nr_images = int(args.nr_images)

    for t in tqdm(range(0, args.nr_images)):
        # Create one artificial image
        number_nuclei = random.randint(0, len(possible_numbers) - 1)
        img = ArtificialAnnotatedImage(
            width=256,
            height=256,
            number_nuclei=possible_numbers[number_nuclei],
            probabilityOverlap=args.overlapProbability)
        total_added = 0
        for i in range(0, possible_numbers[number_nuclei]):
            test = annotated_nuclei.returnArbitraryObject()
            # Only ~half of the candidate nuclei are enhanced and placed.
            if (randint(0, 1)):
                test = tools.arbitraryEnhance(test)
                total_added += img.addImageAtGridPosition(test)
        if (total_added > 0):
            shape_y = img.getRaw().shape[0]
            shape_x = img.getRaw().shape[1]
            # READABILITY FIX: the original used one enormous chained slice
            # assignment; write the same grayscale image into both halves of
            # all three channels explicitly (identical result).
            raw = img.getRaw()
            img_new = np.zeros((shape_y, shape_x * 2, 3), dtype=np.float32)
            for channel in range(3):
                img_new[:, 0:shape_x, channel] = raw
                img_new[:, shape_x:2 * shape_x, channel] = raw
            scipy.misc.toimage(
                img_new, cmin=0.0,
                cmax=1.0).save(config.outputFolder + config.diagnosis[0] +
                               '\\images\\Img_' + str(t) + '.jpg')
            tifffile.imsave(config.outputFolder + config.diagnosis[0] +
                            '\\masks\\Mask_' + str(t) + '.tif',
                            img.getMask(),
                            dtype=np.uint8)
Ejemplo n.º 7
0
    def arbitraryEnhance(self, annotated_image):
        """Randomly augment an annotated image and return a new one.

        Each augmentation — horizontal/vertical flip, rotation, brightness
        shift, Gaussian noise, Gaussian blur, rescale — is applied
        independently with probability 0.5. The input image is not modified;
        a freshly constructed ``AnnotatedImage`` is returned.

        Assumes the raw image is a float array in [0, 1] — the noise/blur
        branches convert to 8-bit space and back.
        """
        x = annotated_image.getRaw()
        y = annotated_image.getMask()
        # NOTE: the original contained a permanently disabled (`if 0:`)
        # elastic-deformation branch and unused `x_rot`/`y_rot` buffers;
        # both were dead code and have been removed.

        if randint(0, 1):  # flip horizontally
            x = np.fliplr(x)
            y = np.fliplr(y)
        if randint(0, 1):  # flip vertically
            x = np.flipud(x)
            y = np.flipud(y)
        if randint(0, 1):  # rotate raw and mask by the same random angle
            rot_angle = np.random.randint(-90, 90)
            x = trf.rotate(x, rot_angle)
            # order=0 (nearest neighbour) keeps mask labels intact
            y = trf.rotate(y.squeeze(), rot_angle, order=0)
        if randint(0, 1):  # shift brightness towards a mean of 0.3
            x[x < 0] = 0.0
            x[x > 1.0] = 1.0
            x = x + uniform(-np.absolute(0.3 - x.mean()),
                            np.absolute(0.3 - x.mean()))
            x[x < 0] = 0
            x[x > 1] = 1.0
        if randint(0, 1):  # additive Gaussian noise (sigma=2 in 8-bit space)
            x = x * 255.0
            x = x + np.random.normal(0, 2, [x.shape[0], x.shape[1]])
            x[x < 0] = 0
            x[x > 255] = 255
            x = x / 255.0
        if randint(0, 1):  # Gaussian blur with a forced-odd kernel size
            x = x * 255.0
            kernel_size = np.random.randint(1, 3)
            if (kernel_size % 2 == 0):
                kernel_size = kernel_size + 1
            x = cv2.GaussianBlur(x, (kernel_size, kernel_size), 0)
            x[x < 0] = 0
            x[x > 255] = 255
            x = x / 255.0
        if randint(0, 1):  # random rescale between 80% and 120%
            range_scale = uniform(0.8, 1.2)
            x = ski_transform.resize(
                x,
                (int(x.shape[0] * range_scale), int(x.shape[1] * range_scale)),
                mode='reflect')
            # threshold at 0.5 to binarize the interpolated mask
            y = (ski_transform.resize(
                y,
                (int(y.shape[0] * range_scale), int(y.shape[1] * range_scale)),
                mode='reflect') > 0.5)
        img_new = AnnotatedImage()
        img_new.createWithArguments(x, y)
        return img_new
Ejemplo n.º 8
0
    def enhanceImage(self,
                     img,
                     flip_left_right=None,
                     flip_up_down=None,
                     deform=None):
        """Return a list of augmented copies of *img* (the original first).

        ``flip_left_right`` / ``flip_up_down`` each double the list by
        mirroring every image collected so far; ``deform`` appends five
        successive elastic deformations of each image collected so far.

        IDIOM FIX: replaced the `xrange` shim, `.__len__()` dunder calls and
        C-style index loops with snapshot iteration — behavior unchanged.
        """
        img_list = [img]

        # flipping: iterate over a snapshot because we append while looping
        if flip_left_right:
            for base in list(img_list):
                img_new = AnnotatedImage()
                img_new.createWithArguments(np.fliplr(base.getRaw()),
                                            np.fliplr(base.getMask()))
                img_list.append(img_new)
        if flip_up_down:
            for base in list(img_list):
                img_new = AnnotatedImage()
                img_new.createWithArguments(np.flipud(base.getRaw()),
                                            np.flipud(base.getMask()))
                img_list.append(img_new)
        if deform:
            for base in list(img_list):
                x = base.getRaw()
                y = base.getMask()
                # five successive deformations, each appended to the list
                for _ in range(5):
                    def_func = self.elastic_transformations(2000, 60, x.shape)
                    x = def_func(x)
                    # deform each label separately and reassemble the mask so
                    # labels are not blended by interpolation
                    y_new = np.zeros((y.shape[0], y.shape[1]), dtype=np.uint16)
                    for z in range(0, y.max() + 1):
                        y_tmp = def_func((y == z) * 255)
                        y_new = y_new + (z * (y_tmp == 255)).astype(np.uint16)
                    y = y_new
                    img_new = AnnotatedImage()
                    img_new.createWithArguments(x, y)
                    img_list.append(img_new)

        return img_list
Ejemplo n.º 9
0
def main():
    """Build a pix2pix dataset with full augmentation (flips + deformation).

    Reads paired image/mask ``*.tif`` files from
    ``<inputFolder>/<tissue>/images`` and ``.../masks``, augments each pair
    with flips and elastic deformation, and writes the pix2pix image pairs
    via ``Tools.createPix2pixDataset``.
    """
    parser = argparse.ArgumentParser(description='Train model.')
    parser.add_argument('--tissue',
                        help='select tissue to train.',
                        default=None)
    parser.add_argument('--inputFolder',
                        help='Select input folder.',
                        default=None)
    parser.add_argument('--outputFolder',
                        help='select output folder',
                        default=None)
    parser.add_argument('--scale', help='select output folder', default=None)
    args = parser.parse_args()
    tisquant = TisQuantExtract()
    config = Config
    if args.tissue:
        config.diagnosis = [args.tissue]

    if args.outputFolder:
        config.outputFolder = args.outputFolder

    if args.scale:
        config.scale = int(args.scale)

    # BUG FIX: "Scale: " + args.scale raised TypeError when --scale was
    # omitted (args.scale is None); convert explicitly.
    print("Scale: " + str(args.scale))

    print(config.diagnosis)
    print(config.outputFolder)
    tools = Tools()

    annotated_nuclei = AnnotatedObjectSet()
    # BUG FIX: sort glob results so the i-th image pairs with the i-th mask
    # (glob order is filesystem-dependent).
    ids_images = sorted(glob.glob(
        os.path.join(args.inputFolder, config.diagnosis[0], 'images',
                     '*.tif')))
    ids_masks = sorted(glob.glob(
        os.path.join(args.inputFolder, config.diagnosis[0], 'masks',
                     '*.tif')))

    # Create dataset for training the pix2pix-network based on image pairs
    for img_path, mask_path in zip(ids_images, ids_masks):
        test = AnnotatedImage()
        test.readFromPath(img_path, mask_path)
        enhanced_images = tools.enhanceImage(test,
                                             flip_left_right=True,
                                             flip_up_down=True,
                                             deform=True)
        # distinct loop variable — the original shadowed the outer index
        for img in enhanced_images:
            annotated_nuclei.addObjectImage(
                img, useBorderObjects=config.useBorderObjects)

    # Create the image pairs
    tools.createPix2pixDataset(annotated_nuclei, config)
Ejemplo n.º 10
0
def main():
    """Export annotated images and layered SVG overlays for one tissue.

    For every level-3 annotated image of the configured diagnosis, writes a
    raw JPEG plus an SVG with three mask layers (not annotated, clumps,
    single nuclei) into ``<outputFolder>/<diagnosis>``.
    """
    parser = argparse.ArgumentParser(description='Train model.')
    parser.add_argument('--tissue',
                        help='select tissue to train.',
                        default=None)
    parser.add_argument('--inputFolder',
                        help='Select input folder.',
                        default=None)
    parser.add_argument('--outputFolder',
                        help='select output folder',
                        default=None)
    parser.add_argument('--nr_images',
                        help='select number of images to create',
                        default=None)
    parser.add_argument('--overlapProbability',
                        help='select overlapProbability',
                        default=None)
    parser.add_argument('--samplingrate',
                        help='how fine the contour shall be sampled',
                        default=None)
    args = parser.parse_args()
    tisquant = TisQuantExtract()
    config = Config
    if args.tissue:
        config.diagnosis = [args.tissue]

    if args.outputFolder:
        config.outputFolder = args.outputFolder

    print(config.diagnosis)
    tools = Tools()
    # CONSISTENCY FIX: the sibling exporter casts --samplingrate to int
    # before constructing SVGTools; this one passed the raw argparse string.
    # (--samplingrate is required in practice: int(None) would raise.)
    svg_tools = SVGTools(samplingrate=int(args.samplingrate))

    # All level-3 annotated images matching the configured filters
    ids_paths = tisquant.dbconnector.execute(
        query=tisquant.getLevel3AnnotatedImagesByDiagnosis_Query(
            diagnosis=config.diagnosis,
            magnification=config.magnification,
            staining_type=config.staining_type,
            staining=config.staining,
            segmentation_function=config.segmentation_function,
            annotator=config.annotator,
            device=config.device))

    for index, elem in enumerate(ids_paths):
        # Ground truth from the most experienced annotator, levels 3 and 2
        groundtruth_path_l3 = tisquant.dbconnector.execute(
            tisquant.getLevel3AnnotationByImageIdUsingMaxExperience_Query(
                elem[0], config.annotator))[0]
        groundtruth_path_l2 = tisquant.dbconnector.execute(
            tisquant.getLevel2AnnotationByImageIdUsingMaxExperience_Query(
                elem[0], config.annotator))[0]
        img = AnnotatedImage()
        img.readFromPath(tools.getLocalDataPath(elem[1], 1),
                         tools.getLocalDataPath(groundtruth_path_l2[0], 3))
        # Save the raw image as an 8-bit JPEG for the SVG to reference
        cv2.imwrite(
            os.path.join(config.outputFolder, config.diagnosis[0],
                         str(elem[0]) + '_raw.jpg'),
            (img.getRaw() * 255.0).astype(np.uint8))
        svg_tools.openSVG(img.getRaw().shape[0], img.getRaw().shape[1])
        # SVG references the JPEG by relative path (same output folder)
        svg_tools.addRawImage(name='Raw image',
                              img_path=(str(elem[0]) + '_raw.jpg'))
        svg_tools.addMaskLayer(img.getMask()[:, :, 0], 'Not annotated',
                               '#0000FF', 0.5)
        svg_tools.addMaskLayer(img.getMask()[:, :, 2], 'Clumps', '#FF0000',
                               0.5)
        # Reload with the level-3 ground truth for the single-nuclei layer
        img.readFromPath(tools.getLocalDataPath(elem[1], 1),
                         tools.getLocalDataPath(groundtruth_path_l3[0], 3))
        svg_tools.addMaskLayer(img.getMask(), 'Single nuclei', '#00FF00', 0.5)
        svg_tools.closeSVG()

        svg_tools.writeToPath(
            os.path.join(config.outputFolder, config.diagnosis[0],
                         str(elem[0]) + '_svg.svg'))
def main():
    """Build an artificial dataset: compose randomly enhanced annotated
    nuclei onto synthetic 256x256 tiles and write paired image/mask files.

    Expects --inputFolder to contain <tissue>/images/*.tif and
    <tissue>/masks/*.tif; writes JPG images and TIF label masks below
    --outputFolder/<tissue>/images and .../masks.
    """
    parser = argparse.ArgumentParser(description='Train model.')
    parser.add_argument('--tissue', help='select tissue to train.', default=None)
    parser.add_argument('--inputFolder', help='Select input folder.', default=None)
    parser.add_argument('--outputFolder', help='select output folder', default=None)
    parser.add_argument('--nr_images', help='select number of images to create', default=None)
    parser.add_argument('--overlapProbability', help='select overlapProbability', default=None)
    parser.add_argument('--scale', help='select output folder', default=None)
    parser.add_argument('--img_prefix', help='select output folder', default='Img_')
    parser.add_argument('--mask_prefix', help='select output folder', default='Mask_')
    args = parser.parse_args()

    config = Config
    if args.tissue:
        config.diagnosis = [args.tissue]
    if args.outputFolder:
        config.outputFolder = args.outputFolder

    # Probability that two nuclei may overlap on the synthetic tile.
    args.overlapProbability = (float(args.overlapProbability)
                               if args.overlapProbability else 0.5)

    # NOTE(review): n_freq mirrors the pix2pix pipeline setting but is not
    # read anywhere below in this function.
    n_freq = 20 if args.tissue == 'Ganglioneuroma' else 30

    if args.scale == '1':
        config.scale = True

    print(config.diagnosis)
    tools = Tools()

    annotated_nuclei = []
    annotated_images = []
    ids_images = glob.glob(
        os.path.join(args.inputFolder, config.diagnosis[0], 'images', '*.tif'))
    ids_masks = glob.glob(
        os.path.join(args.inputFolder, config.diagnosis[0], 'masks', '*.tif'))

    # Load every image/mask pair (masks are uint16 label images).
    for img_path, mask_path in zip(ids_images, ids_masks):
        loaded = AnnotatedImage()
        loaded.readFromPath(img_path, mask_path, type='uint16')
        annotated_images.append(loaded)

    # Optionally rescale each image to the dataset's normalized object size,
    # then collect its annotated objects into one AnnotatedObjectSet each.
    scales = tools.getNormalizedScales(annotated_images)
    for index, img in enumerate(annotated_images):
        rescaled = AnnotatedImage()
        annotated_nuclei.append(AnnotatedObjectSet())
        if config.scale:
            rescaled.createWithArguments(
                tools.rescale_image(img.getRaw(), (scales[index], scales[index])),
                tools.rescale_mask(img.getMask(), (scales[index], scales[index]),
                                   make_labels=True))
        else:
            rescaled.createWithArguments(img.getRaw(), img.getMask())
        annotated_nuclei[index].addObjectImage(
            rescaled, useBorderObjects=config.useBorderObjects,
            tissue=args.tissue, scale=Config.scale)
        del rescaled

    # Candidate nuclei counts per synthetic tile (perfect squares -> grid).
    if config.scale == 0:
        if args.tissue == 'Ganglioneuroma':
            possible_numbers = [9, 16, 25, 36, 49]
        else:
            possible_numbers = [4, 4, 9]
    else:
        possible_numbers = [9, 16, 25, 36, 49]

    # How many artificial images to create (default 10).
    args.nr_images = int(args.nr_images) if args.nr_images else 10

    for t in tqdm(range(args.nr_images)):
        nr_img = random.randint(0, len(annotated_nuclei) - 1)
        number_nuclei = random.randint(0, len(possible_numbers) - 1)

        # Estimate the background intensity distribution from pixels that are
        # well clear of any annotated object (erode the inverted mask).
        tmp_image = annotated_nuclei[nr_img].images[0].getRaw()
        tmp_mask = annotated_nuclei[nr_img].images[0].getMask()
        kernel = np.ones((15, 15), np.uint8)
        bg = cv2.erode((tmp_mask == 0).astype(np.uint8), kernel, iterations=1)
        bg = np.sort(tmp_image[np.where(bg > 0)])

        img = ArtificialAnnotatedImage(
            width=256, height=256,
            number_nuclei=possible_numbers[number_nuclei],
            probabilityOverlap=args.overlapProbability, background=bg)

        # Drop a random subset of (randomly enhanced) nuclei onto the grid.
        total_added = 0
        for _ in range(possible_numbers[number_nuclei]):
            candidate = annotated_nuclei[nr_img].returnArbitraryObject()
            if randint(0, 1):
                candidate = tools.arbitraryEnhance(candidate)
                total_added += img.addImageAtGridPosition(candidate)

        if total_added > 0:
            shape_y = img.getRaw().shape[0]
            shape_x = img.getRaw().shape[1]
            # Side-by-side RGB image with identical halves, matching the
            # pix2pix paired-image layout.
            img_new = np.zeros((shape_y, shape_x * 2, 3), dtype=np.float32)
            img_new[:, 0:shape_x, 0] = img_new[:, 0:shape_x, 1] = \
                img_new[:, 0:shape_x, 2] = img_new[:, shape_x:2 * shape_x, 0] = \
                img_new[:, shape_x:2 * shape_x, 1] = \
                img_new[:, shape_x:2 * shape_x, 2] = img.getRaw()
            # os.path.join replaces the original hard-coded '\\' separators,
            # which only worked on Windows (and required a trailing slash on
            # outputFolder).
            scipy.misc.toimage(img_new, cmin=0.0, cmax=1.0).save(
                os.path.join(config.outputFolder, config.diagnosis[0], 'images',
                             args.img_prefix + str(t) + '.jpg'))
            tifffile.imsave(
                os.path.join(config.outputFolder, config.diagnosis[0], 'masks',
                             args.mask_prefix + str(t) + '.tif'),
                img.getMask(), dtype=np.uint8)
Ejemplo n.º 12
0
    def arbitraryEnhance(self, annotated_image):
        """Return a randomly augmented copy of *annotated_image*.

        Each augmentation (horizontal/vertical flip, rotation, additive
        Gaussian noise, Gaussian blur, rescale) is applied independently
        with probability 0.5; the mask is transformed in lockstep with the
        raw image.  The elastic-deformation and brightness branches are
        currently disabled (guarded by ``if 0:``).

        Removed the unused ``x_rot``/``y_rot``/``pixels`` locals and the
        Python-2 ``xrange`` shim; no random-draw order is affected.
        """
        x = annotated_image.getRaw()
        y = annotated_image.getMask()

        if randint(0, 1):  # flip horizontally
            x = np.fliplr(x)
            y = np.fliplr(y)
        if randint(0, 1):  # flip vertically
            x = np.flipud(x)
            y = np.flipud(y)
        if 0:  # elastic deformation (disabled)
            def_func = self.elastic_transformations(2000, 60, x.shape)
            x = def_func(x)
            # Deform each labelled object separately so labels stay discrete.
            y_new = np.zeros((y.shape[0], y.shape[1]), dtype=np.uint16)
            for z in range(0, y.max() + 1):
                y_tmp = def_func((y == z) * 255)
                y_new = y_new + (z * (y_tmp == 255)).astype(np.uint16)
            y = y_new
        if randint(0, 1):  # rotate by a random angle in [-90, 90)
            rot_angle = np.random.randint(-90, 90)
            x = trf.rotate(x, rot_angle)
            # order=0: nearest-neighbour so label values are not interpolated.
            y = trf.rotate(y.squeeze(), rot_angle, order=0)
        if 0:  # brightness shift on nucleus pixels (disabled)
            nucl_pixels = x * y
            pixels = np.where(nucl_pixels > 0)
            x[x < 0] = 0.0
            x[x > 1.0] = 1.0
            if ((nucl_pixels[pixels].mean() > 0.2)
                    and (nucl_pixels[pixels].mean() < 0.5)):
                x[pixels] += uniform(0, 0.3)
            elif (nucl_pixels[pixels].mean() < 0.8):
                x[pixels] -= uniform(0, 0.3)
            x[x < 0] = 0
            x[x > 1] = 1.0
        if randint(0, 1):  # additive Gaussian noise (sigma=2 on 0..255 scale)
            x = x * 255.0
            x = x + np.random.normal(0, 2, [x.shape[0], x.shape[1]])
            x[x < 0] = 0
            x[x > 255] = 255
            x = x / 255.0
        if randint(0, 1):  # Gaussian blur with a random odd kernel size
            x = x * 255.0
            kernel_size = np.random.randint(1, 3)
            if kernel_size % 2 == 0:
                kernel_size = kernel_size + 1
            x = cv2.GaussianBlur(x, (kernel_size, kernel_size), 0)
            x[x < 0] = 0
            x[x > 255] = 255
            x = x / 255.0
        if randint(0, 1):  # rescale by a random factor in [0.8, 1.2]
            range_scale = uniform(0.8, 1.2)
            x = ski_transform.resize(
                x,
                (int(x.shape[0] * range_scale), int(x.shape[1] * range_scale)),
                mode='reflect')
            # Threshold after interpolation to recover a binary mask.
            y = (ski_transform.resize(
                y,
                (int(y.shape[0] * range_scale), int(y.shape[1] * range_scale)),
                mode='reflect') > 0.5)
        img_new = AnnotatedImage()
        img_new.createWithArguments(x, y)
        return img_new
Ejemplo n.º 13
0
    def createPix2pixDataset(self,
                             annotated_nuclei,
                             config,
                             n_freq=30,
                             tissue=None):
        """Write side-by-side (natural | artificial) image pairs for pix2pix.

        For every tiled annotated image, an artificial counterpart is rebuilt
        from its objects on a background sampled from the real image, and the
        two are saved concatenated horizontally as one grayscale-RGB JPG
        under ``config.outputFolder``.

        Args:
            annotated_nuclei: AnnotatedObjectSet-like container; its
                ``.images`` provide ``getRaw()``/``getMask()``.
            config: configuration object; ``scale``, ``diagnosis``,
                ``outputFolder`` and ``useBorderObjects`` are read.
            n_freq: retained for the (removed, previously commented-out)
                low-frequency filtering step; unused here.
            tissue: retained for the same reason; unused here.
        """
        images = [img.getRaw() for img in annotated_nuclei.images]
        masks = [img.getMask() for img in annotated_nuclei.images]

        # Mean object size drives the per-image rescale factor.
        print("Calculate mean object size ...")
        scales_for_conv = self.getNormalizedScales(annotated_nuclei.images)

        # Rescale and tile; the trailing outputs of rescaleAndTile are unused.
        print("Rescale and tile images and masks ...")
        images, masks, _, _, _ = self.rescaleAndTile(images=images,
                                                     masks=masks,
                                                     scales=scales_for_conv,
                                                     overlap=0,
                                                     rescale=config.scale,
                                                     usePartial=False)

        # File-name stem: the single diagnosis, or 'combined' for several.
        if len(config.diagnosis) > 1:
            img_name = 'combined'
        else:
            img_name = config.diagnosis[0]

        print("Create artificial dataset ...")
        # NOTE(review): the last tile is skipped (len - 1), as in the
        # original code — confirm whether that is intentional.
        for i in range(len(images) - 1):
            # Background intensity sample: pixels eroded well clear of any
            # annotated object in the mask.
            tmp_image = images[i]
            tmp_mask = masks[i]
            kernel = np.ones((15, 15), np.uint8)
            bg = cv2.erode((tmp_mask == 0).astype(np.uint8),
                           kernel,
                           iterations=1)
            bg = np.sort(tmp_image[np.where(bg > 0)])

            img_nat = AnnotatedImage()
            img_nat.createWithArguments(images[i], masks[i])
            img_art = ArtificialAnnotatedImage.transformToArtificialImage(
                image=img_nat,
                useBorderObjects=config.useBorderObjects,
                background=bg)
            img_art_beforefiltering = AnnotatedImage()
            img_art_beforefiltering.createWithArguments(
                img_art.getRaw(), img_art.getMask())

            # Natural image on the left, artificial on the right.
            # NOTE(review): the buffer is sized from images[0].shape but the
            # columns are indexed via INPUT_SHAPE — these must agree; confirm.
            img_combined = np.zeros(
                (images[0].shape[0], images[0].shape[1] * 2), np.float32)
            img_combined[:, 0:INPUT_SHAPE[1]] = img_nat.getRaw()
            img_combined[:,
                         INPUT_SHAPE[1]:INPUT_SHAPE[1] * 2] = img_art.getRaw()
            plt.imshow(img_combined, cmap='gray')  # debug preview only
            img_to_sav = np.zeros(
                (img_combined.shape[0], img_combined.shape[1], 3), np.float32)
            img_to_sav[:, :, 0] = img_combined
            img_to_sav[:, :, 1] = img_combined
            img_to_sav[:, :, 2] = img_combined

            # os.path.join replaces the original hard-coded '\\' separator,
            # which only worked on Windows.
            scipy.misc.toimage(img_to_sav, cmin=0.0, cmax=1.0).save(
                os.path.join(config.outputFolder,
                             'Img_' + img_name + '_' + str(i) + '.jpg'))