Example #1
# Imports inferred from the calls below; getWsi and xml_to_mask are
# project-local helpers (see Example #8 for their import paths).
import warnings

import numpy as np
from skimage.io import imread, imsave
from skimage.transform import resize

from getWsi import getWsi
from xml_to_mask import xml_to_mask


def return_region(args, xmlID, wsiID, fileID, yStart, xStart, idxy, idxx,
                  downsampleRate, outdirT, region_size, dirs, chop_regions,
                  cNum):  # perform cutting in parallel

    if chop_regions[idxy, idxx] != 0:
        uniqID = fileID + str(yStart) + str(xStart)
        if wsiID.split('.')[-1] != 'tif':
            slide = getWsi(wsiID)
            Im = np.array(
                slide.read_region((xStart, yStart), 0,
                                  (region_size, region_size)))
            Im = Im[:, :, :3]
        else:
            yEnd = yStart + region_size
            xEnd = xStart + region_size
            Im = np.zeros([region_size, region_size, 3], dtype=np.uint8)
            Im_ = imread(wsiID)[yStart:yEnd, xStart:xEnd, :3]
            Im[0:Im_.shape[0], 0:Im_.shape[1], :] = Im_

        mask_annotation = xml_to_mask(xmlID, [xStart, yStart],
                                      [region_size, region_size],
                                      downsampleRate, 0)

        # Shrink each spatial dimension by sqrt(downsampleRate)
        s1 = int(Im.shape[0] / downsampleRate**.5)
        s2 = int(Im.shape[1] / downsampleRate**.5)
        Im = resize(Im, (s1, s2), mode='reflect')
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            imsave(outdirT + '/regions/' + uniqID + dirs['imExt'], Im)
            imsave(outdirT + '/masks/' + uniqID + dirs['maskExt'],
                   mask_annotation)
            '''
            plt.subplot(121)
            plt.imshow(Im)
            plt.subplot(122)
            plt.imshow(mask_annotation)
            plt.show()
            '''

        # Record which of the cNum label classes appear in this region's mask
        classespresent = np.unique(mask_annotation)
        classes = range(0, cNum)
        classEnumC = np.zeros([cNum, 1])

        for index, chk in enumerate(classes):
            if chk in classespresent:
                classEnumC[index] = classEnumC[index] + 1
        return classEnumC
    else:
        # Region skipped by the tissue detector: report zero class counts
        classEnumC = np.zeros([cNum, 1])
        return classEnumC
Example #2
# Imports inferred from the calls below; getWsi is the project-local module
# from Example #8, while inspect_mask and block_size are assumed to be
# defined elsewhere in the same module (block_size is the tile edge length).
import multiprocessing

import numpy as np
from joblib import Parallel, delayed
from PIL import Image

import getWsi


def get_perf(wsi, xml1, xml2, args):
    #specs=inspect_mask(index_y[0],index_x[0],block_size,xml_annotation,xml_prediction)

    if args.wsi_ext != '.tif':
        WSIinfo = getWsi.getWsi(wsi)
        dim_x, dim_y = WSIinfo.dimensions
    else:
        im = Image.open(wsi)
        dim_x, dim_y = im.size

    totalPixels = float(dim_x * dim_y)  # np.float was removed in NumPy 1.24
    index_y = range(0, dim_y - block_size, block_size)
    index_x = range(0, dim_x - block_size, block_size)

    num_cores = multiprocessing.cpu_count()
    r = Parallel(n_jobs=num_cores)(delayed(inspect_mask)(yStart=i,
                                                         xStart=j,
                                                         block_size=block_size,
                                                         annotation_xml=xml1,
                                                         prediction_xml=xml2)
                                   for i in index_y for j in index_x)

    # Accumulate confusion counts per class (five classes assumed)
    TN = np.zeros((1, 5))
    TP = np.zeros((1, 5))
    FP = np.zeros((1, 5))
    FN = np.zeros((1, 5))
    sensitivity = np.zeros((1, 5))
    specificity = np.zeros((1, 5))
    precision = np.zeros((1, 5))

    for classID in range(0, 5):
        for t in range(0, len(r)):

            currentspecs = r[t]

            TP[0, classID] = TP[0, classID] + currentspecs[classID, 0]
            FP[0, classID] = FP[0, classID] + currentspecs[classID, 1]
            FN[0, classID] = FN[0, classID] + currentspecs[classID, 2]
            TN[0, classID] = TN[0, classID] + currentspecs[classID, 3]

        if (TP[0, classID] + FN[0, classID]) == 0:
            sensitivity[0, classID] = 0
        else:
            sensitivity[0, classID] = float(
                TP[0, classID]) / float(TP[0, classID] + FN[0, classID])

        if (TN[0, classID] + FP[0, classID]) == 0:
            specificity[0, classID] = 0
        else:
            specificity[0, classID] = float(
                TN[0, classID]) / float(TN[0, classID] + FP[0, classID])
        #precision[0,classID]=np.float(TP[0,classID])/np.float(TP[0,classID]+FP[0,classID])
    return sensitivity, specificity
Example #3
# Same imports as Example #1, plus sys for the progress printout;
# restart_line is a project-local helper that rewinds the console cursor.
def return_region(args, wsi_mask, wsiID, fileID, yStart, xStart, idxy, idxx,
                  downsampleRate, outdirT, region_size, dirs, chop_regions,
                  classNum):  # perform cutting in parallel
    sys.stdout.write('   <' + str(xStart) + '/' + str(yStart) + '>   ')
    sys.stdout.flush()
    restart_line()

    if chop_regions[idxy, idxx] != 0:

        uniqID = fileID + str(yStart) + str(xStart)
        if wsiID.split('.')[-1] != 'tif':
            slide = getWsi(wsiID)
            Im = np.array(
                slide.read_region((xStart, yStart), 0,
                                  (region_size, region_size)))
            Im = Im[:, :, :3]
        else:
            yEnd = yStart + region_size
            xEnd = xStart + region_size
            Im = np.zeros([region_size, region_size, 3], dtype=np.uint8)
            Im_ = imread(wsiID)[yStart:yEnd, xStart:xEnd, :3]
            Im[0:Im_.shape[0], 0:Im_.shape[1], :] = Im_

        # Crop the precomputed whole-slide mask instead of rasterizing XML
        mask_annotation = wsi_mask[yStart:yStart + region_size,
                                   xStart:xStart + region_size]

        # Zero-pad edge tiles that fall short of region_size
        o1, o2 = mask_annotation.shape
        if o1 != region_size:
            mask_annotation = np.pad(mask_annotation,
                                     ((0, region_size - o1), (0, 0)),
                                     mode='constant')
        if o2 != region_size:
            mask_annotation = np.pad(mask_annotation,
                                     ((0, 0), (0, region_size - o2)),
                                     mode='constant')

        if downsampleRate != 1:
            c = Im.shape
            s1 = int(c[0] / downsampleRate**.5)
            s2 = int(c[1] / downsampleRate**.5)
            Im = (resize(Im, (s1, s2), mode='reflect') * 255).astype('uint8')
            # order=0 (nearest neighbor) keeps the integer labels intact
            mask_annotation = resize(mask_annotation, (s1, s2), order=0,
                                     preserve_range=True)

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            imsave(outdirT + '/regions/' + uniqID + dirs['imExt'], Im)
            imsave(outdirT + '/masks/' + uniqID + dirs['maskExt'],
                   np.uint8(mask_annotation))

        classespresent = np.unique(mask_annotation)
        classes = range(0, classNum)
        classEnumC = np.zeros([classNum, 1])

        for index, chk in enumerate(classes):
            if chk in classespresent:
                classEnumC[index] = classEnumC[index] + 1
        return classEnumC
    else:
        # Region skipped by the tissue detector: report zero class counts
        classEnumC = np.zeros([classNum, 1])
        return classEnumC
Example #4
# Whole-slide pixel-wise variant of Example #2; imports as in Examples #1
# and #2 (xml_to_mask, getWsi, numpy, PIL).
def get_perf(wsi, xml1, xml2, args):
    if args.wsi_ext != '.tif':
        WSIinfo = getWsi.getWsi(wsi)
        dim_x, dim_y = WSIinfo.dimensions
    else:
        im = Image.open(wsi)
        dim_x, dim_y = im.size

    totalPixels = float(dim_x * dim_y)

    # annotated xml
    mask_gt = xml_to_mask(xml1, (0, 0), (dim_x, dim_y), 1, 0)
    # predicted xml
    mask_pred = xml_to_mask(xml2, (0, 0), (dim_x, dim_y), 1, 0)

    np.place(mask_pred, mask_pred > 0, 1)
    np.place(mask_gt, mask_gt > 0, 1)

    # Positive class: overlap of predicted and true foreground
    TP = float(np.sum(np.multiply(mask_pred, mask_gt)))
    FP = float(np.sum(mask_pred) - TP)

    # Invert both binary masks to count the background class the same way
    mask_pred = abs(mask_pred - 1)
    mask_gt = abs(mask_gt - 1)
    np.place(mask_pred, mask_pred > 0, 1)
    np.place(mask_gt, mask_gt > 0, 1)

    TN = float(np.sum(np.multiply(mask_pred, mask_gt)))
    FN = float(np.sum(mask_pred) - TN)

    if TP + FP == 0:
        precision = 1
    else:
        precision = (TP / (TP + FP))

    accuracy = ((TP + TN) / (TN + FN + TP + FP))

    if TN + FP == 0:
        specificity = 1
    else:
        specificity = (TN / (FP + TN))

    if TP + FN == 0:
        sensitivity = 1
    else:
        sensitivity = (TP / (TP + FN))

    return sensitivity, specificity, precision, accuracy
Example #5
# Imports inferred from the calls below: numpy, rgb2hsv (skimage.color),
# gaussian (skimage.filters), binary_dilation and diamond
# (skimage.morphology), binary_fill_holes (scipy.ndimage), plus the
# project-local getWsi.
def get_choppable_regions(wsi, index_x, index_y, boxSize, white_percent):
    if wsi.split('.')[-1] != 'tif':
        slide = getWsi(wsi)
        slide_level = slide.level_count - 1

        fullSize = slide.level_dimensions[0]
        resRatio = 16  # analyze a 16x-downsampled thumbnail
        ds_1 = fullSize[0] // resRatio
        ds_2 = fullSize[1] // resRatio
        Im = np.array(slide.get_thumbnail((ds_1, ds_2)))

        ID = wsi.split('.svs')[0]

        # Tissue detection: smooth the saturation channel, threshold it,
        # then dilate and fill holes to get a solid tissue mask
        hsv = rgb2hsv(Im)
        g = gaussian(hsv[:, :, 1], 20)

        binary = (g > 0.05).astype('bool')
        # selem= was renamed footprint= in scikit-image >= 0.19
        binary2 = binary_dilation(binary, selem=diamond(20))
        binary2 = binary_fill_holes(binary2)
        '''
        Im2=Im
        ax1=plt.subplot(121)
        ax1=plt.imshow(Im)
        ax1=plt.subplot(122)
        Im2[binary2==0,:]=0
        ax1=plt.imshow(Im2)

        plt.savefig(ID+'.png')
        '''

        # Mark a tile as choppable when enough of its thumbnail footprint is
        # tissue (full-resolution coordinates scaled down by resRatio)
        choppable_regions = np.zeros((len(index_y), len(index_x)))
        for idxy, yi in enumerate(index_y):
            for idxx, xj in enumerate(index_x):
                yStart = int(np.round(yi / resRatio))
                yStop = int(np.round((yi + boxSize) / resRatio))
                xStart = int(np.round(xj / resRatio))
                xStop = int(np.round((xj + boxSize) / resRatio))
                box_total = (xStop - xStart) * (yStop - yStart)
                if np.sum(binary2[yStart:yStop,
                                  xStart:xStop]) > (white_percent * box_total):
                    choppable_regions[idxy, idxx] = 1

    else:
        choppable_regions = np.ones((len(index_y), len(index_x)))

    return choppable_regions
Example #6
# Variant of Example #5 that thresholds a grayscale thumbnail instead of
# the HSV saturation channel; imports as above minus the color/filter calls.
def get_choppable_regions(wsi, index_x, index_y, boxSize):
    if wsi.split('.')[-1] != 'tif':
        slide = getWsi(wsi)
        slide_level = slide.level_count - 1
        thumbSize = slide.level_dimensions[slide_level]
        fullSize = slide.level_dimensions[0]
        resRatio = (fullSize[0] / thumbSize[0])
        Im = np.array(slide.read_region((0, 0), slide_level, thumbSize))
        ID = wsi.split('.svs')[0]

        # Rec. 709 luma weights (the same coefficients skimage's rgb2gray
        # uses); pure-black padding is forced to white so it is ignored
        grayImage = (0.2125 * Im[:, :, 0] + 0.7154 * Im[:, :, 1] +
                     0.0721 * Im[:, :, 2])
        grayImage[grayImage == 0] = 255

        #thresh = threshold_otsu(grayImage)
        thresh = 240
        binary = grayImage < thresh
        binary = binary_fill_holes(binary)
        #binary=binary(binary,se)
        #binary=binary_fill_holes(binary)
        #binary=remove_small_objects(binary,object_size)

        # A tile counts as choppable if any tissue pixel falls inside it
        choppable_regions = np.zeros((len(index_y), len(index_x)))
        for idxy, yi in enumerate(index_y):
            for idxx, xj in enumerate(index_x):
                yStart = int(np.round((yi) / resRatio))
                yStop = int(np.round((yi + boxSize) / resRatio))
                xStart = int(np.round((xj) / resRatio))
                xStop = int(np.round((xj + boxSize) / resRatio))
                if np.sum(binary[yStart:yStop, xStart:xStop]) > 0:
                    choppable_regions[idxy, idxx] = 1

    else:
        choppable_regions = np.ones((len(index_y), len(index_x)))

    return choppable_regions
Example #7
# This driver assumes the module-level imports and project helpers used
# below (os, glob, time, numpy, joblib, PIL, plus the project-local
# return_region, get_choppable_regions, run_batch, moveimages,
# generateDatalists, and model-generation bookkeeping functions).
def IterateTraining(args):
    ## calculate low resolution block params
    downsampleLR = int(
        args.downsampleRateLR**.5)  #down sample for each dimension
    region_sizeLR = int(args.boxSizeLR *
                        (downsampleLR))  #Region size before downsampling
    stepLR = int(region_sizeLR *
                 (1 - args.overlap_percentLR))  #Step size before downsampling
    ## calculate high resolution block params
    downsampleHR = int(
        args.downsampleRateHR**.5)  #down sample for each dimension
    region_sizeHR = int(args.boxSizeHR *
                        (downsampleHR))  #Region size before downsampling
    stepHR = int(region_sizeHR *
                 (1 - args.overlap_percentHR))  #Step size before downsampling

    global classEnumLR, classEnumHR
    dirs = {'imExt': '.jpeg'}
    dirs['basedir'] = args.base_dir
    dirs['maskExt'] = '.png'
    dirs['modeldir'] = '/MODELS/'
    dirs['tempdirLR'] = '/TempLR/'
    dirs['tempdirHR'] = '/TempHR/'
    dirs['pretraindir'] = '/Deeplab_network/'
    dirs['training_data_dir'] = '/TRAINING_data/'
    dirs['model_init'] = 'deeplab_resnet.ckpt'
    dirs['project'] = '/' + args.project
    dirs['data_dir_HR'] = args.base_dir + '/' + args.project + '/Permanent/HR/'
    dirs['data_dir_LR'] = args.base_dir + '/' + args.project + '/Permanent/LR/'

    ##All folders created, initiate WSI loading by human
    #raw_input('Please place WSIs in ')

    ##Check iteration session

    currentmodels = os.listdir(dirs['basedir'] + dirs['project'] +
                               dirs['modeldir'])

    currentAnnotationIteration = check_model_generation(dirs)

    print('Current training session is: ' + str(currentAnnotationIteration))

    ##Create objects for storing class distributions
    annotatedXMLs = glob(dirs['basedir'] + dirs['project'] +
                         dirs['training_data_dir'] +
                         str(currentAnnotationIteration) + '/*.xml')
    classes = []
    if args.classNum == 0:
        for xml in annotatedXMLs:
            classes.append(get_num_classes(xml))

        classNum_LR = max(classes)
        classNum_HR = max(classes)
    else:
        classNum_LR = args.classNum
        if args.classNum_HR != 0:
            classNum_HR = args.classNum_HR
        else:
            classNum_HR = classNum_LR
    classEnumLR = np.zeros([classNum_LR, 1])
    classEnumHR = np.zeros([classNum_HR, 1])

    ##for all WSIs in the initiating directory:
    if args.chop_data == 'True':
        print('Chopping')

        start = time.time()
        for xmlID in annotatedXMLs:

            #Get unique name of WSI
            fileID = xmlID.split('/')[-1].split('.xml')[0]

            #create memory addresses for wsi files
            for ext in [args.wsi_ext]:
                wsiID = dirs['basedir'] + dirs['project'] + dirs[
                    'training_data_dir'] + str(
                        currentAnnotationIteration) + '/' + fileID + ext

                #Use the first matching WSI file that exists
                if os.path.isfile(wsiID):
                    break

            #Warn if no WSI file was found for this annotation
            if not os.path.isfile(wsiID):
                print('\nError - missing wsi file: ' + wsiID +
                      ' Please provide.\n')

            #Load openslide information about WSI
            if ext != '.tif':
                slide = getWsi(wsiID)
                #WSI level 0 dimensions (largest size)
                dim_x, dim_y = slide.dimensions
            else:
                im = Image.open(wsiID)
                dim_x, dim_y = im.size

            #Generate iterators for parallel chopping of WSIs in low resolution
            index_yLR = range(0, dim_y - stepLR, stepLR)
            index_xLR = range(0, dim_x - stepLR, stepLR)

            #Create memory address for chopped images low resolution
            outdirLR = dirs['basedir'] + dirs['project'] + dirs['tempdirLR']

            #Enumerate cpu core count
            num_cores = multiprocessing.cpu_count()

            #Perform low resolution chopping in parallel and return the number of
            #images in each of the labeled classes
            chop_regions = get_choppable_regions(
                wsi=wsiID,
                index_x=index_xLR,
                index_y=index_yLR,
                boxSize=region_sizeLR,
                white_percent=args.white_percent)

            classEnumCLR = Parallel(n_jobs=num_cores)(
                delayed(return_region)(args=args,
                                       xmlID=xmlID,
                                       wsiID=wsiID,
                                       fileID=fileID,
                                       yStart=j,
                                       xStart=i,
                                       idxy=idxy,
                                       idxx=idxx,
                                       downsampleRate=args.downsampleRateLR,
                                       outdirT=outdirLR,
                                       region_size=region_sizeLR,
                                       dirs=dirs,
                                       chop_regions=chop_regions,
                                       cNum=classNum_LR)
                for idxx, i in enumerate(index_xLR)
                for idxy, j in enumerate(index_yLR))

            #Add number of images in each class to the global count low resolution
            CSLR = sum(classEnumCLR)
            for c in range(0, CSLR.shape[0]):
                classEnumLR[c] = classEnumLR[c] + CSLR[c]
            #classEnumLR=[float(377),float(126)]
            #Print enumerations for each class

            #Generate iterators for parallel chopping of WSIs in high resolution
            index_yHR = range(0, dim_y - stepHR, stepHR)
            index_xHR = range(0, dim_x - stepHR, stepHR)

            #Create memory address for chopped images high resolution
            outdirHR = dirs['basedir'] + dirs['project'] + dirs['tempdirHR']

            #Perform high resolution chopping in parallel and return the number of
            #images in each of the labeled classes
            chop_regions = get_choppable_regions(
                wsi=wsiID,
                index_x=index_xHR,
                index_y=index_yHR,
                boxSize=region_sizeHR,
                white_percent=args.white_percent)

            classEnumCHR = Parallel(n_jobs=num_cores)(
                delayed(return_region)(args=args,
                                       xmlID=xmlID,
                                       wsiID=wsiID,
                                       fileID=fileID,
                                       yStart=j,
                                       xStart=i,
                                       idxy=idxy,
                                       idxx=idxx,
                                       downsampleRate=args.downsampleRateHR,
                                       outdirT=outdirHR,
                                       region_size=region_sizeHR,
                                       dirs=dirs,
                                       chop_regions=chop_regions,
                                       cNum=classNum_HR)
                for idxx, i in enumerate(index_xHR)
                for idxy, j in enumerate(index_yHR))

            #Add number of images in each class to the global count high resolution
            CSHR = sum(classEnumCHR)
            for c in range(0, CSHR.shape[0]):
                classEnumHR[c] = classEnumHR[c] + CSHR[c]

            #classEnumHR=[float(6334),float(488)]
            #Print enumerations for each class

        print('Time for WSI chopping: ' + str(time.time() - start))

        ##Augment low resolution data
        #Location of augmentable data
        imagesToAugmentLR = dirs['basedir'] + dirs['project'] + dirs[
            'tempdirLR'] + 'regions/'
        masksToAugmentLR = dirs['basedir'] + dirs['project'] + dirs[
            'tempdirLR'] + 'masks/'
        augmentList = glob(imagesToAugmentLR + '*.jpeg')

        #Parallel iter
        augIter = range(0, len(augmentList))

        #Output location for augmented data
        dirs['outDirAI'] = dirs['basedir'] + dirs['project'] + dirs[
            'tempdirLR'] + '/Augment' + '/regions/'
        dirs['outDirAM'] = dirs['basedir'] + dirs['project'] + dirs[
            'tempdirLR'] + '/Augment' + '/masks/'

        #Enumerate low resolution class distributions for augmentation ratios
        classDistLR = np.zeros(len(classEnumLR))
        ImageClassSplits = int((sum(classEnumLR) * args.aug_LR) / classNum_LR)

        classAugs = np.zeros(classDistLR.shape)
        for idx, value in enumerate(classEnumLR):
            classDistLR[idx] = value / sum(classEnumLR)
            #Guard against classes with no chopped examples
            classAugs[idx] = int(ImageClassSplits / value) if value else 0
        print('Low resolution augmentation distribution:')
        print(classAugs)
        #Define number of augmentations per class
        if args.aug_LR > 0:
            #classAugs=(np.round(args.aug_LR*(1-classDistLR))+1)
            classAugs = classAugs.astype(int)
            augmentOrder = np.argsort(classDistLR)

            #Augment images in parallel using inverted class distributions for augmentation iterations
            num_cores = multiprocessing.cpu_count()
            start = time.time()

            Parallel(n_jobs=num_cores)(
                delayed(run_batch)(augmentList, masksToAugmentLR, batchidx,
                                   classAugs, args.boxSizeLR, args.hbound,
                                   args.lbound, augmentOrder, dirs,
                                   classNum_LR)
                for batchidx in augIter)

            moveimages(
                dirs['outDirAI'],
                dirs['basedir'] + dirs['project'] + '/Permanent/LR/regions/')
            moveimages(
                dirs['outDirAM'],
                dirs['basedir'] + dirs['project'] + '/Permanent/LR/masks/')
            #augamt=len(glob(dirs['outDirAI'] + '*' +  dirs['imExt']))

        moveimages(
            dirs['basedir'] + dirs['project'] + dirs['tempdirLR'] +
            '/regions/',
            dirs['basedir'] + dirs['project'] + '/Permanent/LR/regions/')
        moveimages(
            dirs['basedir'] + dirs['project'] + dirs['tempdirLR'] + '/masks/',
            dirs['basedir'] + dirs['project'] + '/Permanent/LR/masks/')
        end = time.time() - start
        print('Time for low resolution augmenting: ' + str(end / 60) +
              ' minutes.')
        ##High resolution augmentation
        #Enumerate high resolution class distribution
        ImageClassSplits = int((sum(classEnumHR) * args.aug_HR) / classNum_HR)

        classDistHR = np.zeros(len(classEnumHR))
        classAugs = np.zeros(classDistHR.shape)
        for idx, value in enumerate(classEnumHR):
            classDistHR[idx] = value / sum(classEnumHR)
            #Guard against classes with no chopped examples
            classAugs[idx] = int(ImageClassSplits / value) if value else 0
        print('High resolution augmentation distribution:')
        print(classAugs)
        #Define number of augmentations per class
        if args.aug_HR > 0:
            augmentOrder = np.argsort(classDistHR)
            #classAugs=(np.round(args.aug_HR*(1-classDistHR))+1)
            classAugs = classAugs.astype(int)

            #High resolution input augmentable data
            imagesToAugmentHR = dirs['basedir'] + dirs['project'] + dirs[
                'tempdirHR'] + 'regions/'
            masksToAugmentHR = dirs['basedir'] + dirs['project'] + dirs[
                'tempdirHR'] + 'masks/'
            augmentList = glob(imagesToAugmentHR + '*.jpeg')

            #Parallel iterator
            augIter = range(0, len(augmentList))

            #Output for augmented data
            dirs['outDirAI'] = dirs['basedir'] + dirs['project'] + dirs[
                'tempdirHR'] + '/Augment' + '/regions/'
            dirs['outDirAM'] = dirs['basedir'] + dirs['project'] + dirs[
                'tempdirHR'] + '/Augment' + '/masks/'

            #Augment in parallel
            num_cores = multiprocessing.cpu_count()
            start = time.time()
            Parallel(n_jobs=num_cores)(
                delayed(run_batch)(augmentList, masksToAugmentHR, batchidx,
                                   classAugs, args.boxSizeHR, args.hbound,
                                   args.lbound, augmentOrder, dirs,
                                   classNum_HR)
                for batchidx in augIter)
            end = time.time() - start
            #augamt=len(glob(dirs['outDirAI'] + '*' +  dirs['imExt']))

            moveimages(
                dirs['outDirAI'],
                dirs['basedir'] + dirs['project'] + '/Permanent/HR/regions/')
            moveimages(
                dirs['outDirAM'],
                dirs['basedir'] + dirs['project'] + '/Permanent/HR/masks/')

        moveimages(
            dirs['basedir'] + dirs['project'] + dirs['tempdirHR'] +
            '/regions/',
            dirs['basedir'] + dirs['project'] + '/Permanent/HR/regions/')
        moveimages(
            dirs['basedir'] + dirs['project'] + dirs['tempdirHR'] + '/masks/',
            dirs['basedir'] + dirs['project'] + '/Permanent/HR/masks/')

        #Total time (totalStart is assumed to be a timer set at script start)
        print('Time for high resolution augmenting: ' +
              str((time.time() - totalStart) / 60) + ' minutes.')

    #Generate training and validation arguments
    training_args_list = [
    ]  # list of training argument directories low res and high res
    training_args_LR = []
    training_args_HR = []

    ##### LOW REZ ARGS #####
    dirs['outDirAILR'] = dirs['basedir'] + '/' + dirs[
        'project'] + '/Permanent/LR/regions/'
    dirs['outDirAMLR'] = dirs['basedir'] + '/' + dirs[
        'project'] + '/Permanent/LR/masks/'

    ########fix this
    trainOutLR = dirs[
        'basedir'] + '/Codes' + '/Deeplab_network/datasetLR/train.txt'
    valOutLR = dirs['basedir'] + '/Codes' + '/Deeplab_network/datasetLR/val.txt'

    generateDatalists(dirs['outDirAILR'], dirs['outDirAMLR'], '/regions/',
                      '/masks/', dirs['imExt'], dirs['maskExt'], trainOutLR)
    numImagesLR = len(glob(dirs['outDirAILR'] + '*' + dirs['imExt']))

    numStepsLR = (args.epoch_LR * numImagesLR) // args.CNNbatch_sizeLR
    pretrain_LR = get_pretrain(currentAnnotationIteration, '/LR/', dirs)
    modeldir_LR = dirs['basedir'] + dirs['project'] + dirs['modeldir'] + str(
        currentAnnotationIteration + 1) + '/LR/'

    pretrain_HR = get_pretrain(currentAnnotationIteration, '/HR/', dirs)

    modeldir_HR = dirs['basedir'] + dirs['project'] + dirs['modeldir'] + str(
        currentAnnotationIteration + 1) + '/HR/'

    # assign to dict
    training_args_LR = {
        'numImages': numImagesLR,
        'data_list': trainOutLR,
        'batch_size': args.CNNbatch_sizeLR,
        'num_steps': numStepsLR,
        'save_interval': int(round(numStepsLR / args.saveIntervals)),
        'pretrain_file': pretrain_LR,
        'input_height': args.boxSizeLR,
        'input_width': args.boxSizeLR,
        'modeldir': modeldir_LR,
        'num_classes': classNum_LR,
        'gpu': args.gpu,
        'data_dir': dirs['data_dir_LR'],
        'print_color': "\033[3;37;40m",
        'log_file':
        modeldir_LR + 'log_' + str(currentAnnotationIteration + 1) + '_LR.txt',
        'log_dir': modeldir_LR + 'log/',
        'learning_rate': args.learning_rate_LR,
    }
    training_args_list.append(training_args_LR)

    ##### HIGH REZ ARGS #####
    dirs['outDirAIHR'] = dirs['basedir'] + '/' + dirs[
        'project'] + '/Permanent/HR/regions/'
    dirs['outDirAMHR'] = dirs['basedir'] + '/' + dirs[
        'project'] + '/Permanent/HR/masks/'

    #######Fix this
    trainOutHR = dirs[
        'basedir'] + '/Codes' + '/Deeplab_network/datasetHR/train.txt'
    valOutHR = dirs['basedir'] + '/Codes' + '/Deeplab_network/datasetHR/val.txt'

    generateDatalists(dirs['outDirAIHR'], dirs['outDirAMHR'], '/regions/',
                      '/masks/', dirs['imExt'], dirs['maskExt'], trainOutHR)
    numImagesHR = len(glob(dirs['outDirAIHR'] + '*' + dirs['imExt']))

    numStepsHR = (args.epoch_HR * numImagesHR) // args.CNNbatch_sizeHR
    # assign to dict
    training_args_HR = {
        'numImages': numImagesHR,
        'data_list': trainOutHR,
        'batch_size': args.CNNbatch_sizeHR,
        'num_steps': numStepsHR,
        'save_interval': int(round(numStepsHR / args.saveIntervals)),
        'pretrain_file': pretrain_HR,
        'input_height': args.boxSizeHR,
        'input_width': args.boxSizeHR,
        'modeldir': modeldir_HR,
        'num_classes': classNum_HR,
        'gpu': args.gpu + args.gpu_num - 1,
        'data_dir': dirs['data_dir_HR'],
        'print_color': "\033[1;32;40m",
        'log_file':
        modeldir_HR + 'log_' + str(currentAnnotationIteration + 1) + '_HR.txt',
        'log_dir': modeldir_HR + 'log/',
        'learning_rate': args.learning_rate_HR,
    }
    training_args_list.append(training_args_HR)

    # train networks in parallel
    num_cores = args.gpu_num  # GPUs
    Parallel(n_jobs=num_cores,
             backend='threading')(delayed(train_net)(training_args, dirs)
                                  for training_args in training_args_list)

    finish_model_generation(dirs, currentAnnotationIteration)

    print('\n\n\033[92;5mPlease place new wsi file(s) in: \n\t' +
          dirs['basedir'] + dirs['project'] + dirs['training_data_dir'] +
          str(currentAnnotationIteration + 1))
    print('\nthen run [--option predict]\033[0m\n')
Example #8
from xml_to_mask import xml_to_mask
from getWsi import getWsi
from matplotlib import pyplot as plt

# Rasterize a whole-slide annotation XML at 16x downsampling and display it
slide = getWsi('/hdd/bg/HAIL2/DeepZoomPrediction/TRAINING_data/0/52483.svs')
d1, d2 = slide.dimensions
x = '/hdd/bg/HAIL2/DeepZoomPrediction/TRAINING_data/0/52483.xml'
wsiMask = xml_to_mask(x, (0, 0), (d1, d2), 16, 0)

plt.imshow(wsiMask * 255)  # scale the small integer labels for visibility
plt.show()