Example #1
def generateLabelImages(fp,
                        imgDir,
                        fontScale=1,
                        size=1,
                        rank=0,
                        structure=[[1, 1, 1], [1, 1, 1], [1, 1, 1]]):
    [row, col, numFrames, frameList] = misc.getVitals(fp)
    particleList = fp.attrs['particleList']
    zfillVal = fp.attrs['zfillVal']
    procFrameList = numpy.array_split(frameList, size)
    for frame in procFrameList[rank]:
        labelImg = fp['/segmentation/labelStack/' +
                      str(frame).zfill(zfillVal)].value
        gImg = fp['/dataProcessing/gImgRawStack/' +
                  str(frame).zfill(zfillVal)].value
        bImg = labelImg.astype('bool')
        bImgBdry = imageProcess.normalize(imageProcess.boundary(bImg))
        label, numLabel, dictionary = imageProcess.regionProps(
            bImg, gImg, structure=structure, centroid=True)
        bImg = imageProcess.normalize(bImg)
        for j in range(len(dictionary['id'])):
            bImgLabelN = label == dictionary['id'][j]
            ID = numpy.max(bImgLabelN * labelImg)
            bImg = imageProcess.textOnGrayImage(
                bImg,
                str(ID), (int(dictionary['centroid'][j][0]) + 3,
                          int(dictionary['centroid'][j][1]) - 3),
                fontScale=fontScale,
                color=127,
                thickness=1)
        finalImage = numpy.column_stack((bImg, numpy.maximum(bImgBdry, gImg)))
        cv2.imwrite(imgDir + '/' + str(frame).zfill(zfillVal) + '.png',
                    finalImage)
    return 0
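The size and rank arguments (and the per-rank slice of procFrameList) suggest the function is meant to be driven from an MPI launcher, as in Example #7. A minimal driver sketch under that assumption; mpi4py, the file name output.h5 and the directory labelImages are illustrative, not from the source:

from mpi4py import MPI
import h5py
import os

comm = MPI.COMM_WORLD
rank, size = comm.Get_rank(), comm.Get_size()

if rank == 0:
    os.makedirs('labelImages', exist_ok=True)  # output directory for the PNGs
comm.Barrier()

fp = h5py.File('output.h5', 'r')  # HDF5 file laid out as the function expects
generateLabelImages(fp, 'labelImages', fontScale=1, size=size, rank=rank)
comm.Barrier()  # wait for every rank to finish writing its frames
fp.close()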
Example #2
    def convertGatanMovie(self):
        print('Converting gatan movie - %s' % (self.inputDir))
        minCount, maxCount = 1e10, -1e10
        for inputFile, frame in tqdm(
                zip(self.inputFileList, list(range(1, self.numFrames + 1)))):
            f = hs.load(inputFile)
            gImg = f.data
            lowLimit = numpy.percentile(gImg, float(self.minPercentile))
            highLimit = numpy.percentile(gImg, float(self.maxPercentile))
            minCount = min(minCount, lowLimit)
            maxCount = max(maxCount, highLimit)
            numpy.save(self.outputNPYDir + '/' + str(frame).zfill(6) + '.npy',
                       gImg)

        for inputFile, frame in tqdm(
                zip(self.inputFileList, list(range(1, self.numFrames + 1)))):
            f = hs.load(inputFile)
            gImg = f.data
            lowLimit = numpy.percentile(gImg, float(self.minPercentile))
            highLimit = numpy.percentile(gImg, float(self.maxPercentile))
            gImg[gImg <= lowLimit] = lowLimit
            gImg[gImg >= highLimit] = highLimit
            gImg = imageProcess.normalize(
                gImg,
                min=(gImg.min() - minCount) / (maxCount - minCount) * 255,
                max=(gImg.max() - minCount) / (maxCount - minCount) * 255)
            cv2.imwrite(self.outputPNGDir + '/' + str(frame).zfill(6) + '.png',
                        gImg)
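The method reads the frame list twice: the first pass collects global low/high counts from each frame's percentiles, the second pass clips each frame and maps it onto a shared 0-255 scale. A condensed standalone sketch of that two-pass idea (the function name, the in-memory frame list and the uint8 cast are illustrative simplifications, not the class's exact behaviour):

import numpy

def clipAndRescaleFrames(frames, minPercentile=1.0, maxPercentile=99.0):
    # Pass 1: global intensity limits from per-frame percentiles
    minCount = min(numpy.percentile(f, minPercentile) for f in frames)
    maxCount = max(numpy.percentile(f, maxPercentile) for f in frames)

    # Pass 2: clip each frame and rescale onto the common 0-255 range
    rescaled = []
    for f in frames:
        g = numpy.clip(f.astype('float64'), minCount, maxCount)
        g = (g - minCount) / (maxCount - minCount) * 255.0
        rescaled.append(g.astype('uint8'))
    return rescaled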
Example #3
def saveImageSequence(gImgStack, outputDir, normalize=False):
    print('Saving volume stack as image sequence')
    [row, col, numFrames] = gImgStack.shape
    for frame in tqdm(range(1, numFrames + 1)):
        if (normalize == True):
            gImg = imageProcess.normalize(gImgStack[:, :, frame - 1])
        else:
            gImg = gImgStack[:, :, frame - 1]
        cv2.imwrite(outputDir + '/' + str(frame).zfill(6) + '.png', gImg)
    return 0
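A hypothetical call, saving a small random row x col x frames stack as a zero-padded PNG sequence (the stack and directory name are made up for illustration):

import os
import numpy

gImgStack = (numpy.random.rand(128, 128, 10) * 255).astype('uint8')
os.makedirs('imgSequence', exist_ok=True)
saveImageSequence(gImgStack, 'imgSequence', normalize=True)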
Example #4
def normalizeDataset(x):
    '''
    Stretch the contrast of each image in the labelled dataset to the range 0-255.

    Input parameters:
    x : (2D array) With N rows and 1024 or 4096 columns, each row
        corresponding to a flattened image. A 4D image batch of shape
        (N, row, col, channel) is also accepted.

    Returns:
    x : Array of the same shape as the input, with the intensity values of
        each image stretched to the range 0-255.
    '''
    try:
        # 2D case: each row is a flattened image
        [rowDataset, colDataset] = x.shape
        for i in range(rowDataset):
            x[i, :] = imageProcess.normalize(x[i, :])
    except ValueError:
        # 4D case: N x row x col x channel image batch
        [N, row, col, channel] = x.shape
        for i in range(N):
            x[i, :, :, :] = imageProcess.normalize(x[i, :, :, :])
    return x
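A hypothetical call exercising both branches of the try/except: a 2D array of flattened images and a 4D image batch (the shapes are illustrative):

import numpy

xFlat = numpy.random.rand(100, 1024) * 1e4         # N x 1024 flattened images
xBatch = numpy.random.rand(100, 64, 64, 1) * 1e4   # N x row x col x channel
xFlat = normalizeDataset(xFlat)                     # handled by the 2D branch
xBatch = normalizeDataset(xBatch)                   # falls through to the 4D branch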
Example #5
    def averageFrames(self,
                      path,
                      outputDir,
                      numFramesToAvg,
                      extension,
                      movingAvgFlag=True):
        print('Averaging image sequence - %s' % (path))
        fileIO.mkdir(outputDir)
        avgFrameList = []
        firstFrame, lastFrame = 1, self.numFrames
        for frame1 in range(int(firstFrame), int(lastFrame + 1)):
            if (frame1 + numFramesToAvg <= lastFrame + 1):
                frameList = []
                for frame2 in range(frame1, frame1 + numFramesToAvg):
                    frameList.append(frame2)
                avgFrameList.append(frameList)

        if (extension == 'png'):
            for frameList in tqdm(avgFrameList):
                outputFile = outputDir + '/' + str(
                    frameList[0]).zfill(6) + '.png'
                for frame in frameList:
                    inputFile = path + '/' + str(frame).zfill(6) + '.png'
                    gImg = cv2.imread(inputFile, 0)
                    if (frame == frameList[0]):
                        avgImg = gImg.copy()
                        avgImg = avgImg.astype('double')
                    else:
                        avgImg = avgImg + gImg
                avgImg = (avgImg / numFramesToAvg).astype('uint8')
                avgImg = imageProcess.normalize(avgImg)
                cv2.imwrite(outputFile, avgImg)
        elif (extension == 'npy'):
            for frameList in tqdm(avgFrameList):
                outputFile = outputDir + '/' + str(
                    frameList[0]).zfill(6) + '.npy'
                for frame in frameList:
                    inputFile = path + '/' + str(frame).zfill(6) + '.npy'
                    gImg = numpy.load(inputFile)
                    if (frame == frameList[0]):
                        avgImg = gImg.copy()
                        avgImg = avgImg.astype('double')
                    else:
                        avgImg = avgImg + gImg
                avgImg = avgImg / numFramesToAvg
                numpy.save(outputFile, avgImg)
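The avgFrameList built above is a plain sliding window over the frame numbers; movingAvgFlag is accepted but not used in the excerpt shown. A quick illustration of the windows it produces (numbers chosen only for illustration):

numFrames, numFramesToAvg = 5, 3
avgFrameList = [list(range(f, f + numFramesToAvg))
                for f in range(1, numFrames + 1)
                if f + numFramesToAvg <= numFrames + 1]
# avgFrameList == [[1, 2, 3], [2, 3, 4], [3, 4, 5]]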
Example #6
def generateLabelImagesText(fp, imgDir, fontScale=1, size=1, rank=0, scale=1):
    [row, col, numFrames, frameList] = misc.getVitals(fp)
    particleList = fp.attrs['particleList']
    zfillVal = fp.attrs['zfillVal']
    procFrameList = numpy.array_split(frameList, size)
    trackingData = numpy.loadtxt(fp.attrs['outputDir'] + '/tracking.dat')

    for frame in tqdm(procFrameList[rank]):
        gImg = fp['/dataProcessing/gImgRawStack/' +
                  str(frame).zfill(zfillVal)].value
        gImgNormScaled = imageProcess.normalize(
            cv2.resize(gImg, (int(col * scale), int(row * scale)),
                       interpolation=cv2.INTER_CUBIC),
            min=0,
            max=230)
        bImg = gImgNormScaled.copy()
        bImg[:] = 0
        tracking = trackingData[trackingData[:, 0] == frame]

        for f, particle, r, c, rad, area, label in tracking:
            if (label != 0):
                rr, cc = circle_perimeter(int(r * scale), int(c * scale),
                                          int(rad * scale))
                if ((rr < 0).any() == True or (cc < 0).any() == True):
                    pass
                elif ((rr > row * scale - 1).any() == True
                      or (cc > col * scale - 1).any() == True):
                    pass
                else:
                    gImgNormScaled[rr, cc] = 255
                    rr, cc = circle(int(r * scale), int(c * scale),
                                    int(rad * scale))
                    bImg[rr, cc] = 255
        for f, particle, r, c, rad, area, label in tracking:
            if (label != 0):
                bImg = imageProcess.textOnGrayImage(
                    bImg,
                    str(int(label)), (int(r * scale), int(c * scale)),
                    fontScale=fontScale,
                    color=127,
                    thickness=1)
        finalImage = numpy.column_stack((gImgNormScaled, bImg))
        cv2.imwrite(imgDir + '/' + str(frame).zfill(zfillVal) + '.png',
                    finalImage)
    return 0
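The circle_perimeter and circle signatures match skimage.draw; note that skimage.draw.circle was removed in recent scikit-image releases in favour of skimage.draw.disk, so running this against a current version would need an equivalent such as the sketch below (a drop-in assumption, not part of the original):

from skimage.draw import disk

# Filled circle: disk takes the centre as a tuple instead of separate r, c
rr, cc = disk((int(r * scale), int(c * scale)), int(rad * scale))
bImg[rr, cc] = 255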
Example #7
[row, col, numFrames, frameList] = misc.getVitals(fp)
procFrameList = numpy.array_split(frameList, size)

for frame in tqdm(procFrameList[rank]):
    gImgProc = fp['/dataProcessing/gImgRawStack/' +
                  str(frame).zfill(zfillVal)].value
    if (blurFlag == True):
        gImgProc = median(gImgProc, disk(5))
        gImgProc = median(gImgProc, disk(4))
    if (invertFlag == True):
        gImgProc = imageProcess.invertImage(gImgProc)
    if (bgSubFlag == True):
        gImgProc = imageProcess.subtractBackground(gImgProc,
                                                   sigma=bgSubSigmaTHT,
                                                   radius=radiusTHT)
    gImgProc = imageProcess.normalize(gImgProc)
    cv2.imwrite(
        outputDir + '/dataProcessing/processedStack/' +
        str(frame).zfill(zfillVal) + '.png', gImgProc)

comm.Barrier()

if (rank == 0):
    print "Writing processed image to h5 dataset"
    for frame in tqdm(frameList):
        gImgProc = cv2.imread(
            outputDir + '/dataProcessing/processedStack/' +
            str(frame).zfill(zfillVal) + '.png', 0)
        fileIO.writeH5Dataset(
            fp, '/dataProcessing/processedStack/' + str(frame).zfill(zfillVal),
            gImgProc)
Example #8
        cropSize = int(round(max(
            2.0 * 0.75 * (colTopRight - colTopLeft) / (numPillarsInRow - 1),
            2.0 * 0.75 * (rowBottomLeft - rowTopLeft) / (numPillarsInCol - 1))))
        if ('.dm3' in inputFile):
            outputFile = inputFile.replace('.dm3', '.png')
        elif ('.dm4' in inputFile):
            outputFile = inputFile.replace('.dm4', '.png')
        print('Processing %s' % (inputFile))
        tag = inputFile.split('/')[-2]

        gImg = imageProcess.readDM4(inputFile)
        [row, col] = gImg.shape
        gImgNorm = imageProcess.normalize(gImg, min=30, max=230)

        topRowPillarCentre = numpy.column_stack((
            numpy.linspace(rowTopLeft, rowTopRight, numPillarsInRow),
            numpy.linspace(colTopLeft, colTopRight, numPillarsInRow)))
        bottomRowPillarCenter = numpy.column_stack((
            numpy.linspace(rowBottomLeft, rowBottomRight, numPillarsInRow),
            numpy.linspace(colBottomLeft, colBottomRight, numPillarsInRow)))

        ###### ITERATING THROUGH EACH NANOPILLAR IN THE INPUT IMAGE
        pillarID = 0
        for coordTop, coordBottom in zip(topRowPillarCentre,
                                         bottomRowPillarCenter):
            pillarColumnCoord = numpy.column_stack(
                (numpy.linspace(coordTop[0], coordBottom[0], numPillarsInCol),
                 numpy.linspace(coordTop[1], coordBottom[1], numPillarsInCol)))
#######################################################################
if (rank == 0):
    print "Performing segmentation for all the frames"

fp = h5py.File(outputFile, 'r')
[row, col, numFrames, frameList] = misc.getVitals(fp)
procFrameList = numpy.array_split(frameList, size)

areaRange = numpy.array([60, 500], dtype='float64')
circularityRange = numpy.array([0.85, 1], dtype='float64')
sigma = 1

for frame in procFrameList[rank]:
    gImgRaw = fp['/dataProcessing/gImgRawStack/' +
                 str(frame).zfill(zfillVal)].value
    gImgNorm = imageProcess.normalize(gImgRaw, min=0, max=230)
    gImgProc = fp['/dataProcessing/processedStack/' +
                  str(frame).zfill(zfillVal)].value
    bImgKapur = gImgProc >= myCythonFunc.threshold_kapur(gImgProc.flatten())

    gImgInv = 255 - gImgRaw
    gImgBlur = ndimage.gaussian_filter(gImgInv, sigma=sigma)
    bImgAdaptive = cv2.adaptiveThreshold(gImgBlur, 255,
                                         cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                         cv2.THRESH_BINARY, 15,
                                         0).astype('bool')

    bImg = numpy.logical_and(bImgKapur, bImgAdaptive)
    bImg = imageProcess.fillHoles(bImg)
    bImg = myCythonFunc.removeBoundaryParticles(bImg.astype('uint8'))
    bImg = myCythonFunc.areaThreshold(bImg.astype('uint8'),
############################################################


############################################################
# MAKE COLLAGE FOR THE COLLAPSED AND NOT COLLAPSED PILLARS
############################################################
numRowCollage, numColCollage = 3, 20
counter_0, counter_1 = 0, 0
img_0 = numpy.zeros([imgSize * numRowCollage, imgSize * numColCollage], dtype='uint8')
img_1 = numpy.zeros([imgSize * numRowCollage, imgSize * numColCollage], dtype='uint8')
for i in range(row):
    if (y[i] == 0 and counter_0 < numRowCollage * numColCollage):
        # Place the next not-collapsed (label 0) image into its collage cell
        row_0, col_0 = int(int(counter_0 / numColCollage) * imgSize), int((counter_0 % numColCollage) * imgSize)
        counter_0 += 1
        img = numpy.reshape(x[i, :], (64, 64))
        img = imageProcess.normalize(img)
        img_0[row_0:row_0 + imgSize, col_0:col_0 + imgSize] = img
    elif (y[i] == 1 and counter_1 < numRowCollage * numColCollage):
        # Place the next collapsed (label 1) image into its collage cell
        row_1, col_1 = int(int(counter_1 / numColCollage) * imgSize), int((counter_1 % numColCollage) * imgSize)
        counter_1 += 1
        img = numpy.reshape(x[i, :], (64, 64))
        img = imageProcess.normalize(img)
        img_1[row_1:row_1 + imgSize, col_1:col_1 + imgSize] = img
    if (counter_0 >= numRowCollage * numColCollage and counter_1 >= numRowCollage * numColCollage):
        break
cv2.imwrite('collage_collapsed.png', img_1)
cv2.imwrite('collage_not_collapsed.png', img_0)
############################################################



############################################################
# IMAGE PROCESSING METHOD TO SEGMENT AND CLASSIFY NANOPILLARS
############################################################
outFile = open('imageProcessingLabel.dat', 'w')
outFile.write('Actual label\tPredicted label\n')

# RUN ON TRAINING DATA
counter1, counter2 = 0, 0
totalCount, incorrectCount = 0, 0
for frame, x, y in tqdm(zip(range(yTrain.size), xTrain, yTrain)):
    totalCount += 1
    gImg = numpy.reshape(x, (row, col))
    gImgNorm = imageProcess.normalize(gImg)
    gImg = imageProcess.invert(gImgNorm)

    # THRESHOLD METHOD 1 (OTSU)
    bImg = gImg >= imageProcess.otsuThreshold(gImg)

    # THRESHOLD METHOD 2 (KAPUR)
    # bImg = gImg >= imageProcess.threshold_kapur(gImg)

    # THRESHOLD METHOD 3 (MEAN)
    # bImg = gImg >= numpy.mean(gImg)

    bImg = imageProcess.binary_opening(bImg, iterations=4)
    labelImg, numLabel, dictionary = imageProcess.regionProps(bImg)

    distanceFromCentre = 1e10