# Example 1
def selectSpecialPatch(imgFile,
                       wordsFile,
                       feaType,
                       gridSize,
                       sizeRange,
                       nk,
                       filter_radius=3,
                       spaceSize=10):
    """Select the image patches whose filtered label is the special class (0).

    Pipeline: extract grid-local features, kNN-label each patch against the
    two-class words file, spatially filter the labels, then regenerate raw
    patch data only at the surviving label-0 positions.

    Returns (patchData, specialPos).
    """
    # Grid-based local features and the position of every patch.
    features, positions = glf.genImgLocalFeas(imgFile, feaType, gridSize,
                                              sizeRange)
    # Per-patch labels from the two-class codebook.
    patchLabels = chm.calPatchLabels2(wordsFile,
                                      features,
                                      k=nk,
                                      two_classes=['1', '2'],
                                      isH1=True)
    # Spatial filtering — presumably drops isolated label responses.
    keptPos, keptLabels = filterPos(positions,
                                    patchLabels,
                                    radius=filter_radius,
                                    spaceSize=spaceSize)
    # Indices of patches labeled 0 after filtering.
    zeroIdx = list(np.argwhere(keptLabels == 0)[:, 0])
    specialPos = list(keptPos[zeroIdx, :])
    # Re-extract the raw patch pixels at only the selected grid positions.
    patchData, _, _ = esg.generateGridPatchData(imgFile,
                                                gridSize,
                                                sizeRange,
                                                gridList=specialPos)
    return patchData, specialPos
# Example 2
def calFusionFea(imgFile, feaTypes, gridSize, sizeRange, gridList=None):
    """Concatenate several local feature types into one fusion feature matrix.

    For each feature type the features are extracted on the same grid; the
    first extraction fixes the grid positions for the rest when no gridList
    is given. Features are appended column-wise.

    Bug fix: `fusionFeas` was previously initialized only inside the
    `gridList is None` branch, so calling with an explicit gridList raised
    NameError. It is now initialized from the first feature pass regardless.

    Returns (fusionFeas, positions); both are None if feaTypes is empty.
    """
    fusionFeas = None
    positions = None
    for feaType in feaTypes:
        feas, positions = genImgLocalFeas(imgFile,
                                          feaType,
                                          gridSize,
                                          sizeRange,
                                          HisFeaDim=32,
                                          gridList=gridList)
        if fusionFeas is None:
            # One row per patch position, zero columns to append into.
            fusionFeas = np.empty((len(positions), 0))
        if gridList is None:
            # Lock all subsequent feature types to the same grid positions.
            gridList = list(positions)
        fusionFeas = np.append(fusionFeas, feas, axis=1)
    return fusionFeas, positions
# Example 3
def extractCascadeFeatures(img_file, sift_u, lbp_u, sdae_u_d, sdae_u_s,
                           gridList, gridSize, sizeRange, sdaePara):
    """Extract SIFT, LBP and two SDAE feature sets on one grid and stack
    them column-wise into a single cascaded feature matrix.

    The u_* arguments are dimensionality-reduction matrices passed through
    as `u_reduce`. NOTE: mutates sdaePara in place ('weight', 'patchMean').

    Returns (feas_cascade, pos) where pos are the shared patch positions.
    """
    # Reduced SIFT features.
    sift_feas, pos = glf.genImgLocalFeas(img_file,
                                         'SIFT',
                                         gridSize,
                                         sizeRange,
                                         u_reduce=sift_u,
                                         gridList=gridList)
    # Reduced LBP features (same grid, so positions match the SIFT pass).
    lbp_feas, pos = glf.genImgLocalFeas(img_file,
                                        'LBP',
                                        gridSize,
                                        sizeRange,
                                        u_reduce=lbp_u,
                                        gridList=gridList)
    # SDAE pass with the '_s' weights; patchMean disabled for this net.
    sdaePara['weight'] = sdaePara['weight_s']
    sdaePara['patchMean'] = False
    sdae_s_feas, pos = glf.genImgLocalFeas(img_file,
                                           'SDAE',
                                           gridSize,
                                           sizeRange,
                                           sdaePara=sdaePara,
                                           u_reduce=sdae_u_s,
                                           gridList=gridList)
    # SDAE pass with the '_d' weights; patchMean enabled for this net.
    sdaePara['weight'] = sdaePara['weight_d']
    sdaePara['patchMean'] = True
    sdae_d_feas, pos = glf.genImgLocalFeas(img_file,
                                           'SDAE',
                                           gridSize,
                                           sizeRange,
                                           sdaePara=sdaePara,
                                           u_reduce=sdae_u_d,
                                           gridList=gridList)
    # Column order is SIFT, LBP, SDAE_d, SDAE_s — keep it; downstream
    # consumers presumably rely on this layout.
    stacked = np.column_stack(
        (sift_feas, lbp_feas, sdae_d_feas, sdae_s_feas))
    return stacked, pos
# Example 4
def saveImgHeatMaps(imgFile, isReduce=False):
    """Render and save per-class heat maps of one image for SIFT and LBP.

    For each active feature type this extracts grid-local features, builds
    class heat maps via generateHeatMaps2by2, lays the three class maps
    beside the original image, and writes the result to disk. The SDAE
    branch is currently commented out.

    imgFile  -- path to the input image; the output base name is taken
                from a fixed slice of the path (imgFile[-20:-4]).
    isReduce -- when True, PCA-reduce the features first and use the
                *_reduce words files; otherwise use the *_saveName files.

    Returns 0. NOTE(review): uses dict.iteritems(), i.e. Python 2 code.
    """
    # Word/feature file paths. The sdae_* paths are referenced only by the
    # commented-out SDAE section below.
    sdae_wordsFile_h1 = '../../Data/Features/type4_SDAEWords_h1.hdf5'
    sdae_wordsFile_h2 = '../../Data/Features/type4_SDAEWords_h2.hdf5'
    sdae_wordsFile_h1_diff_mean = '../../Data/Features/type4_SDAEWords_h1_diff_mean.hdf5'
    sdae_wordsFile_h2_diff_mean = '../../Data/Features/type4_SDAEWords_h2_diff_mean.hdf5'
    sdae_wordsFile_h1_reduce = '../../Data/Features/type4_SDAEWords_h1_reduce_sameRatio.hdf5'
    sdae_wordsFile_h2_reduce = '../../Data/Features/type4_SDAEWords_h2_reduce_sameRatio.hdf5'
    sift_wordsFile_h1 = '../../Data/Features/type4_SIFTWords_h1.hdf5'
    sift_wordsFile_h2 = '../../Data/Features/type4_SIFTWords_h2.hdf5'
    sift_wordsFile_h1_reduce = '../../Data/Features/type4_SIFTWords_h1_reduce.hdf5'
    sift_wordsFile_h2_reduce = '../../Data/Features/type4_SIFTWords_h2_reduce.hdf5'
    lbp_wordsFile_h1_reduce = '../../Data/Features/type4_LBPWords_h1_reduce_sameRatio.hdf5'
    lbp_wordsFile_h2_reduce = '../../Data/Features/type4_LBPWords_h2_reduce_sameRatio.hdf5'
    lbp_wordsFile_h1 = '../../Data/Features/type4_LBPWords_h1.hdf5'
    lbp_wordsFile_h2 = '../../Data/Features/type4_LBPWords_h2.hdf5'
    SIFTFeaFile = '../../Data/Features/type4_SIFTFeatures.hdf5'
    SDAEFeaFile = '../../Data/Features/type4_SDAEFeas.hdf5'
    LBPFeaFile = '../../Data/Features/type4_LBPFeatures.hdf5'

    # Words files used in the non-reduced (isReduce=False) path.
    sift_saveName_h1 = '../../Data/Features/type4_SIFTWords_h1_s16_600_300_300_300.hdf5'
    sift_saveName_h2 = '../../Data/Features/type4_SIFTWords_h2_s16_600_300_300_300.hdf5'
    sdae_saveName_h1 = '../../Data/Features/type4_SDAEWords_h1_diff_mean_s16_600_300_300_300.hdf5'
    sdae_saveName_h2 = '../../Data/Features/type4_SDAEWords_h2_diff_mean_s16_600_300_300_300.hdf5'
    sdae_saveName_h1_s = '../../Data/Features/type4_SDAEWords_h1_same_mean_s16_600_300_300_300.hdf5'
    sdae_saveName_h2_s = '../../Data/Features/type4_SDAEWords_h2_same_mean_s16_600_300_300_300.hdf5'
    lbp_saveName_h1 = '../../Data/Features/type4_LBPWords_h1_s16_600_300_300_300.hdf5'
    lbp_saveName_h2 = '../../Data/Features/type4_LBPWords_h2_s16_600_300_300_300.hdf5'

    # Extraction / rendering parameters.
    # NOTE(review): imResize and imgSize appear unused in the active code
    # path (imResize=None is passed literally below) — confirm before removal.
    sizeRange = (16, 16)
    imResize = (256, 256)
    imgSize = (440, 440)
    nk = 19
    resolution = 5
    gridSize = np.array([resolution, resolution])
    # Load the image and scale intensities to [0, 1].
    im = np.array(imread(imgFile), dtype='f') / 255
    th1 = 0.5
    th2 = 0.5
    # Output base name — assumes a fixed-length path layout; TODO confirm.
    im_name = imgFile[-20:-4]

    # ----------------save sift------------------
    feaVectors, posVectors = glf.genImgLocalFeas(imgFile,
                                                 'SIFT',
                                                 gridSize,
                                                 sizeRange,
                                                 imResize=None)
    if isReduce:
        # PCA-reduce SIFT features to 64 dims and use the reduced words files.
        feaVectors = pca.reduceVecFeasDim(SIFTFeaFile, feaVectors, 64)
        heats = generateHeatMaps2by2(sift_wordsFile_h1_reduce,
                                     sift_wordsFile_h2_reduce, feaVectors,
                                     posVectors, gridSize, nk, th1, th2)
    else:
        heats = generateHeatMaps2by2(sift_saveName_h1, sift_saveName_h2,
                                     feaVectors, posVectors, gridSize, nk, th1,
                                     th2)

    # Python 2: dict.iteritems(). m presumably has shape (3, 440, 440).
    for c, m in heats.iteritems():
        # Lay the three class maps side by side, then prepend the original.
        map3 = np.transpose(m, (1, 0, 2)).reshape(440, 440 * 3)
        map3 = np.append(im, map3, axis=1)
        if isReduce:
            imsave(im_name + '_SIFT_reduce_' + c + '_th' + str(th1) + '.jpg',
                   map3)
        else:
            imsave(im_name + '_SIFT_' + c + '_th' + str(th1) + '.jpg', map3)

    # ----------------save lbp------------------
    feaVectors, posVectors = glf.genImgLocalFeas(imgFile,
                                                 'LBP',
                                                 gridSize,
                                                 sizeRange,
                                                 imResize=None)
    if isReduce:
        # PCA-reduce LBP features to 8 dims and use the reduced words files.
        feaVectors = pca.reduceVecFeasDim(LBPFeaFile, feaVectors, 8)
        heats = generateHeatMaps2by2(lbp_wordsFile_h1_reduce,
                                     lbp_wordsFile_h2_reduce, feaVectors,
                                     posVectors, gridSize, nk, th1, th2)
    else:
        heats = generateHeatMaps2by2(lbp_saveName_h1, lbp_saveName_h2,
                                     feaVectors, posVectors, gridSize, nk, th1,
                                     th2)

    for c, m in heats.iteritems():
        map3 = np.transpose(m, (1, 0, 2)).reshape(440, 440 * 3)
        map3 = np.append(im, map3, axis=1)
        if isReduce:
            imsave(im_name + '_LBP_reduce_' + c + '_th' + str(th1) + '.jpg',
                   map3)
        else:
            imsave(im_name + '_LBP_' + c + '_th' + str(th1) + '.jpg', map3)

    # ---------------show SDEA local results--------------
    # define SDAE parameters
    # sdaePara = {}
    # # sdaePara['weight'] = '../../Data/autoEncoder/final_0.01.caffemodel'
    # # sdaePara['weight'] = '../../Data/autoEncoder/layer_diff_mean_final.caffemodel'
    # sdaePara['weight'] = '../../Data/autoEncoder/layer_diff_mean_s16_final.caffemodel'
    # sdaePara['net'] = '../../Data/autoEncoder/test_net.prototxt'
    # sdaePara['meanFile'] = '../../Data/patchData_mean_s16.txt'
    # sdaePara['patchMean'] = True
    # channels = 1
    # # layerNeuronNum = [28 * 28, 2000, 1000, 500, 128]
    # layerNeuronNum = [16 * 16, 1000, 1000, 500, 64]
    # sdaePara['layerNeuronNum'] = layerNeuronNum
    # _, gl, _ = esg.generateGridPatchData(imgFile, gridSize, sizeRange)
    # batchSize = len(gl)
    # inputShape = (batchSize, channels, 16, 16)
    # sdaePara['inputShape'] = inputShape
    #
    # feaVectors, posVectors = glf.genImgLocalFeas(imgFile, 'SDAE', gridSize, sizeRange, sdaePara=sdaePara)
    # if isReduce:
    #     feaVectors = pca.reduceVecFeasDim(SDAEFeaFile, feaVectors, 9)
    #     heats = generateHeatMaps2by2(sdae_wordsFile_h1_reduce, sdae_wordsFile_h2_reduce, feaVectors,
    #                                  posVectors, gridSize, nk, th1, th2)
    # else:
    #     heats = generateHeatMaps2by2(sdae_saveName_h1, sdae_saveName_h2, feaVectors, posVectors,
    #                                  gridSize, nk, th1, th2)
    # for c, m in heats.iteritems():
    #     map3 = np.transpose(m, (1, 0, 2)).reshape(440, 440 * 3)
    #     map3 = np.append(map3, im, axis=1)
    #     if isReduce:
    #         imsave(im_name + '_SDAE_reduce_' + c + '_th' + str(th1) + '.jpg', map3)
    #     else:
    #         imsave(im_name + '_SDAE_' + c + '_th' + str(th1) + '.jpg', map3)
    return 0
# Example 5
def region_category_map(paras):
    """Build per-region category maps for one image and save them to disk.

    Segments the image into sub-regions, extracts local features for each
    region's patches (one feature type, or the cascaded combination),
    labels them against the word files, and paints each region with its
    per-class vote ratios. The three class maps plus the original image
    are written side by side as a JPEG.

    paras -- dict of all inputs; 'feaType' selects one of 'SIFT', 'LBP',
             'SDAE_d', 'SDAE_s' or 'cascade'.

    Fixes vs original: h5py files are closed after reading (they leaked
    handles); the inner loop no longer shadows k (= paras['k']); the
    sdaePara['inputShape'] write is guarded for sdaePara=None, matching
    region_special_map; the mutually exclusive feaType tests use elif.
    NOTE: uses dict.iteritems(), i.e. Python 2 code.
    """
    imgFile = paras['imgFile']
    sift_wordsFile_h1 = paras['sift_wordsFile_h1']
    sift_wordsFile_h2 = paras['sift_wordsFile_h2']
    sdae_wordsFile_h1_d = paras['sdae_wordsFile_h1_d']
    sdae_wordsFile_h2_d = paras['sdae_wordsFile_h2_d']
    sdae_wordsFile_h1_s = paras['sdae_wordsFile_h1_s']
    sdae_wordsFile_h2_s = paras['sdae_wordsFile_h2_s']
    lbp_wordsFile_h1 = paras['lbp_wordsFile_h1']
    lbp_wordsFile_h2 = paras['lbp_wordsFile_h2']
    cascade_wordsFile = paras['cascade_wordsFile']
    siftFeaFile_reduce = paras['SIFTFeaFile_reduce']
    sdaeFeaFile_reduce_d = paras['SDAEFeaFile_reduce_d']
    sdaeFeaFile_reduce_s = paras['SDAEFeaFile_reduce_s']
    lbpFeaFile_reduce = paras['LBPFeaFile_reduce']
    k = paras['k']
    minSize = paras['minSize']
    patchSize = paras['patchSize']
    region_patch_ratio = paras['region_patch_ratio']
    sigma = paras['sigma']
    th = paras['th']
    sizeRange = paras['sizeRange']
    nk = paras['nk']
    gridSize = paras['gridSize']
    eraseMap = paras['eraseMap']
    im = paras['im']
    sdaePara = paras['sdaePara']
    feaType = paras['feaType']

    # Resolve word files (and SDAE weight/patchMean settings) for the
    # requested feature type.
    if feaType == 'SIFT':
        wordsFile_h1 = sift_wordsFile_h1
        wordsFile_h2 = sift_wordsFile_h2
        feaType_r = feaType
    elif feaType == 'LBP':
        wordsFile_h1 = lbp_wordsFile_h1
        wordsFile_h2 = lbp_wordsFile_h2
        feaType_r = feaType
    elif feaType == 'SDAE_d':
        wordsFile_h1 = sdae_wordsFile_h1_d
        wordsFile_h2 = sdae_wordsFile_h2_d
        sdaePara['weight'] = sdaePara['weight_d']
        sdaePara['patchMean'] = True
        feaType_r = 'SDAE'
    elif feaType == 'SDAE_s':
        wordsFile_h1 = sdae_wordsFile_h1_s
        wordsFile_h2 = sdae_wordsFile_h2_s
        sdaePara['weight'] = sdaePara['weight_s']
        sdaePara['patchMean'] = False
        feaType_r = 'SDAE'
    elif feaType == 'cascade':
        # Load the reduction matrices once, then close the HDF5 handles
        # (the original never closed them).
        sift_f = h5py.File(siftFeaFile_reduce, 'r')
        sdae_f_d = h5py.File(sdaeFeaFile_reduce_d, 'r')
        sdae_f_s = h5py.File(sdaeFeaFile_reduce_s, 'r')
        lbp_f = h5py.File(lbpFeaFile_reduce, 'r')
        sift_u = np.array(sift_f.get('uSet/u'))
        lbp_u = np.array(lbp_f.get('uSet/u'))
        sdae_u_d = np.array(sdae_f_d.get('uSet/u'))
        sdae_u_s = np.array(sdae_f_s.get('uSet/u'))
        sift_f.close()
        sdae_f_d.close()
        sdae_f_s.close()
        lbp_f.close()
        feaType_r = feaType

    # Output base name — assumes a fixed-length path layout.
    im_name = imgFile[-20:-4]
    F0, region_patch_list = gsr.generate_subRegions(imgFile, patchSize,
                                                    region_patch_ratio,
                                                    eraseMap, k, minSize,
                                                    sigma)
    maps2by2 = {}
    for ri in range(len(region_patch_list)):
        r = region_patch_list[ri]
        if sdaePara is not None:
            # SDAE nets need the batch size of this region's patch list.
            batchSize = len(r)
            inputShape = (batchSize, 1, sizeRange[0], sizeRange[0])
            sdaePara['inputShape'] = inputShape
        if len(r) != 0:
            if feaType_r != 'cascade':
                feaVectors, posVectors = glf.genImgLocalFeas(imgFile,
                                                             feaType_r,
                                                             gridSize,
                                                             sizeRange,
                                                             gridList=r,
                                                             sdaePara=sdaePara)
                labels = chm.calPatchLabels2by2(wordsFile_h1, wordsFile_h2,
                                                feaVectors, nk)
            else:
                feaVectors, posVectors = casf.extractCascadeFeatures(
                    imgFile, sift_u, lbp_u, sdae_u_d, sdae_u_s, r, gridSize,
                    sizeRange, sdaePara)
                labels = chm.calPatchLabels2by2_noH(cascade_wordsFile,
                                                    feaVectors, nk)

            # Renamed from 'k' to avoid shadowing k = paras['k'] above.
            for name, v in labels.iteritems():
                v = list(v.flatten())
                if name not in maps2by2:
                    maps2by2[name] = np.zeros((3, F0.shape[0], F0.shape[1]))
                # Vote ratios for labels 0, 1 and 2 among this region's
                # patches; ratios below th are suppressed to 0.
                c1 = float(v.count(0)) / float(len(v))
                c2 = float(v.count(1)) / float(len(v))
                cc = float(v.count(2)) / float(len(v))
                cs = np.array([c1, c2, cc])
                cs[np.where(cs < th)] = 0

                maps2by2[name][0][np.where(F0 == ri)] = cs[0]
                maps2by2[name][1][np.where(F0 == ri)] = cs[1]
                maps2by2[name][2][np.where(F0 == ri)] = cs[2]

    for c, m in maps2by2.iteritems():
        # Lay the three class maps side by side, then append the original.
        map3 = np.transpose(m, (1, 0, 2)).reshape(440, 440 * 3)
        map3 = np.append(map3, im, axis=1)
        imsave(im_name + '_' + feaType + '_' + c + '_region' + '.jpg', map3)
# Example 6
def region_special_map(paras, isReturnMaps=None, returnFilteroutLabels=False):
    """Segment an image into sub-regions and label each region against the
    per-scale 'type vs rest' word files for the chosen feature type.

    paras -- dict of all inputs. 'feaType' selects the word-file set
             ('His', 'LBP', 'SIFT' or 'SDAE'); 'specialType' (index or
             None) restricts labeling to one scale's word file; 'train',
             'is_rotate' and returnFilteroutLabels select the return shape.
    isReturnMaps -- when not None, return early with the raw maps.

    Returns (depending on flags): the segmentation F0 plus region label
    lists / erase labels / filter-out labels, or (maps2by2, labels, F0).
    NOTE(review): uses dict.iteritems() and list-returning dict.values(),
    i.e. Python 2 code.
    """
    imgFile = paras['imgFile']
    img = paras['img']
    k = paras['k']
    minSize = paras['minSize']
    patchSize = paras['patchSize']
    region_patch_ratio = paras['region_patch_ratio']
    sigma = paras['sigma']
    th = paras['th']
    sizeRange = paras['sizeRange']
    nk = paras['nk']
    gridSize = paras['gridSize']
    eraseMap = paras['eraseMap']
    im = paras['im']
    feaType = paras['feaType']

    sdaePara = paras['sdaePara']
    types = paras['types']
    withIntensity = paras['withIntensity']
    diffResolution = paras['diffResolution']
    isSave = paras['isSave']
    specialType = paras['specialType']
    returnRegionLabels = paras['returnRegionLabels']
    train = paras['train']
    is_rotate = paras['is_rotate']

    # Pick the four per-scale word files for the requested feature type.
    if feaType == 'His':
        his_wordsFile_s1 = paras['his_wordsFile_s1']
        his_wordsFile_s2 = paras['his_wordsFile_s2']
        his_wordsFile_s3 = paras['his_wordsFile_s3']
        his_wordsFile_s4 = paras['his_wordsFile_s4']
        wordsFile_s = [
            his_wordsFile_s1, his_wordsFile_s2, his_wordsFile_s3,
            his_wordsFile_s4
        ]

    if feaType == 'LBP':
        lbp_wordsFile_s1 = paras['lbp_wordsFile_s1']
        lbp_wordsFile_s2 = paras['lbp_wordsFile_s2']
        lbp_wordsFile_s3 = paras['lbp_wordsFile_s3']
        lbp_wordsFile_s4 = paras['lbp_wordsFile_s4']
        wordsFile_s = [
            lbp_wordsFile_s1, lbp_wordsFile_s2, lbp_wordsFile_s3,
            lbp_wordsFile_s4
        ]

    if feaType == 'SIFT':
        sift_wordsFile_s1 = paras['sift_wordsFile_s1']
        sift_wordsFile_s2 = paras['sift_wordsFile_s2']
        sift_wordsFile_s3 = paras['sift_wordsFile_s3']
        sift_wordsFile_s4 = paras['sift_wordsFile_s4']
        wordsFile_s = [
            sift_wordsFile_s1, sift_wordsFile_s2, sift_wordsFile_s3,
            sift_wordsFile_s4
        ]

    if feaType == 'SDAE':
        sdae_wordsFile_s1 = paras['sdae_wordsFile_s1']
        sdae_wordsFile_s2 = paras['sdae_wordsFile_s2']
        sdae_wordsFile_s3 = paras['sdae_wordsFile_s3']
        sdae_wordsFile_s4 = paras['sdae_wordsFile_s4']
        wordsFile_s = [
            sdae_wordsFile_s1, sdae_wordsFile_s2, sdae_wordsFile_s3,
            sdae_wordsFile_s4
        ]

    thresh = paras['thresh']
    mk = paras['mk']
    # Output base name — assumes a fixed-length path layout; TODO confirm.
    im_name = imgFile[-20:-4]
    # Segment into sub-regions; the extra filterout_labels output is only
    # produced when explicitly requested.
    if returnFilteroutLabels:
        F0, region_patch_list, eraseLabels, filterout_labels = gsr.generate_subRegions(
            img,
            patchSize,
            region_patch_ratio,
            eraseMap,
            k,
            minSize,
            sigma,
            thresh=thresh,
            diffResolution=diffResolution,
            returnFilteroutLabels=returnFilteroutLabels)
    else:
        F0, region_patch_list, eraseLabels = gsr.generate_subRegions(
            img,
            patchSize,
            region_patch_ratio,
            eraseMap,
            k,
            minSize,
            sigma,
            thresh=thresh,
            diffResolution=diffResolution)
    maps2by2 = {}
    region_labels = {}
    # Label every non-empty region's patches and accumulate vote maps.
    for ri in range(len(region_patch_list)):
        r = region_patch_list[ri]
        if sdaePara is not None:
            # SDAE nets need the batch size of this region's patch list.
            batchSize = len(r)
            inputShape = (batchSize, 1, sizeRange[0], sizeRange[0])
            sdaePara['inputShape'] = inputShape
        if len(r) != 0:
            # if feaType == 'LBP':
            feaVectors, posVectors = glf.genImgLocalFeas(
                imgFile,
                feaType,
                gridSize,
                sizeRange,
                gridList=r,
                sdaePara=sdaePara,
                withIntensity=withIntensity)
            labels = {}
            if specialType is not None:
                # Single requested scale only.
                w = wordsFile_s[specialType]
                # print feaVectors.shape
                labelVec = chm.calPatchLabels2(w,
                                               feaVectors,
                                               k=nk,
                                               two_classes=['1', '2'],
                                               isH1=True,
                                               mk=mk)
                name_s = types[specialType] + '_rest'
                labels[name_s] = labelVec
            else:
                # Label against every scale's word file.
                for wi in range(len(wordsFile_s)):
                    w = wordsFile_s[wi]
                    labelVec = chm.calPatchLabels2(w,
                                                   feaVectors,
                                                   k=nk,
                                                   two_classes=['1', '2'],
                                                   isH1=True,
                                                   mk=mk)
                    name_s = types[wi] + '_rest'
                    labels[name_s] = labelVec

            # NOTE(review): loop variable k shadows k = paras['k'] above;
            # harmless here because paras['k'] is not read after this point.
            for k, v in labels.iteritems():
                v = list(v.flatten())
                if k not in maps2by2:
                    maps2by2[k] = np.zeros((3, F0.shape[0], F0.shape[1]))
                    region_labels[k] = [[], [], []]
                # Vote ratios for labels 0, 1 and 2 among this region's
                # patches; ratios below th are suppressed to 0.
                c1 = float(v.count(0)) / float(len(v))
                c2 = float(v.count(1)) / float(len(v))
                cc = float(v.count(2)) / float(len(v))
                cs = np.array([c1, c2, cc])
                cs[np.where(cs < th)] = 0

                maps2by2[k][0][np.where(F0 == ri)] = cs[0]
                maps2by2[k][1][np.where(F0 == ri)] = cs[1]
                maps2by2[k][2][np.where(F0 == ri)] = cs[2]
                # Record which regions responded for each class.
                if cs[0] > 0:
                    region_labels[k][0].append(ri)
                if cs[1] > 0:
                    region_labels[k][1].append(ri)
                if cs[2] > 0:
                    region_labels[k][2].append(ri)
    if isReturnMaps is not None:
        # Python 2: dict.values() returns a list; takes the first label
        # pair's class-2 region list.
        return maps2by2, region_labels.values()[0][2], F0
    if isSave:
        for c, m in maps2by2.iteritems():
            # Lay the three class maps side by side, then the original.
            map3 = np.transpose(m, (1, 0, 2)).reshape(440, 440 * 3)
            map3 = np.append(map3, im, axis=1)
            imsave(im_name + '_' + feaType + '_' + c + '_region' + '.jpg',
                   map3)

    if train:
        return F0, region_labels, eraseLabels
    else:
        returnlabels = []
        if is_rotate:
            specialLabels = []
        # NOTE(review): v[0] is appended once per entry of
        # returnRegionLabels, so specialLabels may contain duplicates when
        # more than one class index is requested — confirm intended.
        for i in returnRegionLabels:
            for _, v in region_labels.iteritems():
                returnlabels = returnlabels + v[i]
                if is_rotate:
                    specialLabels = specialLabels + v[0]
        if is_rotate:
            return F0, returnlabels, eraseLabels, specialLabels
        else:
            if returnFilteroutLabels:
                return F0, returnlabels, eraseLabels, filterout_labels
            else:
                return F0, returnlabels, eraseLabels