Code example #1
File: gabor.py, Project: ndrwmlnk/textureSegmentation
def runGabor(args):

    infile = args.infile
    if (not os.path.isfile(infile)):
        print infile, ' is not a file!'
        exit(0)

    outfile = args.outfile
    printlocation = os.path.dirname(os.path.abspath(outfile))
    _utils.deleteExistingSubResults(printlocation)

    M_transducerWindowSize = args.M
    if ((M_transducerWindowSize % 2) == 0):
        print 'Gaussian window size not odd, using next odd number'
        M_transducerWindowSize += 1

    k_clusters = args.k
    k_gaborSize = args.gk

    spatialWeight = args.spw
    gammaSigmaPsi = []
    gammaSigmaPsi.append(args.gamma)
    gammaSigmaPsi.append(args.sigma)
    gammaSigmaPsi.append(args.psi)
    variance_Threshold = args.vt
    howManyFeatureImages = args.fi
    R_threshold = args.R
    sigmaWeight = args.siw
    greyOutput = args.c
    printIntermediateResults = args.i

    # Read the input as a greyscale image (cv2.CV_LOAD_IMAGE_GRAYSCALE is the OpenCV 2
    # flag), derive the lambda (wavelength) values from it and build the Gabor filter bank.
    img = cv2.imread(infile, cv2.CV_LOAD_IMAGE_GRAYSCALE)
    lambdas = getLambdaValues(img)
    filters = build_filters(lambdas, k_gaborSize, gammaSigmaPsi)

    print "Gabor kernels created, getting filtered images"
    filteredImages = getFilterImages(filters, img)
    filteredImages = filterSelection(filteredImages, R_threshold, img,
                                     howManyFeatureImages)
    if (printIntermediateResults):
        _utils.printFeatureImages(filteredImages, "filter", printlocation)

    print "Applying nonlinear transduction with Gaussian smoothing"
    featureImages = nonLinearTransducer(img, filteredImages,
                                        M_transducerWindowSize, sigmaWeight,
                                        filters)
    featureImages = removeFeatureImagesWithSmallVariance(
        featureImages, variance_Threshold)

    if (printIntermediateResults):
        _utils.printFeatureImages(featureImages, "feature", printlocation)

    # Build one feature vector per pixel and normalize it; spatialWeight is passed
    # through to the normalization step.
    featureVectors = _utils.constructFeatureVectors(featureImages, img)
    featureVectors = _utils.normalizeData(featureVectors,
                                          False,
                                          spatialWeight=spatialWeight)

    print "Clustering..."
    labels = _utils.clusterFeatureVectors(featureVectors, k_clusters)
    _utils.printClassifiedImage(labels, k_clusters, img, outfile, greyOutput)
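
A minimal sketch of a command-line driver for runGabor follows. The flag spellings and default values are assumptions, not the project's actual CLI; only the attribute names (infile, outfile, M, k, gk, spw, gamma, sigma, psi, vt, fi, R, siw, c, i) are taken from what the function above actually reads.

import argparse

def build_gabor_parser():
    # Hypothetical parser; flag names and defaults are illustrative only.
    p = argparse.ArgumentParser(description='Gabor texture segmentation (sketch)')
    p.add_argument('-infile', required=True, help='input greyscale image')
    p.add_argument('-outfile', required=True, help='classified output image')
    p.add_argument('-k', type=int, required=True, help='number of clusters')
    p.add_argument('-gk', type=int, required=True, help='Gabor kernel size')
    p.add_argument('-M', type=int, required=True, help='transducer window size (odd)')
    p.add_argument('-spw', type=float, default=1.0, help='spatial weight')
    p.add_argument('-gamma', type=float, default=1.0, help='Gabor gamma')
    p.add_argument('-sigma', type=float, default=1.0, help='Gabor sigma')
    p.add_argument('-psi', type=float, default=0.0, help='Gabor psi')
    p.add_argument('-vt', type=float, default=0.0001, help='variance threshold')
    p.add_argument('-fi', type=int, default=100, help='max number of feature images')
    p.add_argument('-R', type=float, default=0.95, help='filter-selection threshold')
    p.add_argument('-siw', type=float, default=0.5, help='sigma weight for smoothing')
    p.add_argument('-c', action='store_true', help='grey-level output')
    p.add_argument('-i', action='store_true', help='print intermediate results')
    return p

if __name__ == '__main__':
    runGabor(build_gabor_parser().parse_args())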
Code example #2
def runMoments(args):

    infile = args.infile
    if (not os.path.isfile(infile)):
        print infile, ' is not a file!'
        exit(0)

    outfile = args.outfile
    printlocation = os.path.dirname(os.path.abspath(outfile))
    _utils.deleteExistingSubResults(printlocation)

    k_clusters = args.k
    greyOutput = args.c
    printIntermediateResults = args.i

    W = args.W
    if ((W % 2) == 0):
        print 'size of moments window is not odd, using next odd number'
        W += 1

    L_transducerWindowSize = args.L
    if ((L_transducerWindowSize % 2) == 0):
        print 'size of transducer window is not odd, using next odd number'
        L_transducerWindowSize += 1

    pqThreshold = args.pq
    spatialWeight = args.spw
    bruteForceMomentCalc = args.b
    img = cv2.imread(infile, cv2.CV_LOAD_IMAGE_GRAYSCALE)

    print "Applying moment masks; if brute force was selected, please wait..."
    momentImages = getAllMomentImages(img, W, pqThreshold,
                                      bruteForceMomentCalc)

    if (printIntermediateResults):
        _utils.printFeatureImages(momentImages, "filter", printlocation)

    print "Applying nonlinear transduction with averaging"
    featureImages = nonLinearTransducer(img, momentImages,
                                        L_transducerWindowSize)

    if (printIntermediateResults):
        _utils.printFeatureImages(featureImages, "feature", printlocation)

    featureVectors = _utils.constructFeatureVectors(featureImages, img)
    featureVectors = _utils.normalizeData(featureVectors,
                                          True,
                                          spatialWeight=spatialWeight)

    print "Clustering..."
    labels = _utils.clusterFeatureVectors(featureVectors, k_clusters)
    _utils.printClassifiedImage(labels, k_clusters, img, outfile, greyOutput)
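
Since runMoments only reads attributes off args, a quick way to exercise it without a full parser is an argparse.Namespace. The sketch below uses purely illustrative values; only the attribute names match what the function reads.

from argparse import Namespace

moment_args = Namespace(infile='texture.png',     # hypothetical input image
                        outfile='segmented.png',  # hypothetical output path
                        k=4,                      # number of clusters
                        W=7,                      # moments window size (odd)
                        L=15,                     # transducer averaging window (odd)
                        pq=2,                     # pq threshold for the moment masks
                        spw=1.0,                  # spatial weight
                        b=False,                  # brute-force moment calculation
                        c=False,                  # grey-level output
                        i=False)                  # print intermediate results
runMoments(moment_args)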
Code example #3
def runMoments(args):

    infile = args.infile
    if (not os.path.isfile(infile)):
        print(infile, ' is not a file!')
        exit(0)

    outfile = args.outfile
    printlocation = os.path.dirname(os.path.abspath(outfile))
    _utils.deleteExistingSubResults(printlocation)

    k_clusters = args.k
    greyOutput = args.c
    printIntermediateResults = args.i

    W = args.W
    if ((W % 2) == 0):
        print('size of moments window is not odd, using next odd number')
        W += 1

    L_transducerWindowSize = args.L
    if ((L_transducerWindowSize % 2) == 0):
        print('size of transducer window is not odd, using next odd number')
        L_transducerWindowSize += 1

    pqThreshold = args.pq
    spatialWeight = args.spw
    bruteForceMomentCalc = args.b
    ################################
    # input may be an image, or a txt file
    ################################
    if infile[-3:] != 'txt':
        img = cv2.imread(infile, 0)
    else:
        test = np.loadtxt(infile)
        img = (test / test.max() * 255).astype('uint8')

    print("Applying moment masks; if brute force was selected, please wait...")
    momentImages = getAllMomentImages(img, W, pqThreshold,
                                      bruteForceMomentCalc)

    if (printIntermediateResults):
        _utils.printFeatureImages(momentImages, "filter", printlocation)

    print("Applying nonlinear transduction with averaging")
    featureImages = nonLinearTransducer(img, momentImages,
                                        L_transducerWindowSize)

    if (printIntermediateResults):
        _utils.printFeatureImages(featureImages, "feature", printlocation)

    featureVectors = _utils.constructFeatureVectors(featureImages, img)
    featureVectors = _utils.normalizeData(featureVectors,
                                          True,
                                          spatialWeight=spatialWeight)

    print("Clustering...")
    labels = _utils.clusterFeatureVectors(featureVectors, k_clusters)
    labels = _utils.printClassifiedImage(labels, k_clusters, img, outfile,
                                         greyOutput)
    plt.subplot(121)
    plt.imshow(img)
    plt.xticks([])
    plt.yticks([])
    plt.subplot(122)
    plt.imshow(labels.reshape(img.shape))
    plt.xticks([])
    plt.yticks([])
    plt.tight_layout()
    plt.savefig(outfile)
    plt.close()
Code example #4
def runGabor(infile, outfile, k, gk, M, **args):

    # infile = args.infile
    if (not os.path.isfile(infile)):
        print(infile, ' is not a file!')
        exit(0)

    # outfile = args.outfile
    printlocation = os.path.dirname(outfile)
    _utils.deleteExistingSubResults(printlocation)

    M_transducerWindowSize = M
    if ((M_transducerWindowSize % 2) == 0):
        # print('Gaussian window size not odd, using next odd number')
        M_transducerWindowSize += 1
    # Number of clusters, i.e. how many segments the image is split into
    k_clusters = k
    # Size of the Gabor kernel
    k_gaborSize = gk

    # Unpack the remaining keyword arguments
    spatialWeight = args['spw']
    gammaSigmaPsi = []
    gammaSigmaPsi.append(args['gamma'])
    gammaSigmaPsi.append(args['sigma'])
    gammaSigmaPsi.append(args['psi'])
    variance_Threshold = args['vt']
    howManyFeatureImages = args['fi']
    R_threshold = args['R']
    sigmaWeight = args['siw']
    greyOutput = args['c']
    printIntermediateResults = args['i']

    # Load the image
    img = cv2.imread(infile, cv2.IMREAD_GRAYSCALE)
    # Get the lambda values; they determine the sampling rate
    lambdas = getLambdaValues(img)
    # Build the Gabor filters
    filters = build_filters(lambdas, k_gaborSize, gammaSigmaPsi)

    # print("Gabor kernels created, getting filtered images")

    # Apply the Gabor filters to the input image
    filteredImages = getFilterImages(filters, img)
    # Select which filters to use
    filteredImages = filterSelection(filteredImages, R_threshold, img,
                                     howManyFeatureImages)

    if (printIntermediateResults):
        _utils.printFeatureImages(filteredImages, "filter", printlocation,
                                  infile)

    # print("Applying nonlinear transduction with Gaussian smoothing")

    featureImages = nonLinearTransducer(img, filteredImages,
                                        M_transducerWindowSize, sigmaWeight,
                                        filters)
    # Discard feature images with small variance
    featureImages = removeFeatureImagesWithSmallVariance(
        featureImages, variance_Threshold)

    if (printIntermediateResults):
        _utils.printFeatureImages(featureImages, "feature", printlocation,
                                  infile)

    # Construct the feature vectors
    featureVectors = _utils.constructFeatureVectors(featureImages, img)
    # Normalize the feature vectors, then save them
    featureVectors = _utils.normalizeData(featureVectors,
                                          False,
                                          spatialWeight=spatialWeight)
    _utils.printFeatureVectors(printlocation, infile, featureVectors)

    # print("Clustering...")
    labels = _utils.clusterFeatureVectors(featureVectors, k_clusters)
    _utils.printClassifiedImage(labels, k_clusters, img, outfile, greyOutput)
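
This variant takes the main parameters explicitly and the remaining settings as a keyword dictionary. A minimal call sketch follows; the concrete values are illustrative assumptions, while the dictionary keys (spw, gamma, sigma, psi, vt, fi, R, siw, c, i) are the ones the function looks up.

# Illustrative values only; the keys are the lookups performed above.
extra = dict(spw=1.0, gamma=1.0, sigma=1.0, psi=0.0,
             vt=0.0001, fi=100, R=0.95, siw=0.5,
             c=False, i=True)
runGabor('texture.png', 'results/segmented.png', k=4, gk=31, M=11, **extra)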
Code example #5
def runGabor(args):
    infile = args.infile
    if (not os.path.isfile(infile)):
        print(infile, " is not a file!")
        exit(0)

    outfile = args.outfile
    printlocation = os.path.dirname(os.path.abspath(outfile))
    _utils.deleteExistingSubResults(printlocation)

    M_transducerWindowSize = args.M
    # M == 0 disables the nonlinear transducer step below.
    on_off_M = (M_transducerWindowSize != 0)
    if ((M_transducerWindowSize % 2) == 0):
        print('Gaussian window size not odd, using next odd number')
        M_transducerWindowSize += 1

    k_clusters = args.k
    k_gaborSize = args.gk

    spatialWeight = args.spw
    gammaSigmaPsi = []
    gammaSigmaPsi.append(args.gamma)
    gammaSigmaPsi.append(args.sigma)
    gammaSigmaPsi.append(args.psi)
    variance_Threshold = args.vt
    howManyFeatureImages = args.fi
    R_threshold = args.R
    sigmaWeight = args.siw
    greyOutput = args.c
    printIntermediateResults = args.i

    if int(cv2.__version__[0]) >= 3:
        img = cv2.imread(infile, 0)
    else:
        img = cv2.imread(infile, cv2.CV_LOAD_IMAGE_GRAYSCALE)

    img = cv2.resize(img, (1200, 1200), interpolation=cv2.INTER_NEAREST)

    lambdas = getLambdaValues(img)
    filters = build_filters(lambdas, k_gaborSize, gammaSigmaPsi)

    print("Gabor kernels created, getting filtered images")
    filteredImages = getFilterImages(filters, img)
    filteredImages = filterSelection(filteredImages, R_threshold, img,
                                     howManyFeatureImages)
    if (printIntermediateResults):
        _utils.printFeatureImages(filteredImages, "filter", printlocation)

    print("Applying nonlinear transduction with Gaussian smoothing")
    if on_off_M:
        featureImages = nonLinearTransducer(img, filteredImages,
                                            M_transducerWindowSize,
                                            sigmaWeight, filters)
    else:
        featureImages = filteredImages
    featureImages = removeFeatureImagesWithSmallVariance(
        featureImages, variance_Threshold)

    if (printIntermediateResults):
        _utils.printFeatureImages(featureImages, "feature", printlocation)

    featureVectors = _utils.constructFeatureVectors(featureImages, img)
    featureVectors = _utils.normalizeData(featureVectors,
                                          False,
                                          spatialWeight=spatialWeight)

    temp_otfs = os.path.split(outfile)
    outfile_csv = os.path.join(temp_otfs[0],
                               temp_otfs[1].split('.')[0] + '.csv')
    save_csv(featureVectors, outfile_csv)

    print("Clustering...")
    labels = _utils.clusterFeatureVectors(featureVectors, k_clusters)
    _utils.printClassifiedImage(labels, k_clusters, img, outfile, greyOutput)
Code example #6
def runGabor(args):

    infile = args.infile
    if (not os.path.isfile(infile)):
        print(infile, " is not a file!")
        exit(0)

    outfile = args.outfile
    printlocation = os.path.dirname(os.path.abspath(outfile))
    _utils.deleteExistingSubResults(printlocation)

    M_transducerWindowSize = args.M
    if ((M_transducerWindowSize % 2) == 0):
        print('Gaussian window size not odd, using next odd number')
        M_transducerWindowSize += 1

    k_clusters = args.k
    k_gaborSize = args.gk

    spatialWeight = args.spw
    gammaSigmaPsi = []
    gammaSigmaPsi.append(args.gamma)
    gammaSigmaPsi.append(args.sigma)
    gammaSigmaPsi.append(args.psi)
    variance_Threshold = args.vt
    howManyFeatureImages = args.fi
    R_threshold = args.R
    sigmaWeight = args.siw
    greyOutput = args.c
    printIntermediateResults = args.i
    if infile[-3:] != 'txt':
        if int(cv2.__version__[0]) >= 3:
            img = cv2.imread(infile, 0)
        else:
            img = cv2.imread(infile, cv2.CV_LOAD_IMAGE_GRAYSCALE)
    else:
        test = np.loadtxt(infile)
        img = (test / test.max() * 255).astype('uint8')

    lambdas = getLambdaValues(img)
    filters = build_filters(lambdas, k_gaborSize, gammaSigmaPsi)

    print("Gabor kernels created, getting filtered images")
    filteredImages = getFilterImages(filters, img)
    filteredImages = filterSelection(filteredImages, R_threshold, img,
                                     howManyFeatureImages)
    if (printIntermediateResults):
        _utils.printFeatureImages(filteredImages, "filter", printlocation)

    print("Applying nonlinear transduction with Gaussian smoothing")
    featureImages = nonLinearTransducer(img, filteredImages,
                                        M_transducerWindowSize, sigmaWeight,
                                        filters)
    featureImages = removeFeatureImagesWithSmallVariance(
        featureImages, variance_Threshold)

    if (printIntermediateResults):
        _utils.printFeatureImages(featureImages, "feature", printlocation)

    featureVectors = _utils.constructFeatureVectors(featureImages, img)
    featureVectors = _utils.normalizeData(featureVectors,
                                          False,
                                          spatialWeight=spatialWeight)

    print("Clustering...")
    labels = _utils.clusterFeatureVectors(featureVectors, k_clusters)
    labels = _utils.printClassifiedImage(labels, k_clusters, img, outfile,
                                         greyOutput)
    plt.subplot(121)
    plt.imshow(img)
    plt.xticks([])
    plt.yticks([])
    plt.subplot(122)
    plt.imshow(labels.reshape(img.shape))
    plt.xticks([])
    plt.yticks([])
    plt.tight_layout()
    plt.savefig(outfile)
    plt.close()