Example #1
import pickle

def gera_GABOR_GLCM_LPB_features(sujeito):
    lstImagens = input.load_image('./Publication_Dataset/' + sujeito +
                                  '/TIFFs/8bitTIFFs/')

    volumeFeatures = []

    for imagem in lstImagens:
        frame_denoise = input.apply_filter(imagem, 'anisotropic')
        bdValue, new = flatten.flat_image(frame_denoise)
        crop = cropping.croppy_mona(new, bdValue)
        image_features = []

        image_features += features_extraction.apply_gabor(crop).tolist()
        crop2 = crop.astype(int)  # GLCM is computed over integer gray levels
        image_features += features_extraction.apply_glcm(crop2)
        image_features += features_extraction.apply_lbp(crop).tolist()

        # Concatenate this image's descriptors onto the flat per-volume feature vector
        volumeFeatures += image_features

        # features_extraction.apply_sift(crop)
        # lstSIFTFeatures.append(features_extraction.apply_sift(crop))

    print('gabor', len(volumeFeatures))
    # dictionary = features_extraction.apply_BOW(lstSIFTFeatures)
    # open() raises on failure, so no None check is needed; the context
    # manager also guarantees the file is closed after the dump.
    with open('./gabor_glcm_lbp_repLine/' + sujeito, 'wb') as fileObject:
        print('saving...')
        pickle.dump(volumeFeatures, fileObject)
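The pickled vector can be restored with pickle.load; a minimal sketch, assuming a subject name such as 'AMD6' from the same dataset:

import pickle

with open('./gabor_glcm_lbp_repLine/AMD6', 'rb') as fileObject:
    volumeFeatures = pickle.load(fileObject)
print(len(volumeFeatures))  # one flat Gabor+GLCM+LBP vector per volume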
Example #2
def testarEnquadramento():
    lstImagens = input.load_image(
        './Publication_Dataset/AMD6/TIFFs/8bitTIFFs/')

    for imagem in lstImagens:
        # frame_gauss = input.apply_filter(imagem,'gauss')
        frame_dif = input.apply_filter(imagem, 'anisotropic')
        bdValue, new = flatten.flat_image(frame_dif)
        crop = cropping.croppy_mona(new, bdValue)
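The denoise, flatten, crop sequence above recurs in every example here. A hypothetical helper (not part of the original repo) that factors it out, assuming the same project modules input, flatten, and cropping are in scope:

def preprocess(imagem):
    # Anisotropic denoising, retina flattening, then cropping around the flattened band
    frame_denoise = input.apply_filter(imagem, 'anisotropic')
    bdValue, new = flatten.flat_image(frame_denoise)
    return cropping.croppy_mona(new, bdValue)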
Example #3
import pickle

import numpy as np

def extractFeatures(sujeito):
    lstImagens = input.load_image('./base_interpol/' + sujeito)

    lstGeoFeatures = []

    for imagem in lstImagens:
        imagem = np.asarray(imagem)
        frame_denoise = input.apply_filter(imagem, 'anisotropic')
        bdValue, new = flatten.flat_image(frame_denoise)
        crop = cropping.croppy_mona(new, bdValue)
        lstGeoFeatures.append(geo.run(crop, [crop.shape[0], crop.shape[1]]))

    print('geo', len(lstGeoFeatures), len(lstGeoFeatures[0]))
    # dictionary = features_extraction.apply_BOW(lstSIFTFeatures)
    with open('./geo_features/' + sujeito, 'wb') as fileObject:
        print('saving...')
        pickle.dump(lstGeoFeatures, fileObject)
Example #4
import pickle

def geraGABORFeatures(sujeito):
    lstImagens = input.load_image('./Publication_Dataset/' + sujeito +
                                  '/TIFFs/8bitTIFFs/')

    lstGABORFeatures = []

    for imagem in lstImagens:
        frame_denoise = input.apply_filter(imagem, 'anisotropic')
        bdValue, new = flatten.flat_image(frame_denoise)
        crop = cropping.croppy_mona(new, bdValue)
        lstGABORFeatures.append(features_extraction.apply_gabor(crop).tolist())
        # features_extraction.apply_sift(crop)
        # lstSIFTFeatures.append(features_extraction.apply_sift(crop))

    print('gabor', len(lstGABORFeatures), len(lstGABORFeatures[0]))
    # dictionary = features_extraction.apply_BOW(lstSIFTFeatures)
    with open('./gabor_features/' + sujeito, 'wb') as fileObject:
        print('saving...')
        pickle.dump(lstGABORFeatures, fileObject)
Example #5
        print('size:', len(volumeInLine))
        featuresGeo.append(volumeInLine)
        vetLabelsGeo.append(getClass(vol))

    print('Generating arff file for geo', len(vetLabelsGeo), len(featuresGeo))
    arffGenerator.createArffFile('./geo_features/GEODATASET',
                                 featuresGeo, vetLabelsGeo, 'DME,NORMAL',
                                 len(featuresGeo[0]))
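To sanity-check the generated dataset it can be read back; a minimal sketch, assuming scipy is available and that arffGenerator.createArffFile wrote a standard ARFF file at that path:

from scipy.io import arff

data, meta = arff.loadarff('./geo_features/GEODATASET')
print(meta.names()[:5], len(data))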


# getBaseFeatures()
# extractFeatures("DME7")
# loadFeatures()
# import math

import numpy as np
import matplotlib.pyplot as plt

lstImagens = input.load_image('./base_interpol/DME7')
imagem = lstImagens[2]
# print  lstImagens[3][79][1]
# print  lstImagens[3][76][0]
# map(lambda x: x if x<255 else 0, imagem)

imagem = np.asarray(imagem)
frame_denoise = input.apply_filter(imagem, 'anisotropic')
bdValue, new = flatten.flat_image(frame_denoise)

crop = cropping.croppy_mona(new, bdValue)
plt.figure()
plt.imshow(crop, 'gray')
plt.show()
Example #6
import json

import numpy as np
import tensorflow as tf

# shop_network, sess, x_shop, and train_mode are defined earlier in the original script
y_shop = shop_network.relu6

sess.run(tf.global_variables_initializer())
#shop_path = '/ais/gobi4/fashion/retrieval/test_gallery.json'
shop_path = '/ais/gobi4/fashion/retrieval/alex_full_test_gallery.json'
img_path = '/ais/gobi4/fashion/data/Cross-domain-Retrieval/'
with open(shop_path, 'w') as jsonfile:
    # Read as text so line.split() yields str rather than bytes under Python 3
    with open(
            '/ais/gobi4/fashion/data/Cross-domain-Retrieval/list_test_pairs.txt',
            'r') as f:
        data = f.readlines()
        for line in data:
            line = line.split()
            #print("line[3]:{0}".format(line[3]))
            x = input.load_image(img_path + line[1])
            #x = input.load_image(img_path+line[2])
            x = x.reshape([1, 227, 227, 3])
            feed_dict = {x_shop: x, train_mode: False}
            y = sess.run([y_shop], feed_dict=feed_dict)
            #            y, conv1, lrn1, pool1, conv2, lrn2, pool2, conv3, conv4, conv5,pool3, fc6, relu6_ori, fc7, relu7, fc8  = sess.run([y_shop, shop_network.conv1, shop_network.lrn1, shop_network.pool1, shop_network.conv2, shop_network.lrn2, shop_network.pool2, shop_network.conv3, shop_network.conv4, shop_network.conv5, shop_network.pool3, shop_network.fc6, shop_network.relu6, shop_network.fc7, shop_network.relu7, shop_network.fc8], feed_dict=feed_dict)
            y = np.asarray(y)
            jsondata = {'id': line[2], 'shop_feature': y.tolist()}
            jsonfile.write(json.dumps(jsondata) + '\n')

            #fc8 = np.asarray(fc8)
            #jsondata = {'fc8': fc8.tolist()}
            #jsonfile.write(json.dumps(jsondata)+'\n')

            #relu7 = np.asarray(relu7)
            #jsondata = {'relu7': relu7.tolist()}
street_network.build(rgb=x_street, flag="street", train_mode=train_mode)

y_street = street_network.relu6

sess.run(tf.global_variables_initializer())
street_path = '/ais/gobi4/fashion/retrieval/alex_full_street_features.json'
img_path = '/ais/gobi4/fashion/data/Cross-domain-Retrieval/'
with open(street_path, 'w') as jsonfile:
    # Read as text so line.split() yields str rather than bytes under Python 3
    with open(
            '/ais/gobi4/fashion/data/Cross-domain-Retrieval/test_pairs_category.txt',
            'r') as f:
        #data = random.sample(f.readlines(), 200)
        data = f.readlines()
        for line in data:
            line = line.split()
            # x1 = string.atoi(line[3])
            # y1 = string.atoi(line[4])
            # x2 = string.atoi(line[5])
            # y2 = string.atoi(line[6])
            # x = bbox_input.load_image(img_path+line[1], x1, y1, x2, y2)
            # x = x.reshape([1, 227, 227, 3])
            street_img_path = line[1]  # per-line image path; distinct from the JSON output path above
            x = input.load_image(street_img_path)
            x = x.reshape([1, 227, 227, 3])  # assumed to match the placeholder shape, mirroring the shop branch
            feed_dict = {x_street: x, train_mode: False}
            y = sess.run([y_street], feed_dict=feed_dict)
            y = np.asarray(y)
            jsondata = {'id': line[0], 'street_feature': y.tolist()}
            jsonfile.write(json.dumps(jsondata) + '\n')
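Each output file holds one JSON object per line, so it can be read back line by line; a minimal sketch, assuming the street-feature path written above:

import json

import numpy as np

features = {}
with open('/ais/gobi4/fashion/retrieval/alex_full_street_features.json') as jsonfile:
    for line in jsonfile:
        record = json.loads(line)
        # 'street_feature' holds the relu6 activations dumped above
        features[record['id']] = np.asarray(record['street_feature']).ravel()
print(len(features))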