Code example #1
File: run_indexing.py  Project: rgaiacs/imagesearch
import csv
import os
from glob import glob

import numpy as np

# Project-local modules; the exact import paths below are assumptions based on
# the names used in the function body.
import train_cnn_tensorFlow
import image_feature_extraction_tensorFlow
import inception_feature_extraction
from parameters import Parameters  # assumed location of the Parameters class


def cnn_features_extraction(path_database, path_retrieval, path_cnn_trained,
                            folders, image_format, feature_extraction_method,
                            list_of_parameters):

    if(feature_extraction_method == "cnn_training" or feature_extraction_method == "cnn_probability_training"):
        parameters = Parameters(256, path_database, folders, image_format,
                                path_database + "database/", list_of_parameters)
        
        # run the training step only if it is required
        if(parameters.NUM_EPOCHS > 0):
            _,_ = train_cnn_tensorFlow.train(parameters)
        
        file = path_database + "features/result" + "_" + feature_extraction_method + ".csv"
        
        if os.path.isfile(file) and parameters.NUM_EPOCHS == 0:
            # reuse the cached feature vectors and rebuild the list of database file names
            fname_database = []
            with open(file) as csv_file:
                reader = csv.reader(csv_file, delimiter=',')
                feature_vectors_database = np.array(list(reader)).astype('float')
            for f in folders:
                for name in glob(f + '*.' + image_format):
                    fname_database.append(name)
        else:
            feature_vectors_database, _, fname_database = image_feature_extraction_tensorFlow.features_extraction(parameters)
            np.savetxt(file, feature_vectors_database,delimiter = ',')
        
        #calling the extraction of features for the retrieval images
        parameters.PATH_TEST = path_retrieval
        parameters.CLASSES = []
        feature_vectors_retrieval,ims_retrieval,_ = image_feature_extraction_tensorFlow.features_extraction(parameters)
        
        return fname_database, feature_vectors_database, ims_retrieval, feature_vectors_retrieval, file
    
    elif(feature_extraction_method == "cnn" or feature_extraction_method == "cnn_probability"):
        
        file = path_database + "features/result" + "_" + feature_extraction_method + ".csv"
        
        if os.path.isfile(file):
            # reuse the cached feature vectors and rebuild the list of database file names
            fname_database = []
            with open(file) as csv_file:
                reader = csv.reader(csv_file, delimiter=',')
                feature_vectors_database = np.array(list(reader)).astype('float')
            for f in folders:
                for name in glob(f + '*.' + image_format):
                    fname_database.append(name)
        else:
            feature_vectors_database, fname_database, _ = inception_feature_extraction.features_extraction(
                path_database + "database/", path_cnn_trained, image_format,
                feature_extraction_method, True)
            np.savetxt(file, feature_vectors_database, delimiter=',')
            
        feature_vectors_retrieval, _, ims_retrieval = inception_feature_extraction.features_extraction(
            path_retrieval, path_cnn_trained, image_format,
            feature_extraction_method, False)
        return fname_database, feature_vectors_database, ims_retrieval, feature_vectors_retrieval, file
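
A minimal usage sketch for cnn_features_extraction, assuming the pycbir layout in which path_database contains a database/ subfolder with one folder per class; the paths and the contents of list_of_parameters below are illustrative assumptions, not values taken from the project.

# Hypothetical invocation (paths and parameter values are illustrative only).
from glob import glob

path_database = "/data/pycbir/"                 # must contain a "database/" subfolder
path_retrieval = "/data/pycbir/retrieval/"      # folder with the query images
folders = glob(path_database + "database/*/")   # one sub-folder per class

fname_db, feats_db, query_ims, query_feats, csv_file = cnn_features_extraction(
    path_database,
    path_retrieval,
    path_cnn_trained="",                        # only read by the "cnn"/"cnn_probability" branch
    folders=folders,
    image_format="jpg",
    feature_extraction_method="cnn_training",
    list_of_parameters=["0.001", "10"],         # assumed: training hyper-parameters (e.g. learning rate, epochs)
)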
Code example #2
import csv
import os
from glob import glob

import numpy as np
from skimage.io import imread  # assumption: the original project may use scipy.misc.imread instead

# Project-local helpers (get_extension, CNN_feature_extraction, Parameters,
# train_cnn_tensorFlow, image_feature_extraction_tensorFlow,
# get_number_of_features, descriptor_all_database, searching, show_retrieval)
# are assumed to be imported from the pycbir package.


def run_command_line(path_database, path_retrieval, path_cnn_trained,
                     feature_extraction_method, distance, number_of_images,
                     list_of_parameters):
    '''
    This is the main function of the pycbir project; the graphical interface calls it.
    Parameters:
        path_database:
            Complete path of the database folder.
        path_retrieval:
            Complete path of the folder with the retrieval (query) images, or the
            complete path of a single retrieval image.
    '''

    folders = glob(path_database + 'database/*/')
    image_format = get_extension(folders)

    # particular case: features are extracted with the pre-trained Inception network
    if feature_extraction_method == 'cnn' or feature_extraction_method == 'cnn_probability':
        if not ("." + image_format) in path_retrieval:
            #ims_database,fname_database,feature_vectors_database,ims_retrieval,_,feature_vectors_retrieval = CNN_feature_extraction.cnn_features_extraction_probability(path_database, path_retrieval, 0, image_format,feature_extraction_method,1,list_of_parameters)
            ims_database, fname_database, feature_vectors_database, ims_retrieval, _, feature_vectors_retrieval = CNN_feature_extraction.cnn_features_extraction_using_tensorFlow(
                path_database, path_retrieval, path_cnn_trained, image_format,
                feature_extraction_method)

        else:  # retrieval for a single image still uses the old Theano implementation...
            ims_database, fname_database, feature_vectors_database, ims_retrieval, _, feature_vectors_retrieval = CNN_feature_extraction.cnn_features_extraction_probability(
                path_database, path_retrieval, -1, image_format,
                feature_extraction_method, 1, list_of_parameters)

    #training the CNN with the database
    elif feature_extraction_method == 'cnn_training' or feature_extraction_method == 'cnn_probability_training':
        if not ("." + image_format) in path_retrieval:

            parameters = Parameters(256, path_database, folders, image_format,
                                    path_database + "database/",
                                    list_of_parameters)

            #calling the training process
            if (parameters.NUM_EPOCHS > 0):
                ims_database, fname_database = train_cnn_tensorFlow.train(
                    parameters)

            #calling the extraction of features for the database images
            feature_vectors_database, ims_database, fname_database = image_feature_extraction_tensorFlow.features_extraction(
                parameters)

            #calling the extraction of features for the retrieval images
            parameters.PATH_TEST = path_retrieval
            parameters.CLASSES = []
            feature_vectors_retrieval, ims_retrieval, _ = image_feature_extraction_tensorFlow.features_extraction(
                parameters)
            feature_vectors_retrieval = feature_vectors_retrieval[:, :-1]

            # working version calling the old Theano network:
            #ims_database,fname_database,feature_vectors_database,ims_retrieval,_,feature_vectors_retrieval = CNN_feature_extraction.cnn_features_extraction_probability(path_database, path_retrieval, 0, image_format,feature_extraction_method,1,list_of_parameters)
            # working version calling the old TensorFlow network:
            #ims_database,fname_database,feature_vectors_database,ims_retrieval,_,feature_vectors_retrieval = CNN_feature_extraction.cnn_training_using_tensorFlow(path_database, path_retrieval, path_cnn_trained, image_format,feature_extraction_method,list_of_parameters)

        else:  # retrieval for a single image still uses the old Theano implementation...
            ims_database, fname_database, feature_vectors_database, ims_retrieval, _, feature_vectors_retrieval = CNN_feature_extraction.cnn_features_extraction_probability(
                path_database, path_retrieval, -1, image_format,
                feature_extraction_method, 1, list_of_parameters)

    else:
        # check whether features were already computed for this descriptor/parameter combination
        parameters_name = ""
        for parameter in list_of_parameters:
            parameters_name = parameters_name + "_" + parameter

        file = path_database + "features/result" + "_" + feature_extraction_method + parameters_name + ".csv"
        if os.path.isfile(file):
            # reuse the cached feature vectors and reload the database images
            fname_database = []
            with open(file) as csv_file:
                reader = csv.reader(csv_file, delimiter=',')
                feature_vectors_database = np.array(list(reader)).astype('float')
            ims_database = []
            for f in folders:
                for name in glob(f + '*.' + image_format):
                    fname_database.append(name)
                    ims_database.append(imread(name))

        else:
            #get the number of features
            number_of_features = get_number_of_features(
                folders, image_format, feature_extraction_method,
                list_of_parameters)

            #computing features for the database
            ims_database, fname_database, feature_vectors_database = descriptor_all_database(
                path_database + 'database/', folders, image_format,
                feature_extraction_method, number_of_features,
                list_of_parameters)
            np.savetxt(file, feature_vectors_database, delimiter=',')

        #get the number of features
        number_of_features = get_number_of_features(folders, image_format,
                                                    feature_extraction_method,
                                                    list_of_parameters)
        #computing features for the retrieval image(s)
        if not ("." + image_format) in path_retrieval:
            ims_retrieval, _, feature_vectors_retrieval = descriptor_all_database(
                path_retrieval, [], image_format, feature_extraction_method,
                number_of_features, list_of_parameters)
        else:
            print(path_retrieval)
            ims_retrieval, _, feature_vectors_retrieval = descriptor_all_database(
                path_retrieval, -1, image_format, feature_extraction_method,
                number_of_features, list_of_parameters)

    #compute the ranked outputs
    result = searching(feature_vectors_database[:, :-1],
                       feature_vectors_database[:, -1],
                       feature_vectors_retrieval,
                       similarity_metric=distance)
    #show the ranked output
    show_retrieval(ims_database, ims_retrieval, result, number_of_images,
                   path_database, feature_extraction_method, distance,
                   fname_database, feature_vectors_database[:, -1], folders)
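
For completeness, a hedged sketch of how a script or the GUI might call run_command_line; the paths, method name, distance label, and parameter list are assumptions for illustration, not values from the project.

# Hypothetical call (all values illustrative): retrieve the 10 most similar
# database images for every image in the retrieval folder.
run_command_line(
    path_database="/data/pycbir/",
    path_retrieval="/data/pycbir/retrieval/",
    path_cnn_trained="",                    # only needed for the "cnn"/"cnn_probability" methods
    feature_extraction_method="cnn_training",
    distance="ed",                          # assumed label for Euclidean distance
    number_of_images=10,
    list_of_parameters=["0.001", "10"],     # assumed training hyper-parameters
)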