import os
import pickle as pk
import numpy as np

# get_local_features, train_codebook, get_assignments and build_bow are
# assumed to be defined elsewhere in the project.

def get_features(params):
    # Open the file containing the IDs of the training images
    ID = open(os.path.join(params['root'], params['database'], 'train', 'ImageIDs.txt'), 'r')
    # Extract the features of the image on the first line of ImageIDs.txt
    nom = ID.readline().replace('\n', '')
    desc_train = get_local_features(params, os.path.join(params['root'], params['database'], 'train', 'images', nom + '.jpg'))
    # Extract the features of the remaining training images
    dictrain = dict()
    dictrain[nom] = desc_train
    for line in ID:
        nom = line.replace('\n', '')
        x = get_local_features(params, os.path.join(params['root'], params['database'], 'train', 'images', nom + '.jpg'))
        # Concatenate the features of every image into a single numpy array
        desc_train = np.concatenate((desc_train, x))
        dictrain[nom] = x
    # Close the file
    ID.close()

    # Train the KMeans codebook on the training images only, with 1024 visual words
    paraules = 1024
    codebook = train_codebook(params, desc_train, paraules)
    # Reopen the file containing the IDs of the training images
    ID = open(os.path.join(params['root'], params['database'], 'train', 'ImageIDs.txt'), 'r')

    for line in ID:
        nom = line.replace('\n', '')
        # Compute the assignments for the training images
        assignments = get_assignments(dictrain[nom], codebook)
        # Build the BoW of each training image and fill the dictionary
        dictrain[nom] = build_bow(assignments, codebook, paraules)
    # Close the file
    ID.close()

    # Save the dictionary with the BoW of the training images to "Features.txt"
    # (opened in binary mode, as required by pickle)
    bow_train = open(os.path.join(params['root'], params['database'], 'train', 'Features.txt'), 'wb')
    pk.dump(dictrain, bow_train)
    bow_train.close()

    # Open the file containing the IDs of the validation images
    ID = open(os.path.join(params['root'], params['database'], 'val', 'ImageIDs.txt'), 'r')
    # Create the dictionary for the validation images
    dicval = dict()
    for line in ID:
        nom = line.replace('\n', '')
        # Extract the features of each validation image
        x = get_local_features(params, os.path.join(params['root'], params['database'], 'val', 'images', nom + '.jpg'))
        # Compute the assignments for the validation images
        assignments = get_assignments(x, codebook)
        # Build the BoW of each validation image and fill the dictionary
        dicval[nom] = build_bow(assignments, codebook, paraules)
    # Close the file
    ID.close()

    # Save the dictionary with the BoW of the validation images to "Features.txt"
    bow_val = open(os.path.join(params['root'], params['database'], 'val', 'Features.txt'), 'wb')
    pk.dump(dicval, bow_val)
    bow_val.close()
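
A minimal usage sketch for the function above; the 'root' value is a hypothetical placeholder, and 'TerrassaBuildings900' is the dataset name that appears elsewhere in these examples:

if __name__ == '__main__':
    # Hypothetical configuration; adjust to the local dataset layout
    params = {
        'root': '/path/to/project',         # placeholder, not a real path
        'database': 'TerrassaBuildings900'
    }
    get_features(params)
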
def extract_features(params):

    feats_dic = {}
    
    with open(os.path.join(params['root'],params['root_save'],params['image_lists'],params['split'] + '.txt'),'r') as f:
        image_list = f.readlines()


    # Get trained codebook
    code = pickle.load(open(os.path.join(params['root'],params['root_save'],
                                     params['codebooks_dir'],'codebook_train_val_'
                                     + str(params['descriptor_size']) + "_"
                                     + params['descriptor_type']
                                     + "_" + params['keypoint_type'] + '.cb'),'rb'))
        
    for img in image_list:

        # Strip the trailing newline so dictionary keys match the image names
        img = img.rstrip()
        im = cv2.imread(os.path.join(params['root'],params['database'],params['split'],'images',img))
        im = resize_image(params,im)
        des = get_local_features(params,im)
        assign = get_assignments(code,des)
        feats_dic[img] = build_bow(assign,code)
        
    # Save dictionary to disk with unique name
    save_file = os.path.join(params['root'],params['root_save'],params['feats_dir'],
                             params['split'] + "_" + str(params['descriptor_size']) + "_"
                             + params['descriptor_type'] + "_" + params['keypoint_type'] + '.p')

    pickle.dump(feats_dic,open(save_file,'wb'))
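
The function above relies on helpers that are not shown. A minimal sketch of what they might look like, assuming the codebook is a fitted scikit-learn KMeans model (the original implementations may differ):

import numpy as np

def get_assignments(code, des):
    # Map each local descriptor to its nearest visual word (cluster centre)
    return code.predict(des)

def build_bow(assign, code):
    # Histogram of assignments, one bin per visual word
    n_words = code.cluster_centers_.shape[0]
    bow = np.bincount(assign, minlength=n_words).astype(float)
    # Normalise so image size does not dominate the descriptor
    total = bow.sum()
    return bow / total if total > 0 else bow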
def extract_features(params):

    Train_or_Val_dic = {}
    
    with open(os.path.join(params['root'],params['root_save'],params['image_lists'],params['split'] + '.txt'),'r') as f:
        image_list = f.readlines()
    
   
    descriptors = []
    
    if params['split'] == 'train':
        
        for img in image_list:
            im = cv2.imread(os.path.join(params['root'],params['database'],params['split'],'images',img.rstrip()))
            
            # Resize image
            im = resize_image(params,im)
        
            des=get_local_features(params,im)
            
            if len(descriptors) == 0:
                descriptors = des
            else:
                descriptors = np.vstack((descriptors,des))
            
        code=train_codebook(params,descriptors)
        
        # Save to disk
        pickle.dump(code,open(os.path.join(params['root'],params['root_save'],
                                        params['codebooks_dir'],'codebook_'
                                        + str(params['descriptor_size']) + "_"
                                        + params['descriptor_type']
                                        + "_" + params['keypoint_type'] + '.cb'),'wb'))
    
    # Get trained codebook
    code = pickle.load(open(os.path.join(params['root'],params['root_save'],
                                     params['codebooks_dir'],'codebook_'
                                     + str(params['descriptor_size']) + "_"
                                     + params['descriptor_type']
                                     + "_" + params['keypoint_type'] + '.cb'),'rb'))
        
    for img in image_list:

        # Strip the trailing newline so dictionary keys match the image names
        img = img.rstrip()
        im = cv2.imread(os.path.join(params['root'],params['database'],params['split'],'images',img))
        # Resize image
        im = resize_image(params,im)
        des=get_local_features(params,im)
        assign=get_assignments(code,des)
        Train_or_Val_dic[img] = build_bow(assign,code)
        
    # Save dictionary to disk with unique name
    save_file = os.path.join(params['root'],params['root_save'],params['feats_dir'],
                             params['split'] + "_" + str(params['descriptor_size']) + "_"
                             + params['descriptor_type'] + "_" + params['keypoint_type'] + '.p')

    pickle.dump(Train_or_Val_dic,open(save_file,'wb'))
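
The split above calls train_codebook, which is not shown. A plausible sketch, assuming scikit-learn and that params['descriptor_size'] is the number of visual words (as the codebook filenames suggest):

from sklearn.cluster import MiniBatchKMeans

def train_codebook(params, descriptors):
    # Cluster the stacked local descriptors into a visual vocabulary
    km = MiniBatchKMeans(n_clusters=params['descriptor_size'])
    km.fit(descriptors)
    return km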
Example #4
def get_features(params, pca=None, scaler=None):

    # Read image names
    with open(
            os.path.join(params['root'], params['root_save'],
                         params['image_lists'], params['split'] + '.txt'),
            'r') as f:
        image_list = f.readlines()

    # Initialize keypoint detector and feature extractor
    detector, extractor = init_detect_extract(params)

    # Initialize feature dictionary
    features = {}

    # Get trained codebook
    km = pickle.load(
        open(
            os.path.join(
                params['root'], params['root_save'], params['codebooks_dir'],
                'codebook_' + str(params['descriptor_size']) + "_" +
                params['descriptor_type'] + "_" + params['keypoint_type'] +
                '.cb'), 'rb'))

    for image_name in image_list:
        image_name = image_name.replace('\n', '')
        # Read image
        im = cv2.imread(
            os.path.join(params['root'], params['database'], params['split'],
                         'images', image_name))

        # Resize image
        im = resize_image(params, im)

        # Extract local features
        feats = image_local_features(params, im)

        if feats is not None:

            if params['normalize_feats']:
                feats = normalize(feats)

            # If we scaled training features, apply the same scaling here
            if scaler is not None:
                feats = scaler.transform(feats)

            # Whiten if needed
            if pca is not None:
                feats = pca.transform(feats)

            # Compute assignments
            assignments = get_assignments(km, feats)

            # Generate bow vector
            feats = bow(assignments, km)
        else:
            # Empty features
            feats = np.zeros(params['descriptor_size'])

        # Add entry to dictionary
        features[image_name] = feats

    # Save dictionary to disk with unique name
    save_file = os.path.join(
        params['root'], params['root_save'], params['feats_dir'],
        params['split'] + "_" + str(params['descriptor_size']) + "_" +
        params['descriptor_type'] + "_" + params['keypoint_type'] + '.p')

    pickle.dump(features, open(save_file, 'wb'))
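
init_detect_extract is not shown in this example. A minimal sketch assuming OpenCV's ORB serves as both detector and extractor (the original likely selects by params['keypoint_type'] and params['descriptor_type']):

import cv2

def init_detect_extract(params):
    # ORB acts as both keypoint detector and descriptor extractor
    orb = cv2.ORB_create()
    return orb, orb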
def get_features(params, pca=None, scaler=None):

    # Read image names
    with open(
            os.path.join(params['root'], params['root_save'],
                         params['image_lists'], params['split'] + '.txt'),
            'r') as f:
        image_list = f.readlines()

    # Initialize keypoint detector and feature extractor
    detector, extractor = init_detect_extract(params)

    # Initialize feature dictionary
    features = {}

    # Get trained codebook
    km = pickle.load(
        open(
            os.path.join(
                params['root'], params['root_save'], params['codebooks_dir'],
                'codebook_' + str(params['descriptor_size']) + "_" +
                params['descriptor_type'] + "_" + params['keypoint_type'] +
                '.cb'), 'rb'))

    for image_name in image_list:

        # Strip the trailing newline so dictionary keys match the image names
        image_name = image_name.rstrip()

        # Read image
        im = cv2.imread(
            os.path.join(params['root'], params['database'], params['split'],
                         'images', image_name))

        # Resize image
        im = resize_image(params, im)

        # Extract local features
        feats = image_local_features(im, detector, extractor)

        if feats is not None:

            if params['normalize_feats']:
                feats = normalize(feats)

            # If we scaled training features, apply the same scaling here
            if scaler is not None:
                feats = scaler.transform(feats)

            # Whiten if needed
            if pca is not None:
                feats = pca.transform(feats)

            # Compute assignments
            assignments = get_assignments(km, feats)

            # Generate bow vector
            feats = bow(assignments, km)
        else:
            # Empty features
            feats = np.zeros(params['descriptor_size'])

        # Add entry to dictionary
        features[image_name] = feats

    # Save dictionary to disk with unique name
    save_file = os.path.join(
        params['root'], params['root_save'], params['feats_dir'],
        params['split'] + "_" + str(params['descriptor_size']) + "_" +
        params['descriptor_type'] + "_" + params['keypoint_type'] + '.p')

    pickle.dump(features, open(save_file, 'wb'))
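
The pca and scaler arguments above are optional pre-fitted transformers. One way they might be produced, assuming scikit-learn (the descriptor matrix below is a dummy stand-in, not project data):

import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

# Dummy stand-in for stacked training descriptors (e.g. 1000 x 128)
train_descriptors = np.random.rand(1000, 128)
scaler = StandardScaler().fit(train_descriptors)
pca = PCA(whiten=True).fit(scaler.transform(train_descriptors))
# params is the project's configuration dict used throughout these examples
get_features(params, pca=pca, scaler=scaler)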
Example #6
from os import path

import numpy as np
from sklearn.preprocessing import normalize  # assumed; the original import is not shown

base_dir = path.dirname(__file__)
terrassa_buildings = '../TB2016/'



def build_bow(assignments, n):
    # Initialise a zero vector with one bin per cluster
    descriptor = np.zeros((n,))

    # Build a histogram of repetitions: each assignment votes for its cluster
    for n_assig in assignments:
        descriptor[n_assig] += 1

    # L2 normalize (sklearn's normalize expects a 2-D array)
    descriptor = normalize(descriptor.reshape(1, -1))[0]

    return descriptor


# Quick check that it works
descriptor1 = get_local_features(path.join(terrassa_buildings, "../train/images/aaeoeolbth.jpg"))
codebook = train_codebook(5, descriptor1)
descriptor2 = get_local_features(path.join(terrassa_buildings, "../val/images/aalfirydrf.jpg"))
assig = get_assignments(codebook, descriptor2)

# Build an ordered vector with the descriptor counts for each region (k=5)
asdf = build_bow(assig, 50)
print(asdf)
print("Number of different regions: " + str(len(asdf)))
Example #7
    dictrain[nom] = x
# Close the file
ID.close()

# Train the KMeans codebook on the training images only, with 1024 visual words
paraules = 1024
codebook = train_codebook(params, desc_train, paraules)

# Reopen the file containing the IDs of the training images
ID = open(
    os.path.join(params['root'], params['database'], 'train', 'ImageIDs.txt'),
    'r')
for line in ID:
    nom = line.replace('\n', '')
    # Compute the assignments for the training images
    assignments = get_assignments(dictrain[nom], codebook)
    # Build the BoW of each training image and fill the dictionary
    dictrain[nom] = build_bow(assignments, codebook, paraules)
# Close the file
ID.close()

# Save the dictionary with the BoW of the training images to "Features.txt"
# (opened in binary mode, as required by pickle)
bow_train = open(
    os.path.join(params['root'], params['database'], 'train', 'Features.txt'),
    'wb')
pk.dump(dictrain, bow_train)
bow_train.close()

# Open the file containing the IDs of the validation images
ID = open(
    os.path.join(params['root'], params['database'], 'val', 'ImageIDs.txt'),