def __call__(self, data_x, batch_size=32, lomo_=0, lomo_config=None):
        # Run the CNN appearance encoder over all patches in batches.
        out_dad = np.zeros((len(data_x), self.feature_dim), np.float32)
        _run_in_batches(
            lambda x: self.session.run(self.output_var, feed_dict=x),
            {self.input_var: data_x}, out_dad, batch_size)

        ##### ADD NEW FEATURES HERE ####
        if lomo_ == 1:
            # Compute a LOMO descriptor per patch and append it to the CNN features.
            out_ = []
            for patch in data_x:
                image = cv2.resize(patch, (32, 64))
                out_.append(lomo.LOMO(image, lomo_config))
            out = np.concatenate((out_dad, np.asarray(out_, np.float32)), axis=1)

        else:
            out = out_dad

        return out
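A usage sketch for the modified encoder above, assuming it is the __call__ of a Deep SORT-style ImageEncoder; the class name, model path, patch size and image paths below are placeholders rather than values from the original project:

import json

import cv2
import numpy as np

encoder = ImageEncoder('networks/mars-small128.pb')   # placeholder encoder class / model path

with open('config.json', 'r') as f:
    lomo_config = json.load(f)

# Patches must already match the network's expected input size (assumed 128x64 here).
patches = np.stack([cv2.resize(cv2.imread(p), (64, 128))
                    for p in ['crop_0.png', 'crop_1.png']])   # placeholder crops

features = encoder(patches, batch_size=32, lomo_=1, lomo_config=lomo_config)
print(features.shape)   # (2, feature_dim + LOMO descriptor length)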
Example No. 2
import os

import json
import cv2

import lomo

data_path = 'data'
img_list = os.listdir(data_path)
if len(img_list) == 0:
    print('Data directory is empty.')
    exit()

with open('config.json', 'r') as f:
    config = json.load(f)

for img_name in img_list:
    if img_name == '.gitkeep':
        continue

    img = cv2.imread(os.path.join(data_path, img_name))

    lomo_desc = lomo.LOMO(img, config)

    print('Lomo feature size:', lomo_desc.shape[0])
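The config passed to lomo.LOMO here is loaded from config.json; the key names that Example No. 3 below reads (config['lomo']['hsv_bin_size'], 'R_list', the block sizes and patch_width) suggest a structure like the sketch that follows. The values are assumptions, roughly the defaults from the LOMO paper, not taken from any particular repository:

import json

# Hypothetical config.json contents; key names mirror the lookups in Example No. 3,
# values are assumed defaults from the LOMO paper.
config = {
    'lomo': {
        'hsv_bin_size': 8,       # 8*8*8 joint HSV histogram
        'block_size_row': 10,    # 10x10 sliding sub-windows...
        'block_step_row': 5,     # ...with an overlap step of 5 pixels
        'block_size_col': 10,
        'block_step_col': 5,
        'R_list': [3, 5],        # SILTP radii (two scales)
        'patch_width': 128
    }
}

with open('config.json', 'w') as f:
    json.dump(config, f, indent=4)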
Example No. 3
def op_lomo_extractor(keypoints, config, bgr_image, show_patch=False):
    """
    Returns:
        features : vector of length n_features
            A LOMO descriptor of different body parts, computed from a set of
            OpenPose keypoints associated with a player.
        conf : float
            Confidence / strength of the signal.

    LOMO [4] is a descriptor for person re-identification that divides each image
    into horizontal bands and keeps the maximal bins of the color and texture
    histograms in each stripe. The code is adapted here to run on individual body
    parts.
    https://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Liao_Person_Re-Identification_by_2015_CVPR_paper.pdf

    Other features could be added, but they would need to reflect a constant
    property of the player (invariant to the position on the field and the current
    body pose), e.g. the arm-length / leg-length ratio. Since the camera has a
    single perspective on the field, 2D pose estimation makes it hard to estimate
    the lengths of body parts (or their ratios).
    """

    block_size_col = config['lomo']['block_size_col']
    block_step_col = config['lomo']['block_step_col']

    block_size_row = config['lomo']['block_size_row']
    block_step_row = config['lomo']['block_step_row']
    patch_width = config['lomo']['patch_width']
    conf = 0
    features = []

    """(8 ∗ 8 ∗ 8 color
                bins + 3^4 ∗ 2 SILTP bins ) ∗ (24 + 11 + 5 horizontal groups
                ) = 26, 960 dimensions"""
    n_lomo_features = ( config['lomo']['hsv_bin_size'] ** 3 + 3 ** 4 * len(config['lomo']['R_list']) ) * (
                        int((patch_width - (block_size_row - block_step_row)) / block_step_row) +
                        int(int(patch_width / 2 - (block_size_row - block_step_row)) / block_step_row) +
                        int(int(patch_width / 4 - (block_size_row - block_step_row)) / block_step_row)
                           )
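    # Worked example of the count above (assuming the paper's defaults rather than
    # any particular config: hsv_bin_size = 8, two SILTP radii in R_list,
    # patch_width = 128, block_size_row = 10, block_step_row = 5):
    #   bins per block:    8**3 + 3**4 * 2 = 512 + 162 = 674
    #   horizontal groups: (128 - 5)//5 + (64 - 5)//5 + (32 - 5)//5 = 24 + 11 + 5 = 40
    #   n_lomo_features:   674 * 40 = 26,960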

    for k, (a, b, aspect_ratio) in body_parts.items():
        Ax = keypoints[a][0]
        Bx = keypoints[b][0]
        Ay = keypoints[a][1]
        By = keypoints[b][1]

        Aconf = keypoints[a][2]
        Bconf = keypoints[b][2]

        center = ((Ax + Bx)/2, (Ay + By)/2)
        width = int(math.sqrt((Ax - Bx) ** 2 + (Ay - By) ** 2))
        height = int(width * aspect_ratio)



        l = np.zeros(n_lomo_features)

        if Aconf > 0.1 and Bconf > 0.1 and abs(Ax - Bx) > 1 and abs(Ay - By) > 1:

            # theta = 0 corresponds to the case where Ay == By
            # math.atan returns radians, so convert to degrees before mixing with 90/180
            theta = 90 + math.degrees(math.atan((Ay - By) / (Ax - Bx)))
            if (Ax - Bx) < 0:
                theta += 180

            patch = subimage(bgr_image, center, theta, width, height)
            #print("patch shape ", patch.shape)

            if patch.shape[0]>0 and patch.shape[1]>0:
                patch_reshaped = cv2.resize(patch, (patch_width, int(patch_width * aspect_ratio)))
                patch_reshaped = np.transpose(patch_reshaped, [1, 0, 2])

                #print("reshaped patch shape ", patch_reshaped.shape)

                if show_patch:
                    cv2.imshow(k+str(np.random.rand()), patch)
                    cv2.waitKey()
                    cv2.imshow("reshaped_"+k + str(np.random.rand()), patch_reshaped)
                    cv2.waitKey()

                l = lomo.LOMO(patch_reshaped, config)
                l *= Aconf * Bconf
                conf += (Aconf * Bconf) / len(body_parts)


        features += list(l.reshape(-1))

    return np.array(features), conf
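A minimal usage sketch for the extractor above, assuming the OpenPose keypoints for one player come as an array of (x, y, confidence) rows, that config is the same JSON dict used in the other examples, and that body_parts and subimage are the helpers the function already relies on; get_openpose_keypoints and 'frame.jpg' below are placeholders, not part of the original code:

import json

import cv2

with open('config.json', 'r') as f:
    config = json.load(f)

frame = cv2.imread('frame.jpg')              # placeholder input frame
keypoints = get_openpose_keypoints(frame)    # placeholder: one player's (25, 3) keypoint array

features, conf = op_lomo_extractor(keypoints, config, frame, show_patch=False)
print('descriptor length:', features.shape[0], 'confidence:', conf)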
Example No. 4
 print('Processing images in directory ' + data_path + directory)
 
 # initialize dictionary for the feature vectors of all the images (img_name as key)
 lomos = {}
 
 #iterate over images
 for counter, img_name in enumerate(tqdm(img_list[:20], desc='images processed')):
     # verify that file is image -> only perform lomo on images
     if not img_name.lower().endswith(('.jpg','.jpeg','.bmp','.png')):
         continue
     
     # read image
     img = cv2.imread(os.path.join(data_path + directory, img_name))
     
     # compute LOMO feature vector
     lomo_vec = lomo.LOMO(img, config)
     
     # append image and corresponding vector to dict
     lomos[img_name] = lomo_vec
     
 print('directory ' + directory + ' finished')
 
 # write data in dat file
 print('start writing data into file')
 with open(data_path + 'lomo_' + directory + '.dat', 'w') as l_file:
     csvwriter = csv.writer(l_file, dialect='excel', delimiter=',')
     header = lomos.keys()
     csvwriter.writerow(header)
     for element in tqdm(range(lomos[img_name].size), desc='feature rows written'):
         row = []
         # one row per feature dimension, one column per image (matching the header order)
         for key in header:
             row.append(lomos[key][element])
         csvwriter.writerow(row)
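The file written above has one column per image (the header row of file names) and one row per feature dimension. A minimal sketch for loading it back into a dict of NumPy vectors, assuming that layout:

import csv

import numpy as np

def load_lomo_csv(path):
    # Rebuild {img_name: feature_vector} from the column-per-image layout above.
    with open(path, 'r') as f:
        reader = csv.reader(f, dialect='excel', delimiter=',')
        header = next(reader)
        columns = {name: [] for name in header}
        for row in reader:
            for name, value in zip(header, row):
                columns[name].append(float(value))
    return {name: np.array(values) for name, values in columns.items()}

# e.g. lomos = load_lomo_csv(data_path + 'lomo_' + directory + '.dat')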
Example No. 5
import os

import json
import cv2

import lomo

data_path = 'data'
img_list = os.listdir(data_path)
if len(img_list) == 0:
    print('Data directory is empty.')
    exit()

with open('config.json', 'r') as f:
    config = json.load(f)

for img_name in img_list:
    if img_name == '.gitkeep':
        continue

    img = cv2.imread(os.path.join(data_path, img_name))

    lomo_desc = lomo.LOMO(img, config)  # use a name that does not shadow the lomo module

    print('Lomo feature size:', lomo_desc.shape[0])
Example No. 6
def extract_query(initial_id,r_id,cam_id,flag,unique_id):
    '''
    Extract features and perform cosine similarity to determine re-identification.

    Objectives:
    1. Read images extracted from YOLO+DeepSORT and store their features, after which each image is deleted
    2. Compare the features of all images
    3. If a feature match is > 70%, store both IDs to be replaced in the main tracker
    4. Store all initial re-identifications and subsequent updates in a text file
    5. Clear the extracted features and repeat the process

    '''
    reid_full = open('logs/reid_full.txt','w')
    start = time.time()
    
    while not flag.is_set():
        time.sleep(5)
        # reset all stored info when called again (prevents continuous stacking) (Objective 5)
        query_features.clear()
        q_id.clear()
        gallery_features.clear()
        g_id.clear()
        cam_num.clear()
        query_list = sorted(os.listdir('./images/query'))
        gallery_list = sorted(os.listdir('./images/gallery'))

        #Read and Extract features from images (Objective 1)
        for file in query_list:
            image = cv2.imread(os.path.join('./images/query',file))
            result = lomo.LOMO(image,lomo_config)
            query_features.append(result)#Append to list
            q_id.append(file.split('.')[0])
            os.remove('./images/query/'+file)#Remove image after features extracted
            
        for file in gallery_list:
            image = cv2.imread(os.path.join('./images/gallery',file))
            camera_id = file.split('_')[1].split('.')[0]
            result = lomo.LOMO(image,lomo_config)
            gallery_features.append(result)
            g_id.append(file.split('_')[0])
            cam_num.append(camera_id)#Append camera number
            os.remove('./images/gallery/'+file)

        # Comparison of features (Objective 2)
        for cam in range(1,len(source_names)):
            for i in range(len(query_features)):
                highest_score = 0
                for j in range(len(gallery_features)):
                    if int(cam_num[j]) != cam + 1:
                        continue
                    cos_sim = 1 - spatial.distance.cosine(query_features[i],gallery_features[j])

                    # keep only the best match above the 70% similarity threshold
                    if cos_sim > 0.7 and cos_sim > highest_score:
                        highest_score = cos_sim
                        query_num = i
                        gallery_num = j
                # Store the matched IDs in initial_id for replacing (Objective 3)
                if highest_score > 0:
                    #Update
                    if int(g_id[gallery_num]) in initial_id: #If initial camID is already in list
                        index = initial_id.index(int(g_id[gallery_num])) #Get index of camID stored
                        if not r_id[index] == int(q_id[query_num]): #Ensure that the 2 targets are not previously defined as a match
                            r_id[index] = int(q_id[query_num]) #Update value
                            print('ID '+g_id[gallery_num]+' updated to '+q_id[query_num])
                            # (Objective 4)
                            reid_full.write('ID '+g_id[gallery_num]+' updated to '+q_id[query_num]+' at '+str(round(time.time()-start))+' seconds')
                            reid_full.write('\n')
                        else:
                            pass

                    #New creation
                    elif int(q_id[query_num]) not in r_id and int(g_id[gallery_num]) not in initial_id and not (g_id[gallery_num] == q_id[query_num]):
                        initial_id.append(int(g_id[gallery_num]))
                        #Create and append CAM 1 id into r_id which r_id is a global list in the Manager
                        #r_id contains matches with CAM 1
                        r_id.append(int(q_id[query_num]))
                        unique_id.append(unique_prefix+str(len(initial_id)))
                        print(q_id[query_num] +' identified with '+g_id[gallery_num]+' on camera '+cam_num[gallery_num])
                        # (Objective 4)
                        reid_full.write(q_id[query_num] +' identified with '+g_id[gallery_num]+' on camera '+cam_num[gallery_num]+' at '+str(round(time.time()-start))+' seconds')
                        reid_full.write('\n')
    reid_full.close()
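extract_query depends on several module-level objects that are not shown in this snippet (query_features, q_id, gallery_features, g_id, cam_num, lomo_config, source_names, unique_prefix). A minimal launch sketch, assuming those globals exist and following the comments above that describe r_id as a list shared through a Manager; the tracker placeholder and the cam_id value are assumptions:

import multiprocessing as mp

if __name__ == '__main__':
    manager = mp.Manager()
    initial_id = manager.list()   # gallery-camera IDs that have been matched
    r_id = manager.list()         # the CAM 1 IDs they were matched to
    unique_id = manager.list()    # global IDs assigned to each match
    stop_flag = mp.Event()

    # cam_id is passed through unchanged; this snippet does not show how the
    # original project chooses it, so 1 here is a placeholder.
    worker = mp.Process(target=extract_query,
                        args=(initial_id, r_id, 1, stop_flag, unique_id))
    worker.start()

    run_tracker()      # placeholder: YOLO+DeepSORT writing crops into ./images/query and ./images/gallery

    stop_flag.set()    # extract_query leaves its while-loop once the flag is set
    worker.join()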