# Example #1
    def analyze_super_pixels(self, sp_path, safe_path, experiment):
        '''
        analyzes super pixels at given path
        @param sp_path: path to superpixels
        @param safe_path: path to store results
        @param experiment: experiment name
        '''

        if os.path.isfile(safe_path + "/Statistics_" + experiment + ".csv"):
            print "Skipping {0} Analysis - it already exists".format(
                experiment)
            return

        self.log.start("Read SuperPixels from Experiment: {0}"\
                       .format(experiment), 1, 1)
        path_list = path_to_subfolder_pathlist(sp_path, filter=".mat")
        #self.sp_path_list = path_list

        sp_name_list = np.array([
            os.path.splitext(basename(filepath))[0] for filepath in path_list
        ])

        sp_array_list = np.array([
            read_arr_from_matfile(filepath, 'superPixels')
            for filepath in path_list
        ])

        self.log.update()

        self.log.start("Analyze SuperPixels from Experiment: {0}"\
                       .format(experiment), 1, 1)

        num_sp = np.array([len(np.unique(sp)) for sp in sp_array_list])
        num_sp_arg = np.argsort(num_sp)

        stats_sum_file = open(safe_path + '/Stats_Sum_' + experiment + '.txt',
                              'a')
        s = "##################### Statistics for Experiment "+\
            experiment + " #####################\n\n"+\
            "Max SP Num: \t{0}\n".format(num_sp[num_sp_arg[-1]])+\
            "Min SP Num: \t{0}\n".format(num_sp[num_sp_arg[0]])+\
            "Mean SP Num: \t{0}\n".format(np.sum(num_sp)*1.0/len(num_sp))

        stats_sum_file.write(s)
        stats_sum_file.close()

        csv.register_dialect("tab", delimiter="\t", quoting=csv.QUOTE_ALL)
        writer = csv.DictWriter(open(
            safe_path + "/Statistics_" + experiment + ".csv", "wb"),
                                ["Image", "#SuperPixels"],
                                dialect='excel-tab')

        writer.writerow({"Image": "Image", "#SuperPixels": "#SuperPixels"})
        data = [{}]
        for i in range(len(num_sp)):
            data.append({"Image": sp_name_list[i], "#SuperPixels": num_sp[i]})

        writer.writerows(data)

        self.log.update()
def relabel(global_path):
    '''
    relabels EAW superpixels in the way, 
    such that the array has #unique complete clusters
    @param global_path: path to EAW superpixels
    '''

    path_list = utils.path_to_subfolder_pathlist(global_path, filter=".mat")

    relabeled_num = 0
    print path_list
    log = utils.Logger(verbose=True)
    log.start("Relabeling EAW_SuperPixels", len(path_list), 1)
    for path in path_list:
        #print "relabeling {0}".format(path)
        folder = 'none'
        if (True):
            folder = basename(os.path.abspath(os.path.join(path, '..', '..')))
            #im_folder = basename(os.path.abspath(os.path.join(path, '..')))
            target_folder = os.path.abspath(
                os.path.join(
                    path, '..', '..', '..', 'relabeled', folder,
                    basename(os.path.abspath(os.path.join(path, '..')))))
            if not os.path.isdir(target_folder):
                os.makedirs(target_folder)
            #print "Target Folder: {0}".format(target_folder)
            target_path = os.path.abspath(
                os.path.join(path, '..', '..', '..', 'relabeled', folder,
                             target_folder, basename(path)))
            if os.path.isfile(target_path):
                log.update()
                continue
            #print "Target Path: {0}".format(target_path)
            arr = utils.read_arr_from_matfile(path, "ind")

            #print "Relabeling ... {0}".format(basename(path))

            relabel_arr(arr)

            #print "Relabeled ... {0}".format(basename(path))

            utils.write_arr_to_matfile(arr, target_path, "superPixels")
            print "Wrote ... {0}".format(basename(path))
        else:
            print 'Failure in {0}'.format(folder)
            print 'Failure in image {0}'.format(basename(target_path))
            raise
            if folder in [
                    'index_4', 'index_5', 'index_6', 'index_7', 'index_8'
            ]:
                print 'Failure in {0}'.format(folder)
                print 'Failure in image {0}'.format(basename(target_path))
                raise

        log.update()
    print "Relabeled files: {0}/{1}".format(relabeled_num, len(path_list))
def run(path,segments):
    '''
    reads saliency files and converts them into heatmap images to location path/output/
    @param path: path to saliency main folder
    @param segments: folder name to saliency files
    '''
    
    #fh = utils.path_to_subfolder_pathlist("super_pixels",filter=".mat")
    spatial = utils.path_to_subfolder_pathlist(path + '/' + segments,filter=".mat")
    #f = [utils.read_arr_from_matfile(fh[i], "superPixels") for i in range(len(fh))]
    #s = [utils.read_arr_from_matfile(spatial[i],"S") for i in range(len(spatial))]
    if not os.path.exists('output'):
        os.makedirs('output')
    extent = [0,255,0,255]
    for i in range(len(spatial)):
        mat = utils.read_arr_from_matfile(spatial[i],"S")
        if os.path.exists(path + '/' + 'output/'+ basename(dirname(spatial[i])) + '/' + os.path.splitext(basename(spatial[i]))[0]+'.jpg'):
            im = np.array(Image.open(path + '/' + 'output/'+ basename(dirname(spatial[i])) + '/' + os.path.splitext(basename(spatial[i]))[0]+'.jpg'))
            (x,y,_) = im.shape
            (x1,y1) = mat.shape 
            if (x == x1) & (y == y1):
                    

                continue
        
        b = np.zeros(mat.shape)        

        u = np.unique(mat)        
        for j in range(len(u)):
            b[mat==u[j]] = j

        fig = plt.figure(frameon=False)
        
        fig.set_size_inches((mat.shape[1]+0.5)*1.0/100,(mat.shape[0]+0.5)*1.0/100)        
        #fig.set_size_inches(2.56,2.56)
        ax = plt.Axes(fig, [0.,0.,1.,1.])
        ax.set_axis_off()
        fig.add_axes(ax)
        ax.imshow(b, aspect='normal')
        folder = basename(dirname(spatial[i]))
        if not os.path.exists(path + '/' + 'output2/'+folder):
            os.makedirs(path + '/' + 'output2/'+folder)
        fig.savefig(path + '/' + 'output2/'+ folder + '/' + os.path.splitext(basename(spatial[i]))[0]+'.jpg')
        im2 = np.array(Image.open(path + '/' + 'output2/'+ basename(dirname(spatial[i])) + '/' + os.path.splitext(basename(spatial[i]))[0]+'.jpg'))
        (x2,y2,_) = im2.shape
        print '({0},{1} -- {2})'.format(x1-x2,y1-y2,basename(spatial[i]))
        plt.close()
        #print "({0},{1}) -- {2}".format(y1,x1,im2.shape)
        #print "{0}/{1}".format(i+1,len(spatial))
        #plt.savefig('output/'+ os.path.splitext(basename(spatial[i]))[0]+'.jpg')
    print 'done'
# Example #4
def segment(in_folder, output_folder):
    '''
	takes salience segments from in_folder and 
	saves black and white images to output_folder
	'''

    #fh = utils.path_to_subfolder_pathlist("super_pixels",filter=".mat")
    sal_path_list = utils.path_to_subfolder_pathlist(in_folder, filter=".mat")
    #f = [utils.read_arr_from_matfile(fh[i], "superPixels") for i in range(len(fh))]
    s = [
        utils.read_arr_from_matfile(sal_path_list[i], "S")
        for i in range(len(sal_path_list))
    ]

    #extent = [0,255,0,255]
    for i in range(len(s)):

        b = np.ones([s[i].shape[0], s[i].shape[1], 3], dtype=np.uint8)
        #b = np.zeros(s[i].shape)
        #u = np.unique(s[i])
        #for j in range(len(u)):
        #	b[s[i]==u[j]] = j
        #print 'shape is {0}'.format(b.shape)
        print "{0}/{1}".format(i + 1, len(s))
        b = b * 255
        b[:, :, 0] = b[:, :, 0] * s[i]
        b[:, :, 1] = b[:, :, 1] * s[i]
        b[:, :, 2] = b[:, :, 2] * s[i]
        fig = plt.figure(frameon=False)
        fig.set_size_inches((mat.shape[1] + 0.5) * 1.0 / 100,
                            (mat.shape[0] + 0.5) * 1.0 / 100)
        ax = plt.Axes(fig, [0., 0., 1., 1.])
        ax.set_axis_off()
        fig.add_axes(ax)
        ax.imshow(b, aspect='normal')
        fig.savefig(output_folder + '/' +
                    os.path.splitext(basename(sal_path_list[i]))[0] + '.jpg')
        print "{0}/{1}".format(i + 1, len(s))
def eaw_val1(path, eaw_path, fl = [0,1,2], eaw=True):
    '''
    First stage of the EAW validation pipeline: run this before
    eaw_val2 and feed its output into eaw_val2.

    Returns path to the experiments folder, eaw_path to scaling
    functions, folder_list of used scaling functions, prob_path to
    labeling probabilities, prob_paths to labeling probability files,
    and weight_map holding the scaling functions defined by superpixels.
    @param path: path to experiments folder
    @param eaw_path: path to scaling functions
    @param fl: folder list (number array) of used scaling functions
    @param eaw: if False, treat eaw_path as the single experiment folder
                and use uniform weights instead of scaling functions
    '''

    if eaw:
        folder_list = np.array(['EAW_1','EAW_2','EAW_3','EAW_4'])
    else:
        folder_list = np.array([eaw_path])
    eaw_folder = np.array(['level_summed1','level_summed2','level_summed3','level_summed4'])

    # Restrict both name arrays to the requested experiment indices.
    folder_list = folder_list[fl]
    eaw_folder = eaw_folder[fl]

    log = Logger(verbose=True)

    # Per-experiment path to the semantic-label class probabilities.
    prob_path = [path + '/' + folder_list[j] + '/Data/Base/MRF/SemanticLabels/R200K200TNN80-SPscGistCoHist-sc01ratio C00 B.5.1 S0.000 IS0.000 Pcon IPpot Seg WbS1'
                  for j in range(len(folder_list))]

    # One list of probability .mat files per experiment.
    prob_paths = [list(path_to_subfolder_pathlist(prob_path[j], filter='.mat'))
                  for j in range(len(folder_list))]

    # weight_map[experiment][image] -> weight array for that image.
    weight_map = dict((exp_idx, {}) for exp_idx in range(len(folder_list)))

    log.start("Labeling EAW-Results", len(prob_paths[0]), 1)
    # Walk every test image ...
    for img_idx in range(len(prob_paths[0])):
        # ... across every experiment (eaw_x ... eaw_y).
        for exp_idx in range(len(folder_list)):
            rel_dir = os.path.basename(
                os.path.dirname(prob_paths[exp_idx][img_idx]))
            rel_file = os.path.basename(prob_paths[exp_idx][img_idx])

            sp_path = path + '/' + folder_list[exp_idx] + '/' + 'Data/Descriptors/SP_Desc_k200/super_pixels'
            sp = read_arr_from_matfile(
                sp_path + '/' + rel_dir + '/' + rel_file, 'superPixels')

            if eaw:
                # Scaling-function weights stored alongside the image.
                weight_map[exp_idx][img_idx] = read_arr_from_matfile(
                    eaw_path + '/' + eaw_folder[exp_idx] + '/' +
                    rel_dir + '/' + rel_file, 'im')
            else:
                # No scaling functions: fall back to uniform weights.
                weight_map[exp_idx][img_idx] = np.ones(sp.shape)

        log.update()

    return path, eaw_path, folder_list, prob_path, prob_paths, weight_map
# Example #6
def eaw_val1(path, eaw_path, fl = [0,1,2], eaw=True):
    '''
    Summarize ResultMRF from the given experiments folder (path),
    reading the GeoLabels class probabilities.

    NOTE(review): this redefines eaw_val1 (an earlier definition reads
    SemanticLabels; this one reads GeoLabels). Only the later definition
    is visible at runtime — consider renaming one of them.

    Returns path, eaw_path, folder_list, prob_path, prob_paths and
    weight_map, analogous to the SemanticLabels variant.
    @param path: path to experiments folder
    @param eaw_path: path to scaling functions
    @param fl: folder list (number array) of used scaling functions
    @param eaw: if False, treat eaw_path as the single experiment folder
                and use uniform weights instead of scaling functions
    '''

    if eaw:
        folder_list = np.array(['EAW_1','EAW_2','EAW_3','EAW_4'])
    else:
        folder_list = np.array([eaw_path])
    eaw_folder = np.array(['level_summed1','level_summed2','level_summed3','level_summed4'])

    # Restrict both name arrays to the requested experiment indices.
    folder_list = folder_list[fl]
    eaw_folder = eaw_folder[fl]

    log = Logger(verbose=True)

    # Per-experiment path to the geometric-label class probabilities.
    prob_path = [path + '/' + folder_list[j] + '/Data/Base/MRF/GeoLabels/R200K200TNN80-SPscGistCoHist-sc01ratio C00 B.5.1 S0.000 IS0.000 Pcon IPpot Seg WbS1'
                  for j in range(len(folder_list))]

    # One list of probability .mat files per experiment.
    prob_paths = [[p for p in path_to_subfolder_pathlist(prob_path[j], filter='.mat')]
                     for j in range(len(folder_list))]

    # weight_map[i][j]: weights for experiment i, image j.
    # (Removed unused locals `final_labels` and `k`, plus large blocks of
    # commented-out dead code from earlier iterations of this function.)
    weight_map = {}
    for i in range(len(folder_list)):
        weight_map[i] = {}
    log.start("Labeling EAW-Results", len(prob_paths[0]), 1)
    # run over all test images
    for j in range(len(prob_paths[0])):

        # run over all experiments (eaw_x...eaw_y)
        for i in range(len(folder_list)):

            sp_path = path + '/' + folder_list[i] + '/' + 'Data/Descriptors/SP_Desc_k200/super_pixels'
            sp = read_arr_from_matfile(sp_path + '/' + os.path.basename(
                                    os.path.dirname(prob_paths[i][j])) + '/' + \
                                    os.path.basename(prob_paths[i][j]),'superPixels')

            if eaw:
                # Scaling-function weights stored alongside the image.
                weight_map[i][j] = read_arr_from_matfile(eaw_path + '/' +\
                                                      eaw_folder[i] +'/'+\
                                        os.path.basename(
                                    os.path.dirname(prob_paths[i][j])) + '/' +\
                                    os.path.basename(prob_paths[i][j]),'im')
            else:
                # No scaling functions: fall back to uniform weights.
                weight_map[i][j] = np.ones(sp.shape)

        log.update()

    return path, eaw_path, folder_list, prob_path, prob_paths, weight_map
# Example #7
    def analyze_sp_class_size(self, sp_class_path, safe_path, experiment):
        '''
        Analyzes super pixels according to their size and labels, and
        writes per-label size statistics for the test/train/total splits
        to one CSV file per split.
        @param sp_class_path: list of label paths (e.g. /GeoLabels, /SemanticLabels)
        @param safe_path: path to store statistics at
        @param experiment: experiment name (used in output filenames)
        '''

        # stats[set_index][split][label] ->
        #   np.array([num_sp, total_size, min_size, max_size]);
        # the mean size is derived at write time as total_size / num_sp.
        stats = {}

        for i in range(len(sp_class_path)):

            stats[i] = {}
            evals = ['test', 'train', 'total']

            # Skip everything if all three per-split CSVs already exist.
            skipping = True
            for ev in evals:
                skipping = skipping & os.path.isfile(safe_path+"/SP_Stats_Size_" + \
                                         experiment +'_' + ev + ".csv")

            # NOTE(review): this `return` aborts the whole method, not just
            # the current label set i — confirm that is intended.
            if skipping:
                print "Skipping {0} sp_size_Analysis - files already exist".format(
                    experiment)
                return

            stats[i]['test'] = {}
            stats[i]['train'] = {}
            stats[i]['total'] = {}
            path_list = path_to_subfolder_pathlist(sp_class_path[i] + \
                                            '/SP_Desc_k200', filter=".mat")

            # Drop the segIndex helper file; keep only per-image files.
            path_list = [
                path for path in path_list
                if (os.path.splitext(basename(path))[0] != 'segIndex')
            ]

            image_name_list = np.array([
                os.path.splitext(basename(filepath))[0]
                for filepath in path_list
            ])

            # Image names listed in TestSet1.txt define the test split.
            test_files = read_arr_from_txt(os.path.abspath(
                os.path.join(sp_class_path[i], '..', 'TestSet1.txt')),
                                           dtype=np.str)
            test_file_names = np.array([
                os.path.splitext(basename(test_file))[0]
                for test_file in test_files
            ])

            test_file_indices = np.array([
                j for j in range(len(image_name_list))
                if image_name_list[j] in test_file_names
            ])

            # Every image not in the test split belongs to the train split.
            train_file_indices = np.array([
                j for j in range(len(image_name_list))
                if j not in test_file_indices
            ])


            self.log.start("Analyzing SuperPixel Size per Label in Set {0}".\
                           format(i),
                       len(object_labels[i]) + \
                           len(test_file_indices)*len(object_labels[i]) + \
                           len(train_file_indices)*len(object_labels[i]) + \
                           len(path_list)*len(object_labels[i]), 1)

            # Initialize accumulators per label; 9999999 acts as +infinity
            # for the running minimum.
            for j in range(len(object_labels[i])):

                # (num_sp, total_size, min_size, max_size)
                stats[i]['test'][object_labels[i][j]] = np.array(
                    [0, 0, 9999999, 0])
                stats[i]['train'][object_labels[i][j]] = np.array(
                    [0, 0, 9999999, 0])
                stats[i]['total'][object_labels[i][j]] = np.array(
                    [0, 0, 9999999, 0])
                self.log.update()

            #evaluate test files
            # For each label j: select all superpixels carrying label j+1
            # and fold their sizes into the accumulator. min/max are updated
            # via deltas: adding min(new_min - stored_min, 0) lowers the
            # stored min only when a smaller size appears; the max update is
            # the analogous mirror image.
            for ind in test_file_indices:
                for j in range(len(object_labels[i])):
                    # 'index' struct from the .mat file; nested [0][0]
                    # indexing unwraps scipy's MATLAB struct arrays.
                    seg_file = read_dict_from_matfile(path_list[ind])['index']

                    if len(seg_file['label'][0][0]) > 0:
                        label = seg_file['label'][0][0][0]\
                            [seg_file['label'][0][0][0]==j+1]
                        sp_size = seg_file['spSize'][0][0][0]\
                            [seg_file['label'][0][0][0]==j+1]

                        if len(label) > 0:
                            stats[i]['test'][object_labels[i][j]]+= \
                                np.array([len(label),
                                      sum(sp_size),
                                      min(min(sp_size) - stats[i]['test'][object_labels[i][j]][2],0),
                                      max(max(sp_size) - stats[i]['test'][object_labels[i][j]][3],0)])
                    self.log.update()

            # Same accumulation for the train split.
            for ind in train_file_indices:
                for j in range(len(object_labels[i])):
                    seg_file = read_dict_from_matfile(path_list[ind])['index']

                    if len(seg_file['label'][0][0]) > 0:
                        label = seg_file['label'][0][0][0]\
                            [seg_file['label'][0][0][0]==j+1]
                        sp_size = seg_file['spSize'][0][0][0]\
                            [seg_file['label'][0][0][0]==j+1]

                        if len(label) > 0:
                            stats[i]['train'][object_labels[i][j]]+= \
                                np.array([len(label),
                                  sum(sp_size),
                                  min(min(sp_size) - stats[i]['train'][object_labels[i][j]][2],0),
                                  max(max(sp_size) - stats[i]['train'][object_labels[i][j]][3],0)])

                    self.log.update()

            # Same accumulation over every file for the total split.
            for ind in range(len(path_list)):
                for j in range(len(object_labels[i])):
                    seg_file = read_dict_from_matfile(path_list[ind])['index']

                    if len(seg_file['label'][0][0]) > 0:
                        label = seg_file['label'][0][0][0]\
                            [seg_file['label'][0][0][0]==j+1]
                        sp_size = seg_file['spSize'][0][0][0]\
                            [seg_file['label'][0][0][0]==j+1]

                        if len(label) > 0:
                            stats[i]['total'][object_labels[i][j]]+= \
                                np.array([len(label),
                                  sum(sp_size),
                                  min(min(sp_size) - \
                                      stats[i]['total'][object_labels[i][j]][2],
                                      0),
                                  max(max(sp_size)-stats[i]['total']\
                                      [object_labels[i][j]][3],0)])
                    self.log.update()

        self.log.start("Writing SuperPixel Size Statistic ",
                       len(evals) * (sum([len(x) for x in object_labels])), 1)

        # Write one CSV per split with a row per label; label sets are
        # separated by "###" marker rows.
        for ev in evals:
            # NOTE(review): the "tab" dialect is registered but the writer
            # uses 'excel-tab'; the file handle is also never closed —
            # candidates for a later fix.
            csv.register_dialect("tab", delimiter="\t", quoting=csv.QUOTE_ALL)
            writer = csv.DictWriter(open(safe_path+"/SP_Stats_Size_" + \
                                         experiment +'_' + ev + ".csv", "wb"),
                            ["Label", "#SuperPixels", "#Pixel", "#min_sp_size",
                             "#max_sp_size", "#mean_sp_size"], dialect = 'excel-tab')

            writer.writerow({
                "Label": "Label",
                "#SuperPixels": "#SuperPixels",
                "#Pixel": "#Pixel",
                "#min_sp_size": "#min_sp_size",
                "#max_sp_size": "#max_sp_size",
                "#mean_sp_size": "#mean_sp_size"
            })

            data = [{}]
            for i in range(len(sp_class_path)):
                data.append({
                    "Label": "###",
                    "#SuperPixels": "###",
                    "#Pixel": "###",
                    "#min_sp_size": "###",
                    "#max_sp_size": "###",
                    "#mean_sp_size": "###"
                })
                for lab in object_labels[i]:

                    # Mean derived from accumulated totals; raises
                    # ZeroDivisionError if a label never occurred —
                    # NOTE(review): consider guarding num_sp == 0.
                    data.append({"Label" : lab,
                                 "#SuperPixels" : stats[i][ev][lab][0],
                                 "#Pixel" : stats[i][ev][lab][1],
                                 "#min_sp_size" : stats[i][ev][lab][2],
                                 "#max_sp_size" : stats[i][ev][lab][3],
                                 "#mean_sp_size" : stats[i][ev][lab][1]*1.0/\
                                                    stats[i][ev][lab][0]})
                    self.log.update()

            writer.writerows(data)