Code example #1
def avg_perf_savefig(avg_cscore, c_score, subj_name, params=None):
    """Save 2 figures to the path given by params = [save_flag, save_path]:
        - the first element (1 or 0) activates or deactivates the plotting,
        - the second element is the string that specifies the save path.
        fig1: class scores, 1 subject vs all subjects
        fig2: average performance per subject"""
    if params is not None and params[0] == 1:

        goal_dir = os.path.join(params[1])

        # plot 1 subj vs all
        name1 = '1vsAll'
        axis1 = ['subjects', 'subjects']
        mpt.plot_confusion_matrix(c_score, classes=subj_name, normalize=False, title='mhad class score per subject', axs=axis1)
        plt.savefig(goal_dir + name1)
        plt.close('all')

        # Plot average performance over the dataset
        name2 = 'avg_performance_dtset'
        col_label = ['average']
        img_view = np.reshape(avg_cscore, (12, 1))
        im, cbar = heatmap(img_view, subj_name, col_label, cmap='jet')
        texts = annotate_heatmap(im, valfmt="{x:.2f} ")
        plt.axes().set_aspect('auto')
        plt.title('Average Performance for every Subject\n Mhad_dataset')
        plt.savefig(goal_dir + name2)
        plt.close('all')
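The second figure above is essentially a 12x1 column heatmap with one annotated cell per subject. Below is a minimal, self-contained sketch of the same layout using plain matplotlib, in case the project's `heatmap`/`annotate_heatmap` helpers are not at hand; the subject names and scores are toy data, and the helpers are only assumed to produce something comparable.

import os
import numpy as np
import matplotlib
matplotlib.use("Agg")  # write files without needing a display
import matplotlib.pyplot as plt

# toy data: average class score for 12 subjects
subj_name = ["s%02d" % i for i in range(1, 13)]
avg_cscore = np.random.rand(12)
goal_dir = "plots/"
os.makedirs(goal_dir, exist_ok=True)

img_view = np.reshape(avg_cscore, (12, 1))           # one column, one row per subject
fig, ax = plt.subplots()
im = ax.imshow(img_view, cmap="jet", aspect="auto")  # column heatmap
ax.set_yticks(range(len(subj_name)))
ax.set_yticklabels(subj_name)
ax.set_xticks([0])
ax.set_xticklabels(["average"])
for i, v in enumerate(avg_cscore):
    ax.text(0, i, "{:.2f}".format(v), ha="center", va="center", color="w")
fig.colorbar(im)
ax.set_title("Average Performance for every Subject")
fig.savefig(os.path.join(goal_dir, "avg_performance_dtset"))
plt.close(fig)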
Code example #2
def classifyKNN(frame, database, k, metric):
    """k-nearest-neighbour classification of a single frame: measure the
    distance from `frame` to every (feature_vector, subject, action) row of
    `database` and return the most frequent label among the k closest rows."""
    distances = []

    for iframe in range(0, database.shape[0]):
        d = metric(frame, database[iframe][0])
        distances.append((d, database[iframe][1], database[iframe][2]))

    distances.sort(key=lambda row: row[0])

    return mpt.most_often_occurence(distances[0:k])
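`mpt.most_often_occurence` is not shown here; the sketch below re-implements the same k-NN voting pattern with a simple majority vote over the (subject, action) labels of the k closest frames. The stand-in helper, function names, and toy database are assumptions, not the project's actual code.

from collections import Counter
import numpy as np

def most_often_occurrence(neighbours):
    """Majority vote over (distance, subject, action) tuples."""
    labels = [(sub, act) for _, sub, act in neighbours]
    return Counter(labels).most_common(1)[0][0]

def classify_knn(frame, database, k, metric):
    distances = [(metric(frame, fv), sub, act) for fv, sub, act in database]
    distances.sort(key=lambda row: row[0])
    return most_often_occurrence(distances[:k])

# toy database rows: (feature_vector, subject_id, action_id)
rng = np.random.default_rng(0)
database = [(rng.random(3), s, a) for s in range(2) for a in range(3) for _ in range(5)]
euclidean = lambda a, b: float(np.linalg.norm(a - b))
print(classify_knn(rng.random(3), database, k=5, metric=euclidean))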
Code example #3
def cfm_savefig(subject1, subject2, params_cmf):
    """Save the confusion matrix of 2 subjects and 11 actions to the specified path.
    input: subject1, subject2 -- subject name strings;
           params_cmf = [conf_matrix, classes, class_score, misclassified, save_flag, save_path]
    output: figure written with plt.savefig to save_path"""

    if params_cmf[4] == 1:

        goal_dir = os.path.join(params_cmf[5])

        axlabel = [subject2, subject1]  # [x,y]
        mpt.plot_confusion_matrix(params_cmf[0], classes=params_cmf[1], normalize=False, title='confusion matrix', axs=axlabel)
        # plt.plot(indexes[:,0], indexes[:,1],'ro')
        plt.title('Classification Score = ' + str(round(params_cmf[2], 2)) + '%\nmisclassified=' + str(int(params_cmf[3])))
        plt.rcParams.update({'font.size': 13})
        plt.tight_layout()
        # plt.show()

        name = subject1 + '_' + subject2
        my_file = name + '_cfm'

        plt.savefig(goal_dir + my_file)
        plt.close('all')
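`mpt.plot_confusion_matrix` comes from the project's own tools module. A rough, self-contained equivalent for plotting and saving an 11-action confusion matrix with plain matplotlib could look like the sketch below; the function name, colormap, and toy matrix are illustrative assumptions.

import os
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt

def plot_and_save_cfm(cfm, classes, title, xlabel, ylabel, out_path):
    fig, ax = plt.subplots()
    im = ax.imshow(cfm, cmap="Blues")
    ax.set_xticks(range(len(classes)))
    ax.set_xticklabels(classes, rotation=45)
    ax.set_yticks(range(len(classes)))
    ax.set_yticklabels(classes)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_title(title)
    fig.colorbar(im)
    fig.tight_layout()
    fig.savefig(out_path)
    plt.close(fig)

actions = ["A%02d" % i for i in range(1, 12)]
toy_cfm = np.random.rand(11, 11)
os.makedirs("plots", exist_ok=True)
plot_and_save_cfm(toy_cfm, actions, "confusion matrix", "s02", "s01",
                  os.path.join("plots", "s01_s02_cfm.png"))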
Code example #4
# DTW figures path
savefig_dtw = os.getcwd() + "/plots/MP_Similarity_Matrix/dtw_results/"

# sflag = 0: turn off plots, 1: save figures to the path
sflag = 1

FV_new = []

for name in dataset:

    dataset_dir = "/home/evangeloit/Desktop/GitBlit_Master/PythonModel3dTracker/Data/" + name + ".json"
    input_dir = "/home/evangeloit/Desktop/GitBlit_Master/PythonModel3dTracker/Data/rs/Human_tracking/" \
                + name + "_results_ldm.json"

    ## Load data from Json ##
    dataPoints, dataLim = mpt.load_data(input_dir, dataset_dir)

    init_frame = 0  # dataLim['limits'][0]
    last_frame = 110  # dataLim['limits'][1]

    ##### Create 3D points array #####

    p3d = mpt.Create3dPoints(init_frame, last_frame, dataPoints, model_name)

    ## Gaussian Filter 5 by 1 in time dimension

    p3d_gauss = mpt.GaussFilter3dPoints(p3d, sigma, t)

    #### Create Feature Vector ####

    feat_vec, vec, acc = mpt.MovPoseDescriptor(p3d_gauss, StartFrame)
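`mpt.GaussFilter3dPoints` smooths the joint trajectories along the time dimension. Below is a minimal sketch of that step using `scipy.ndimage.gaussian_filter1d`; the array shape and sigma are illustrative, and the project helper is only assumed to behave in a similar way.

import numpy as np
from scipy.ndimage import gaussian_filter1d

# toy trajectories: 110 frames, 15 joints, 3 coordinates
p3d = np.random.rand(110, 15, 3)

sigma = 1
# smooth only along the time axis (axis=0); joints and coordinates are untouched
p3d_gauss = gaussian_filter1d(p3d, sigma=sigma, axis=0)

print(p3d.shape, p3d_gauss.shape)  # shapes are unchanged by the filter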
Code example #5
from Moving_Pose_Descriptor import MP_tools2 as mpt
from Moving_Pose_Descriptor import confmat as cfm
from Moving_Pose_Descriptor import Threshold_Precision_Recall as TPR
# from Moving_Pose_Descriptor.heatmap import heatmap
# from Moving_Pose_Descriptor.heatmap import annotate_heatmap
# import matplotlib.pyplot as plt
# from heatmap import heatmap
# from heatmap import annotate_heatmap
# from munkres import Munkres
import os
import json

# Controllers

#Open mhad dataset jsonfile
with open(os.path.join(os.environ['mvpd'], "dataset.json")) as f:
    dataset_s1 = mpt.AlpNumSorter(list(json.load(f)))

#Paths
dtpath = '/home/evangeloit/Desktop/GitBlit_Master/PythonModel3dTracker/Data/data/'
landmarks_path = "/home/evangeloit/Desktop/GitBlit_Master/PythonModel3dTracker/Data/rs/Human_tracking/results_camera_invariant/"
# os.chdir(dtpath) # Mhad Dataset directory

model_name = 'mh_body_male_customquat'

# Actions
actions = [
    "A01", "A02", "A03", "A04", "A05", "A06", "A07", "A08", "A09", "A10", "A11"
]

# Gaussian Filter Parameters
sigma = 1
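`mpt.AlpNumSorter` is used throughout to put the dataset names in alphanumeric order. A plausible stand-in based on a natural-sort key is sketched below; this is an assumption, and the real helper may differ in detail.

import re

def alpnum_sorter(names):
    """Natural sort: 'mhad_s2_a10' sorts after 'mhad_s2_a2'."""
    def key(s):
        return [int(tok) if tok.isdigit() else tok.lower()
                for tok in re.split(r"(\d+)", s)]
    return sorted(names, key=key)

print(alpnum_sorter(["mhad_s10_a01", "mhad_s02_a11", "mhad_s02_a02"]))
# ['mhad_s02_a02', 'mhad_s02_a11', 'mhad_s10_a01']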
Code example #6
#                "mh_body_male_customquat"
#                ]

# datasets = [
#     'mhad_s03_a04','mhad_s06_a04',
#     'mhad_s12_a04','mhad_s11_a04',
#     'mhad_s05_a04','mhad_s02_a04',
#     'mhad_s10_a04'
# ]

mod_name = ["mh_body_male_customquat"]
# datasets = dtlist.datasets_list(dtpath)

#Open mhad dataset jsonfile
with open(os.path.join(os.environ['mvpd'], "dataset.json")) as f:
    datasets = mpt.AlpNumSorter(list(json.load(f)))
#take out specific actions
print(datasets)
print(len(datasets))

k = len(datasets)
model_names = mod_name * k

# print(model_names)
# print(datasets)
# print(len(model_names))
# print(len(datasets))

#model_names = ["mh_body_male_customquat"] * len(datasets)
#model_names = ["mh_body_male_custom"]
#datasets = ["mhad_ammar"]
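The `mod_name * k` line simply repeats the single model name once per dataset so the two lists can later be walked in lockstep. A tiny sketch with a toy dataset list:

mod_name = ["mh_body_male_customquat"]
datasets = ["mhad_s01_a01", "mhad_s01_a02", "mhad_s02_a01"]  # toy list

model_names = mod_name * len(datasets)
assert len(model_names) == len(datasets)

for name, model in zip(datasets, model_names):
    print(name, model)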
Code example #7
database = np.array(database)

testframes = database.copy()

# compute confidence for every frame
k = 5
class_frames = []
for income in range(0, testframes.shape[0]):

    # incoming frame feature vector
    fv_in = testframes[income][0]
    # print("incoming frame no:", income, "  iSub", testframes[income][1], "  iAct", testframes[income][2])
    print("frame :", income)  # "  iSub", testframes[income][1], "  iAct", testframes[income][2]

    # filter database: exclude all frames of this subject's current action
    newdatabase = mpt.filterdatabase(testframes[income], database)

    # random 10% of frame numbers from the filtered database
    partData = mpt.randomData(newdatabase, percent=0.1)

    # confidence that the frame belongs to a class
    confidence_tuple = FrameWiseClassify.classframe(fv_in, newdatabase, k, dataPercent=partData)

    cf = (fv_in, confidence_tuple)
    class_frames.append(cf)

class_frames = np.array(class_frames)
np.save('class_frames_k5.npy',class_frames)

print("--- %s seconds ---" % (time.time() - start_time))
Code example #8
# compute confidence for every frame
k = 4
class_frames = []
ct = 0

for income in range(0, 1100):

    #incoming frame Feature Vector
    fv_in = testframes[income][0]
    # print("incoming frame no:",income ,"  iSub", testframes[income][1], "  iAct", testframes[income][2] )
    print(
        "frame :", income
    )  #,"  iSub", testframes[income][1], "  iAct", testframes[income][2] )

    # filter database: exclude all frames of this subject's current action
    newdatabase = mpt.filterdatabase(testframes[income], database)

    # random frame numbers -- not used here (partData left as None)
    partData = None  # mpt.randomData(newdatabase, percent=None)

    # confidence that the frame belongs to a class
    confidence_tuple = FrameWiseClassify.classframe(fv_in,
                                                    newdatabase,
                                                    k,
                                                    dataPercent=partData)

    change_action = abs(testframes[income][2] - testframes[income + 1][2])

    if change_action != 0:
        ct = 0
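The `change_action` test resets the counter `ct` whenever the action label of the next frame differs from the current one. Here is a toy sketch of the same change-detection pattern; note that stopping at `len(labels) - 1` avoids reading past the last frame, which a fixed `range(0, 1100)` can do when exactly 1100 frames are present.

labels = [1, 1, 1, 1, 2, 2, 3, 3, 3]  # toy per-frame action labels

ct = 0  # frames seen since the last action change
for i in range(len(labels) - 1):
    ct += 1
    change_action = abs(labels[i] - labels[i + 1])
    if change_action != 0:
        print("action changes after frame", i, "(ran for", ct, "frames)")
        ct = 0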
Code example #9
def Conf2Subject(subject1, subject2, dtpath, fv_1, fv_2, params=None):
    """Conf2Subject ::

    inputs: subject1, subject2, dataset path, feature vectors of subject 1 and subject 2,
            params = [flag, path] -- flag 0: no plot saving, 1: save all plots;
            path: absolute path of the destination save folder.

    outputs: score       : m x m DTW score matrix between subj1 and subj2 over the actions
             class_score : percentage of row/column minima that fall on the diagonal,
                           i.e. (row hits + col hits) / (2 * number of actions) * 100
             missclass   : number of row/column minima that miss the diagonal"""
    act_s1, act_s1_not = mpt.list_ext(os.path.join(dtpath, subject1), 'json')
    act_s2, act_s2_not = mpt.list_ext(os.path.join(dtpath, subject2), 'json')

    act_s1_not = mpt.AlpNumSorter(act_s1_not)
    act_s2_not = mpt.AlpNumSorter(act_s2_not)
    # print(act_s1_not)
    # print(act_s2_not)
    # print("new_pair:")
    score = np.empty((len(act_s1_not), len(act_s2_not)), np.dtype(np.float32))

    for sub1 in range(0, len(act_s1_not)):
        for sub2 in range(0, len(act_s2_not)):
            Y = cdist(fv_1[sub1], fv_2[sub2], 'euclidean')
            p, q, C, phi = mpt.dtwC(Y, 0.1)

            # Sum of diagonal steps
            dsteps = 0
            for r in range(0, len(q) - 1):
                qdot = abs(q[r] - q[r + 1])
                pdot = abs(p[r] - p[r + 1])
                s = qdot + pdot
                if s == 2:
                    dsteps = dsteps + 1
                # print(dsteps)

            # Scores of DTW for every subject / Objective Function
            score[sub1][sub2] = (C[-1, -1] / ((Y.shape[0] + Y.shape[1])))

            mpt.DistMatPlot(Y, params[1], q, p, dtwscore=score[sub1][sub2],
                            name=act_s1_not[sub1] + "_" + act_s2_not[sub2], flag='DTW',
                            save_flag=params[0])

    #Class Score [min row + min col]
    # Pscore = ((score/np.amax(score))*100).copy()

    Pminrow = np.argmin(score, axis=1)  # axis=1 row min index
    Pmincol = np.argmin(score, axis=0)  # axis=0 col min index
    Pvec = np.arange(0, len(Pminrow))

    pr = Pminrow == Pvec
    pr = pr*1
    pc = Pmincol == Pvec
    pc = pc*1

    mtot = pr + pc

    missclass = (2*len(score)) - np.sum(mtot)
    # # print(missclass)
    class_score = (np.sum(mtot, dtype=float) / (2 * len(score))) * 100
    # print(class_score)

    return score, class_score , missclass
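The class score at the end is just the fraction of row and column minima of the DTW score matrix that land on the diagonal. The same computation in isolation, on a toy 3x3 matrix instead of the m x m matrix above:

import numpy as np

# toy 3x3 DTW score matrix (lower = more similar)
score = np.array([[0.1, 0.8, 0.9],
                  [0.7, 0.2, 0.6],
                  [0.9, 0.9, 0.4]])

Pminrow = np.argmin(score, axis=1)  # best match per row
Pmincol = np.argmin(score, axis=0)  # best match per column
Pvec = np.arange(len(score))

mtot = (Pminrow == Pvec).astype(int) + (Pmincol == Pvec).astype(int)

missclass = 2 * len(score) - np.sum(mtot)
class_score = np.sum(mtot, dtype=float) / (2 * len(score)) * 100

print(class_score, missclass)  # 100.0 0 for this diagonal-dominant toy matrix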
Code example #10
# Compare one set with all the other datasets -- save Figure path
savefig_comp = os.getcwd() + "/plots/conf_matrix/MP_comp_mat/"

# DTW figures path
savefig_dtw = os.getcwd() + "/plots/conf_matrix/dtw_res_conf/"

# sflag = 0: turn off plots, 1: save figures to the path
sflag = 0

FV_new = []

subj_name = os.listdir(os.getcwd())  # List of Subjects in the directory

for subj in range(0, len(subj_name)):  #for every subject

    acts, acts_not = mpt.list_files1(
        os.path.join(os.getcwd(), subj_name[subj]), 'json')
    # print(acts)
    # # acts = os.listdir(os.path.join(os.getcwd(), subj_name[subj]))
    for act in range(0, len(acts)):  # for every action of a subject
        # for name in dataset_s1:

        # dataset_dir = "/home/evangeloit/Desktop/GitBlit_Master/PythonModel3dTracker/Data/data/" + name + ".json"
        dataset_dir = os.path.join(os.getcwd(), subj_name[subj], acts[act])
        input_dir = "/home/evangeloit/Desktop/GitBlit_Master/PythonModel3dTracker/Data/rs/Human_tracking/results_camera_invariant/" \
                    + acts_not[act] + "_ldm.json"

        ## Load data from Json ##
        dataPoints, dataLim = mpt.load_data(input_dir, dataset_dir)

        init_frame = dataLim['limits'][0]
        last_frame = dataLim['limits'][1]
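`mpt.list_files1` (and `mpt.list_ext` in example #9) appears to return both the JSON file names and their extension-less counterparts, which are then used to look up the matching landmark results file. A rough, self-contained sketch of the per-subject directory walk; the helper is a stand-in and the directory layout is hypothetical.

import os
import json

def list_json(folder):
    """Return (file names, names without extension) of the .json files in folder."""
    names = sorted(f for f in os.listdir(folder) if f.endswith(".json"))
    return names, [os.path.splitext(f)[0] for f in names]

root = os.getcwd()
for subj in sorted(os.listdir(root)):
    subj_dir = os.path.join(root, subj)
    if not os.path.isdir(subj_dir):
        continue
    acts, acts_not = list_json(subj_dir)
    for act, act_not in zip(acts, acts_not):
        with open(os.path.join(subj_dir, act)) as f:
            data = json.load(f)
        # the landmark results file would be looked up by the extension-less name
        print(subj, act_not, type(data))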