Example #1
                    # recog_sq.append(label)
                    # target_sq.append(target)

                    recog_sq[str(k)].append(label)
                    target_sq[str(k)].append(target)
                    k += 1
                start_To = start_To + S_enc_frame

            # write each recognized sequence and its target sequence to the results folder
            for m in range(len(recog_sq)):

                target = target_sq[str(m)][1:]
                recog = recog_sq[str(m)][1:]
                f_name = vid.split('/')[-1].split('.')[0]
                path = args.results_save_path + "/obs" + str(int(args.S_enc)) + "-pred" + str(m)
                write_predictions(path, f_name, recog)
                path = args.results_save_path + "/obs" + str(int(args.S_enc)) + "-pred" + str(m) + "_target"
                write_predictions(path, f_name + "_target", target)
                # report = classification_report(target, recog,output_dict=True)
                # print ("Accuracy:  " +   str(round(accuracy_score(target,recog) * 100,2)))
                # print ("Precision  " +  str(round(report['macro avg']['precision']*100,2)))
                # print ("Recall     "    +     str(round(report['macro avg']['recall']*100,2)))
                # print ("f1-score   "  +   str( round(report['macro avg']['f1-score']*100,2)))
                # n_T=np.zeros(len(actions_dict.keys()))
                # n_F=np.zeros(len(actions_dict.keys()))
                # for i in range(len(target)):
                #     if target[i]==recog[i]:
                #         n_T[actions_dict[target[i]]]+=1 # if the label of this frame was recognized correctly, add 1 to its True count
                #     else:
                #         n_F[actions_dict[target[i]]]+=1 # if the label of this frame is wrong, add 1 to its False count
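
Example #1 and Example #2 both store their outputs via write_predictions(path, f_name, labels). That helper is not part of the snippets shown here, so the following is only a minimal sketch of what such a function could look like, assuming one output file per video with one predicted label per line:

import os

def write_predictions(path, f_name, recognition):
    # sketch only: the real helper may use a different file name or layout
    os.makedirs(path, exist_ok=True)  # e.g. ".../obs4-pred0" or ".../obs0.2-pred0.5"
    with open(os.path.join(path, f_name + ".txt"), 'w') as f:
        for label in recognition:
            f.write(str(label) + '\n')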
Example #2
            for pred_p in pred_percentages:
                pred_len = int(pred_p * vid_len)
                output_len = pred_len + len(observed_content)

                label_seq, length_seq = get_label_length_seq(observed_content)
                # let the model extend the observed segments with predicted future segments
                with tf.Session() as sess:
                    label_seq, length_seq = model.predict(sess, model_restore_path, pred_len, label_seq, length_seq, actions_dict, T)

                # expand the segment-level (label, length) pairs into a frame-level label sequence
                recognition = []
                for i in range(len(label_seq)):
                    recognition = np.concatenate((recognition, [label_seq[i]] * int(length_seq[i])))
                recognition = recognition[:output_len]

                # write results to file
                f_name = vid.split('/')[-1].split('.')[0]
                path = args.results_save_path + "/obs" + str(obs_p) + "-pred" + str(pred_p)
                write_predictions(path, f_name, recognition)

elif args.model == "cnn":
    model = ModelCNN(args.nRows, nClasses)
    for vid in list_of_videos:
        f_name = vid.split('/')[-1].split('.')[0]
        observed_content = []
        vid_len = 0
        if args.input_type == "gt":
            file_ptr = open(vid, 'r')
            content = file_ptr.read().split('\n')[:-1]
            vid_len = len(content)

        for obs_p in obs_percentages:
            if args.input_type == "decoded":
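
Example #2 also relies on get_label_length_seq(observed_content) to turn the observed frame-level labels into segment labels and lengths before prediction. That helper is likewise not shown, so this is a plausible sketch under the assumption that the input is an ordered list with one label per frame:

def get_label_length_seq(content):
    # collapse consecutive identical frame labels into (segment label, segment length) pairs
    label_seq = []
    length_seq = []
    for label in content:
        if not label_seq or label != label_seq[-1]:
            label_seq.append(label)
            length_seq.append(1)
        else:
            length_seq[-1] += 1
    return label_seq, length_seq

For instance, ['pour', 'pour', 'stir'] would yield (['pour', 'stir'], [2, 1]), which matches how the prediction loop above expands label_seq and length_seq back into a frame-level sequence.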