def main():
    """Run the competition pipeline end to end and print the final score.

    Each commented step below is one stage of the starter-kit workflow;
    only the final evaluation is currently active.
    """
    # Input data folder (training data).
    data = './data/'
    # Output folders for the train/test split.
    outTrain = './training/train/'
    outTest = './training/test/'
    # Predictions folder (output).
    outPred = './results/step1all/'
    # Ground-truth folder (output).
    outGT = './GT/'
    # Submission folder (output).
    outSubmision = './submision/'

    # Step 1 -- divide data into train and test:
    # createDataSets(data, outTrain, outTest, 0.3)

    # Step 2 -- learn (or reload) the model:
    # if os.path.exists("model.npy"):
    #     model = numpy.load("model.npy")
    # else:
    #     model = learnModel(outTrain)
    #     numpy.save("model", model)

    # Step 3 -- predict over the test dataset:
    # predict(model, outTest, outPred)

    # Step 4 -- create evaluation ground truth from the labeled data:
    # exportGT_Gesture(outTest, outGT)

    # Step 5 -- evaluate the predictions.
    score = evalGesture(outPred, outGT)
    print("The score for this prediction is " + "{:.12f}".format(score))
def main(): prediction_dir = r'I:\Kaggle_multimodal\StartingKit_track3\CoDaLab_Gesure_track3\matlab\prediction_650_conv' #prediction_dir = r'I:\Kaggle_multimodal\StartingKit_track3\CoDaLab_Gesure_track3\matlab\prediction_650' #truth_dir = r'I:\Kaggle_multimodal\validation_labels' truth_dir = r'I:\Kaggle_multimodal\validation' final_score = evalGesture(prediction_dir, truth_dir) print "final_score " + str(final_score)
def main(): prediction_dir = r'I:\Kaggle_multimodal\StartingKit_track3\CoDaLab_Gesure_track3\matlab\prediction_650_conv' #prediction_dir = r'I:\Kaggle_multimodal\StartingKit_track3\CoDaLab_Gesure_track3\matlab\prediction_650' #truth_dir = r'I:\Kaggle_multimodal\validation_labels' truth_dir = r'I:\Kaggle_multimodal\validation' final_score = evalGesture(prediction_dir,truth_dir) print "final_score "+str(final_score)
frames_count = numpy.array( range(begin_frame[i], end_frame[i] + 1)) pred_label_temp = ((pred_label[i] - 1) * 10 + 5) * numpy.ones( len(frames_count)) plt.plot(frames_count, pred_label_temp, color='#ffff00', linewidth=2.0) plt.show() else: print "Elapsed time %d sec" % int(time.time() - time_tic) pred = [] for i in range(len(begin_frame)): pred.append([pred_label[i], begin_frame[i], end_frame[i]]) smp.exportPredictions(pred, outPred) # ############################################### ## delete the sample del smp TruthDir = './training/gt/' final_score = evalGesture(outPred, TruthDir) print("The score for this prediction is " + "{:.12f}".format(final_score)) # Submision folder (output) outSubmision = './training/submision/' # Prepare submision file (only for validation and final evaluation data sets) createSubmisionFile(outPred, outSubmision)
# Score a folder of exported per-sample predictions against a reference
# (ground-truth) folder using the ChaLearn LAP track-3 evaluation.
from ChalearnLAPEvaluation import evalGesture

# Prediction folder to score.  The original script assigned predPath twice in a
# row (a dead store); the unused alternates are kept as comments instead,
# matching the convention used elsewhere in this file.
#predPath=r'.\training\test/'
#predPath=r'I:\Kaggle_multimodal\Code_for_submission\Final_project\ConvNet_3DCNN\training\pred_sk_norm'
#predPath=r'I:\Kaggle_multimodal\StartingKit_track3\Final_project\training\test_combined/'
predPath=r'.\ConvNet_3DCNN\training\Test_3DCNN_ConvNet__2014-06-25_18.21.33_250/'

# Ground-truth reference folder.
TruthDir=r'I:\Kaggle_multimodal\ChalearnLAP2104_EvaluateTrack3\input/ref/'
#TruthDir=r'I:\Kaggle_multimodal\Valid_650_labels/'

# begin_add/end_add presumably shift predicted segment boundaries by a fixed
# frame offset; 0 leaves them untouched -- confirm in ChalearnLAPEvaluation.
final_score = evalGesture(predPath, TruthDir, begin_add=0, end_add=0)
print("The score for this prediction is " + "{:.12f}".format(final_score))

# Recorded results:
#The score for this prediction is 0.816150551922--combined
#The score for this prediction is 0.787309630209--skeleton
# Fragment of a per-sample prediction/export script.  `dic`, `out_file`, `smp`,
# `pred_label`, `begin_frame`, `end_frame` and `outPred` are bound earlier in
# the original file (outside this chunk) -- presumably inside a loop over
# samples; verify against the full script.

# Persist the prediction dictionary with the fastest binary pickle protocol.
cPickle.dump(dic, out_file, protocol=cPickle.HIGHEST_PROTOCOL)
out_file.close()

# Build one [label, begin_frame, end_frame] triplet per detected gesture
# segment and export them for this sample.
pred=[]
for i in range(len(begin_frame)):
    pred.append([ pred_label[i], begin_frame[i], end_frame[i]] )
smp.exportPredictions(pred,outPred)

# ###############################################
## delete the sample (frees the loaded sample data)
del smp

# Ground-truth reference folder for the official evaluation.
TruthDir=r'I:\Kaggle_multimodal\ChalearnLAP2104_EvaluateTrack3\input\ref//'

# NOTE(review): outPred is reassigned here, AFTER exportPredictions above used
# its previous value -- in the original loop context the export and this
# evaluation likely ran at different stages; confirm before reusing this code.
CNN_NAME = 'ConvNet__2014-05-28_01.59.00_150'
outPred='./ConvNet_3DCNN/training/Test_3DCNN_' + CNN_NAME
final_score = evalGesture(outPred,TruthDir)
print("The score for this prediction is " + "{:.12f}".format(final_score))

# Submission folder (output).
outSubmision='./training/submision/'
# Prepare submission file (only for validation and final evaluation data sets).
createSubmisionFile(outPred, outSubmision)

# Recorded results for previously evaluated checkpoints:
#ConvNet__2014-06-25_18.21.33_250 The score for this prediction is 0.637141036068
#ConvNet__2014-06-25_18.56.59_162 The score for this prediction is 0.679523343823
#ConvNet__2014-05-28_01.59.00_150 The score for this prediction is 0.702863204283
#'I:\Kaggle_multimodal\StartingKit_track3\Final_project\ConvNet_3DCNN\tmp\ConvNet__2014-05-26_03.40.18'
# The score for this prediction is 0.702863204283
# Fragment of a plotting/export/evaluation script.  `frames_count`,
# `pred_label_temp`, `file`, `prediction` and `predPath` are bound earlier in
# the original file (outside this chunk) -- presumably inside a loop over
# sample files; verify against the full script.

# Overlay the predicted labels (yellow) on the current figure.
plt.plot(frames_count, pred_label_temp, color='#ffff00', linewidth=2.0)

from pylab import *
# Save-to-disk branch is hard-enabled; flip to False to display interactively.
if True:
    save_dir=r'.\ConvNet_3DCNN\training\Depth_path_combined'
    if not os.path.exists(save_dir): os.makedirs(save_dir)
    save_path= os.path.join(save_dir,file)
    savefig(save_path, bbox_inches='tight')
    #plt.show()
else:
    plt.show()

#def exportPredictions(self, prediction,predPath):
""" Export the given prediction to the correct file in the given predictions path """
if not os.path.exists(predPath): os.makedirs(predPath)
output_filename = os.path.join(predPath, file + '_prediction.csv')
# NOTE(review): CSV text written through a 'wb' handle -- Python 2 idiom;
# would need 'w' (or explicit encoding) under Python 3.
output_file = open(output_filename, 'wb')
# One "label,begin,end" row per predicted gesture segment.
for row in prediction:
    output_file.write(repr(int(row[0])) + "," + repr(int(row[1])) + "," + repr(int(row[2])) + "\n")
output_file.close()

# Evaluate the exported predictions against the training ground truth.
TruthDir='./training/gt/'
final_score = evalGesture(predPath,TruthDir)
print("The score for this prediction is " + "{:.12f}".format(final_score))
# The score for this prediction is 0.830624726889!!!!!!!!!! Combine two depth!!!!!
# Evaluate one prediction folder against the official reference labels.
from ChalearnLAPEvaluation import evalGesture

# Defect fixed: predPath was assigned and then immediately overwritten on the
# next statement (dead store).  The superseded value joins the other
# commented-out alternates below; the active path is the last convnet run.
#predPath = r'.\training\test/'
#predPath=r'I:\Kaggle_multimodal\Code_for_submission\Final_project\ConvNet_3DCNN\training\pred_sk_norm'
#predPath=r'I:\Kaggle_multimodal\StartingKit_track3\Final_project\training\test_combined/'
predPath = r'.\ConvNet_3DCNN\training\Test_3DCNN_ConvNet__2014-06-25_18.21.33_250/'

# Ground-truth reference folder.
TruthDir = r'I:\Kaggle_multimodal\ChalearnLAP2104_EvaluateTrack3\input/ref/'
#TruthDir=r'I:\Kaggle_multimodal\Valid_650_labels/'

# begin_add/end_add presumably offset predicted segment boundaries; 0 keeps
# the predictions as exported -- confirm in ChalearnLAPEvaluation.
final_score = evalGesture(predPath, TruthDir, begin_add=0, end_add=0)
print("The score for this prediction is " + "{:.12f}".format(final_score))

# Recorded results:
#The score for this prediction is 0.816150551922--combined
#The score for this prediction is 0.787309630209--skeleton
# Scoring driver: compares <input_dir>/prediction against <input_dir>/reference
# and writes the overlap score to <output_dir>/scores.txt.
#input_dir = sys.argv[1]
#output_dir = sys.argv[2]
input_dir = './'
output_dir = './result'

submit_dir = os.path.join(input_dir, 'prediction')
truth_dir = os.path.join(input_dir, 'reference')
print(submit_dir)
print(truth_dir)

if not os.path.isdir(submit_dir):
    print("%s doesn't exist" % submit_dir)

if os.path.isdir(submit_dir) and os.path.isdir(truth_dir):
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    output_filename = os.path.join(output_dir, 'scores.txt')

    # Call evaluation for this track.
    score = evalGesture(submit_dir, truth_dir)
    print("Score: %f" % score)

    # Store the score.  Fix: the original opened the file BEFORE evaluating and
    # never closed it on error, leaking the handle and leaving an empty
    # scores.txt if evalGesture raised; `with` closes it deterministically.
    with open(output_filename, 'w') as output_file:
        output_file.write("Overlap: %f" % score)