# NOTE(review): fragment of a larger function — its `def` header (and the loop
# this plt.plot call belongs to) lies outside this chunk; code left untouched.
# Presumably draws the predicted-label segment in yellow over the HMM-path plot.
plt.plot(frames_count,
                         pred_label_temp,
                         color='#ffff00',
                         linewidth=2.0)

            # Interactive display is disabled; always fall through to saving.
            if False:
                plt.show()
            else:
                from pylab import savefig
                import matplotlib.pyplot as pl
                # Hard-coded output directory for the combined-model figures.
                save_dir = r'C:\Users\PC-User\Documents\GitHub\chalearn2014_wudi_lio\path_combined'
                save_path = os.path.join(save_dir, file)
                # Vector output; bbox_inches='tight' trims surrounding whitespace.
                savefig(save_path + '.eps', format='eps', bbox_inches='tight')
                #plt.show()

            print "Elapsed time %d sec" % int(time.time() - time_tic)

            # Collect [label, begin, end] triplets for the ChaLearn export format.
            pred = []
            for i in range(len(begin_frame)):
                pred.append([pred_label[i], begin_frame[i], end_frame[i]])

            sample.exportPredictions(pred, outPred)
            # ###############################################
            ## delete the sample
            del sample

    # Score all exported predictions against the ground-truth labels.
    TruthDir = r'I:\Kaggle_multimodal\Test_label'
    final_score = evalGesture(outPred, TruthDir)
    print("The score for this prediction is " + "{:.12f}".format(final_score))

# combined: 0.804609104245
                # NOTE(review): near-duplicate of the fragment above — starts
                # mid-loop; the enclosing `for i in ...` and `def` are outside
                # this chunk. Code left byte-identical.
                # Frame indices covered by predicted gesture i (inclusive).
                frames_count = numpy.array(range(begin_frame[i], end_frame[i]+1))
                # Constant y-value placing the segment at state 2 of its gesture band.
                pred_label_temp = ((pred_label[i]-1) *STATE_NO +2) * numpy.ones(len(frames_count))
                plt.plot(frames_count, pred_label_temp, color='#ffff00', linewidth=2.0)

            # Interactive display is disabled; always fall through to saving.
            if False:
                plt.show()
            else:     
                from pylab import savefig
                import matplotlib.pyplot as pl
                # Hard-coded output directory for the combined-model figures.
                save_dir=r'C:\Users\PC-User\Documents\GitHub\chalearn2014_wudi_lio\path_combined'
                save_path= os.path.join(save_dir,file)
                savefig(save_path+'.eps', format='eps', bbox_inches='tight')
                #plt.show()

            print "Elapsed time %d sec" % int(time.time() - time_tic)

            # Collect [label, begin, end] triplets for the ChaLearn export format.
            pred=[]
            for i in range(len(begin_frame)):
                pred.append([ pred_label[i], begin_frame[i], end_frame[i]] )

       
            sample.exportPredictions(pred,outPred)
         # ###############################################
            ## delete the sample
            del sample
        
    # Score all exported predictions against the ground-truth labels.
    TruthDir=r'I:\Kaggle_multimodal\Test_label'
    final_score = evalGesture(outPred,TruthDir)         
    print("The score for this prediction is " + "{:.12f}".format(final_score))

# combined: 0.804609104245
    def predLabel(self, video_file, matrix_file, pred_file, filename):
        """Viterbi-decode frame likelihoods into gesture predictions.

        Loads a pickled observation-likelihood matrix, runs Viterbi decoding
        over the HMM defined by the module-level ``Prior`` and
        ``Transition_matrix``, plots the decoded path against the sample's
        ground-truth gestures, and exports the predicted segments.

        Parameters
        ----------
        video_file : path of the gesture sample, opened via ``GestureSample``.
        matrix_file : path of the pickled observation-likelihood matrix
            (frames x states, as produced by the recognition stage).
        pred_file : destination path handed to ``exportPredictions``.
        filename : display name, used only in the progress message.
        """
        print("\t Processing file " + filename)
        sample_video = GestureSample(video_file)
        observ_likelihood = cPickle.load(open(matrix_file, "rb"))

        # Work in log-space; machine epsilon avoids log(0) on zero likelihoods.
        log_observ_likelihood = log(observ_likelihood.T +
                                    numpy.finfo(numpy.float32).eps)
        # Pin the last (resting) state at both sequence boundaries so the
        # decoded path begins and ends in "no gesture".
        log_observ_likelihood[-1, 0:5] = 0
        log_observ_likelihood[-1, -5:] = 0

        # Viterbi decoding: best state path plus per-frame accumulated scores.
        [path, predecessor_state_index,
         global_score] = viterbi_path_log(log(Prior), log(Transition_matrix),
                                          log_observ_likelihood)
        # Collapse the state path into gesture segments; drop segments scoring
        # below threshold or shorter than mini_frame frames.
        [pred_label, begin_frame, end_frame, Individual_score,
         frame_length] = viterbi_colab_states(path,
                                              global_score,
                                              state_no=5,
                                              threshold=-5,
                                              mini_frame=15)
        # Heuristic offsets: +1 frame at the start; +5 at the end to compensate
        # for the 4 frames consumed when each cuboid was cut.
        begin_frame += 1
        end_frame += 5

        # ---- plotting ----------------------------------------------------
        gesturesList = sample_video.getGestures()
        import matplotlib.pyplot as plt
        STATE_NO = 5  # hidden states per gesture class
        im = imdisplay(global_score)

        # Decoded Viterbi path in neon green over the score image.
        plt.plot(range(global_score.shape[-1]),
                 path,
                 color='#39FF14',
                 linewidth=2.0)
        plt.xlim((0, global_score.shape[-1]))
        plt.ylim((101, 0))  # inverted y-axis: state 0 at the top
        plt.xlabel('Frames')
        plt.ylabel('HMM states')
        plt.title('Multi_model(DBN+3DCNN)')

        # Ground-truth gestures in red, drawn at state 2 of each gesture band.
        for gesture in gesturesList:
            gestureID, startFrame, endFrame = gesture
            frames_count = numpy.array(range(startFrame, endFrame + 1))
            pred_label_temp = (
                (gestureID - 1) * STATE_NO + 2) * numpy.ones(len(frames_count))
            plt.plot(frames_count, pred_label_temp, color='r', linewidth=5.0)

        # Cleaned predicted segments in yellow.
        for i in range(len(begin_frame)):
            # BUGFIX: was `rames_count = ...`, which left `frames_count` stale
            # from the ground-truth loop, so every predicted segment was drawn
            # over the wrong frame range.
            frames_count = numpy.array(range(begin_frame[i], end_frame[i] + 1))
            pred_label_temp = ((pred_label[i] - 1) * STATE_NO +
                               2) * numpy.ones(len(frames_count))
            plt.plot(frames_count,
                     pred_label_temp,
                     color='#FFFF33',
                     linewidth=2.0)
        plt.show()

        # Export [label, begin, end] triplets in the ChaLearn format.
        pred = []
        for i in range(len(begin_frame)):
            pred.append([pred_label[i], begin_frame[i], end_frame[i]])
        sample_video.exportPredictions(pred, pred_file)