Code example #1
'''
Assigns near- and far-player attributes to points in the annotation files.

Currently each set must be processed manually.
'''
import numpy as np
import json
import os
import math
from os import listdir
from os.path import isfile, join
import utilities.paths as paths

DRIVE = paths.get_drive()

VIDEOS = ['V001']

PLAYERS = {'V001': {'P1': ['Roger', 'Federer'], 'P2': ['Rafael', 'Nadal']},
           'V006': {'P1': ['Serena', 'Williams'], 'P2': ['Maria', 'Sharapova']},
           'V007': {'P1': ['Novak', 'Djokovic'], 'P2': ['Jo-Wilfried', 'Tsonga']},
           'V008': {'P1': ['Vera', 'Zvonareva'], 'P2': ['Serena', 'Williams']},
           'V009': {'P1': ['Roger', 'Federer'], 'P2': ['Juan Martin', 'Delpotro']},
           'V010': {'P1': ['Serena', 'Williams'], 'P2': ['Victoria', 'Azarenka']}}

out_path = DRIVE + 'DATASETS/VIDEO/TENNIS/ANNOTATOR_FILES/'
for video in VIDEOS:
    af_path = out_path + 'AF' + video[1:] + '.json'
    if os.path.isfile(af_path):
        print 'File ' + af_path + ' exists. Loading it.'
        inF = open(af_path, 'r')
        database = json.load(inF)
        inF.close()
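The excerpt ends just after loading the annotation database. A minimal, hypothetical sketch of how the PLAYERS lookup might then be applied (the 'points', 'near' and 'far' field names are illustrative assumptions, not from the source):

        # Hypothetical continuation -- field names are assumed for illustration
        for point in database.get('points', []):
            point['near'] = ' '.join(PLAYERS[video]['P1'])  # e.g. 'Roger Federer'
            point['far'] = ' '.join(PLAYERS[video]['P2'])
        with open(af_path, 'w') as outF:
            json.dump(database, outF)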
Code example #2
File: predict.py Project: HaydenFaulkner/phd
from keras_code.rnns.sequence import models
from keras.models import Model

import numpy as np
import time
import os
import argparse

import utilities.paths as paths
DRIVE = paths.get_drive()


def predict(model_id, model_path, data_paths_path, feature_path, split, batch_size=None, load_epoch=None, layers=['pred'], save_path=None, equalised=False):
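    # Builds the model for model_id and restores the weights saved at
    # load_epoch; the remainder of the function (elided from this excerpt)
    # runs prediction over the features for `split`.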

    start_time = time.clock()
    output_classes = 7
    model = models.get_model_from_id(model_id)
    if model is None:
        return

    # Model directory (log loading is elided from this excerpt)
    model_path = model_path + model_id
    if not os.path.exists(model_path):
        os.makedirs(model_path)

    if load_epoch is not None:
        print 'Loading model: ' + model_path + '/' + model_id + '_' + str(load_epoch) + '.h5'
        model.load_weights(model_path + '/' + model_id + '_' + str(load_epoch) + '.h5')
    else:
        print 'ERROR: Need load_epoch number to load'
        return
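A hypothetical invocation, for illustration only (the model id, paths and epoch below are assumptions, not taken from the source):

# predict('MVK_10_02',
#         DRIVE + 'MODELS/VISUAL/KERAS/',
#         DRIVE + 'DATASETS/VIDEO/TENNIS/SPLITS/',
#         DRIVE + 'DATASETS/VIDEO/TENNIS/FEATURES/',
#         split='test', batch_size=32, load_epoch=10)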
Code example #3
File: vid2img.py Project: HaydenFaulkner/phd
# Module-level imports/globals assumed by this excerpt (not shown on the
# source page); the NEW_OCV flag and cv fallback below are a guess at the
# project's OpenCV 2/3 compatibility shim.
import argparse
import os
import time

import cv2
import numpy as np

import utilities.paths as paths

try:
    import cv2.cv as cv  # legacy constant names (OpenCV 2.x only)
except ImportError:
    cv = None

DRIVE = paths.get_drive()
NEW_OCV = int(cv2.__version__.split('.')[0]) >= 3
TOTAL_FRAMES = 0  # referenced via the `global` declaration in main()


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-v", "--videofile", type=str, #required=True,
                    help='Path to video file')
    parser.add_argument("-s", "--slicefile", type=str, #required=True,
                    help='Path to a slice file. If included will only do sliced frames. If not will do all frames. '
                         'You can also use -f --first and -l --last to specify frame boundaries.')
    parser.add_argument("-o", "--outputdir", type=str, #required=True,
                    help='Path of directory where to save images .png')
    parser.add_argument("-cw", "--cropwidth", type=int, #required=True,
                    help='Will crop frame to this width around center BEFORE squashing. If omitted will use frame width.')
    parser.add_argument("-ch", "--cropheight", type=int, #required=True,
                    help='Will crop frame to this height around center BEFORE squashing. If omitted will use frame height.')
    parser.add_argument("-sw", "--squashwidth", type=int, #required=True,
                    help='Output frame width. Will squash to this width around center. If omitted will use crop width.')
    parser.add_argument("-sh", "--squashheight", type=int, #required=True,
                    help='Output frame height. Will squash to this height around center. If omitted will use crop height.')
    parser.add_argument("-f", "--first", type=int, #required=True,
                    help='Start at this frame. If omitted will start at beginning of video.')
    parser.add_argument("-l", "--last", type=int, #required=True,
                    help='Finish at this frame. If omitted will finish at end of video.')
    args = parser.parse_args()

    in_file = args.videofile
    slice_file = args.slicefile
    out_file = args.outputdir
    crop_width = args.cropwidth
    crop_height = args.cropheight
    squash_width = args.squashwidth
    squash_height = args.squashheight
    start = args.first
    end = args.last

    # Hard-coded overrides; keep these commented out in general, uncomment to bypass the CLI:
    vid_id = '001'
    print vid_id
    in_file = DRIVE + 'DATASETS/VIDEO/TENNIS/FULL_VIDEOS/ORIGINAL/V'+vid_id
    slice_file = DRIVE + 'DATASETS/VIDEO/TENNIS/FULL_VIDEOS/SLICES/VS'+vid_id+'.txt'
    out_file = paths.get_drive(2) + 'DATASETS/VIDEO/TENNIS/FULL_VIDEOS/FRAMES/'
    crop_width = 1200
    crop_height = 700
    squash_width = 512
    squash_height = 512
    ###############################################

    # Check passed paths
    err = False
    if not in_file:
        err = True
        print 'No video file path specified, please specify one with -v. Or can hardcode in file.'

    if not out_file:
        err = True
        print 'No output path specified, please specify one with -o. Or can hardcode in file.'

    if err:
        return

    capture = cv2.VideoCapture(in_file)
    global TOTAL_FRAMES
    if NEW_OCV:
        frame_width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    else:
        frame_width = int(capture.get(cv.CV_CAP_PROP_FRAME_WIDTH))
        frame_height = int(capture.get(cv.CV_CAP_PROP_FRAME_HEIGHT))


    if not crop_width and not crop_height:
        crop_width = frame_width
        crop_height = frame_height
        print 'No crop size specified, using original frame size of ' + str(frame_width) + ',' + str(frame_height) + 'px.'
    elif not crop_width:
        crop_width = frame_width
        print 'No crop width specified, using original frame width of '+str(frame_width)+'px.'
    elif not crop_height:
        crop_height = frame_height
        print 'No crop height specified, using original frame height of '+str(frame_height)+'px.'

    if not squash_width and not squash_height:
        squash_width = crop_width
        squash_height = crop_height
        print 'No squash size specified, using cropped frame size of ' + str(crop_width) + ',' + str(crop_height) + 'px.'
    elif not squash_width:
        squash_width = crop_width
        print 'No squash width specified, using cropped frame width of '+str(crop_width)+'px.'
    elif not squash_height:
        squash_height = crop_height
        print 'No squash height specified, using cropped frame height of '+str(crop_height)+'px.'


    # try slice file (default to no slices so the checks below don't crash)
    slices = []
    if slice_file and os.path.exists(slice_file):
        with open(slice_file, 'r') as f:
            slices = [[int(n) for n in line.split()] for line in f if line.strip()]
        if len(slices) == 0:
            print 'Slice file empty. Will use either first (-f) or last (-l) if specified or will do entire video.'
        elif len(slices) == 1:
            start = min(slices[0])-125
            end = max(slices[0])+125
            print 'One slice found. Starting at frame '+str(start)+' and stopping at frame '+str(end)+'.'
        else:
            # make sure start and end are not used when there are multiple slices
            start = None
            end = None
            print 'Multiple slices found.'

    else:
        print 'Slice file path specified does not exist. Will use either first (-f) or last (-l) if specified or will do entire video.'


    if start is not None:
        if NEW_OCV:
            capture.set(cv2.CAP_PROP_POS_FRAMES, start)
        else:
            capture.set(cv.CV_CAP_PROP_POS_FRAMES, start)

    else:
        start = 0
    if end is None:

        if NEW_OCV:
            end = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
        else:
            end = int(capture.get(cv.CV_CAP_PROP_FRAME_COUNT))


    out_file = out_file+'V'+vid_id+'/'+str(crop_width)+'x'+str(crop_height)+'_'+str(squash_width)+'x'+str(squash_height)
    if not os.path.exists(out_file):
        os.makedirs(out_file)

    current = start
    start_time = time.clock()
    # last frame needed by any slice; fall back to `end` when no slices were given
    maxmax = end
    if slices:
        maxmax = max(max(s) for s in slices)

    while True:

        flag, frame = capture.read()
        if flag == 0:
            break
        current += 1

        # process every frame when no slice file was given, otherwise only sliced frames
        do = not slices

        for slice in slices:
            if min(slice) <= current <= max(slice):
                do = True
                break

        if (current > end) or (current > maxmax):
            break

        if not do:
            continue

        # centre-crop to (crop_height, crop_width), then squash to the output size
        y0 = int((np.shape(frame)[0] - crop_height) / 2.0)
        x0 = int((np.shape(frame)[1] - crop_width) / 2.0)
        frame2 = frame[y0:y0 + crop_height, x0:x0 + crop_width, :]
        frame = cv2.resize(frame2, (squash_width, squash_height))

        cv2.imwrite("%s/%s_F%08d.png" % (out_file, 'V'+vid_id, current), frame)

        if current % max(1, int(end/10.0)) == 0:
            tr = (end-current)/(current/(time.clock()-start_time))
            print 'Frame: %d / %d ; Time Remaining: %02d:%02d:%02d' % (current, end, int((tr/60)/60),int((tr/60)%60),int(tr%60))
Code example #4
# Module-level imports/globals assumed by this excerpt (not shown on the source
# page): the same cv2/cv/NEW_OCV shim as the previous example, plus numpy as np,
# math, time, os, collections.deque, DRIVE from utilities.paths, and the
# project's get_input_size(model_id) helper.
def visualise(video_id, model_id, split_id, do_splits, vid_dest_path, load_epoch=None, show=False, save_timelines=False, soft=None):
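    # Renders a diagnostic video for video_id: scrolling class-probability
    # timelines built from model_id's saved per-frame predictions, ground-truth
    # bands, and optional generated commentary, written under vid_dest_path.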
    show_commentary = True


    vid_path = DRIVE + 'DATASETS/VIDEO/TENNIS/FULL_VIDEOS/ORIGINAL/'+video_id
    # choose the frame/flow image directory matching the model's input size
    if model_id == 'MVCD001' or model_id == 'MVCDn001' or model_id == 'MVCDn002':  # 1100x650_512x512
        img_dir_path = DRIVE + 'DATASETS/VIDEO/TENNIS/FULL_VIDEOS/FRAMES/'+video_id+'/1100x650_512x512/'
    elif model_id == 'MVCD002':  # 1100x650_227x227
        img_dir_path = DRIVE + 'DATASETS/VIDEO/TENNIS/FULL_VIDEOS/FRAMES/'+video_id+'/1100x650_227x227/'
    elif 'MVK_02' in model_id or 'MVK_04' in model_id or 'MVK_06' in model_id or 'MVK_08' in model_id or 'MVK_10' in model_id:  # 1100x650_227x227
        img_dir_path = DRIVE + 'DATASETS/VIDEO/TENNIS/FULL_VIDEOS/FLOW/'+video_id+'/1200x700_512x512/'
    else:  # e.g. 'MVCD003'/'MVCD004'/'MVCD005' in model_id: 1200x700_512x512
        img_dir_path = DRIVE + 'DATASETS/VIDEO/TENNIS/FULL_VIDEOS/FRAMES/' + video_id + '/1200x700_512x512/'


    # superseded by the split-based path below (kept for reference):
    # prob_path = DRIVE + 'DATASETS/VIDEO/TENNIS/FEATURES/FULL_VIDEOS/'+video_id+'/'+model_id+'/prob/npy/ind/'
    prob_path = paths.get_drive(2) + 'DATASETS/VIDEO/TENNIS/FEATURES/SPLITS/'+split_id+'/'+model_id+'_'+str(load_epoch)+'/pred/npy/ind/' ### CHANGE FROM VC001 to V001 and in actual folder
    # if 'MVK' in model_id:
    #     labels_path = DRIVE + 'MODELS/VISUAL/KERAS/'+model_id+'/labels.txt'
    # else:
    #     labels_path = DRIVE + 'MODELS/VISUAL/CAFFE/DIGITS/'+model_id+'/labels.txt'

    slice_path = DRIVE + 'DATASETS/VIDEO/TENNIS/FULL_VIDEOS/SLICES/VS'+video_id[1:]+'.txt'
    splits_path = DRIVE + 'DATASETS/VIDEO/TENNIS/SPLITS/'+split_id+'/'+video_id+'.txt'

    if not os.path.exists(vid_dest_path):
        os.makedirs(vid_dest_path)
    # use slices to set start and end points
    start = None
    end = None


    with open(slice_path) as f:
        slices = f.readlines()

    slices_ = []
    for slice in slices:
        slices_.append(slice.split())
    slices = []
    for slice in slices_:
        slices += range(int(slice[0]), int(slice[1]))

    do = []
    if 'All' in do_splits:
        do = slices
    else:
        with open(splits_path) as f:
            splits = f.readlines()

        splits_ = []
        for split in splits:
            splits_.append(split.strip('\n').split())
        splits = []
        for split in splits_:
            if split[2] in do_splits:
                splits += range(int(split[0]), int(split[1]))
        splits_set = set(splits)  # per-frame membership test, avoids an O(n^2) scan
        do = [frame for frame in slices if frame in splits_set]

    start = min(do)
    end = max(do)

    # open labels
    labels_path = DRIVE + 'DATASETS/VIDEO/TENNIS/SPLITS/' + split_id + '/labels.txt'
    with open(labels_path, 'r') as f:
        labels = f.readlines()

    # read splits
    with open(splits_path, 'r') as f:
        splits = f.readlines()
    splits = [line.rstrip().split() for line in splits]

    # get ground truth from digits splits
    split_names = ['train','val','test']
    # get all
    all = {}
    for split_name in split_names:
        with open(DRIVE + 'DATASETS/VIDEO/TENNIS/SPLITS/'+split_id+'/DIGITS/'+split_name+'_paths.txt') as f:
            data = f.readlines()
        data = [line.rstrip().split() for line in data]
        for line in data:
            if line[0][-18:-14] == video_id:
                all[int(line[0][-12:-4])] = int(line[1])

    # get equalised
    equalised = {}
    for split_name in split_names:
        with open(DRIVE + 'DATASETS/VIDEO/TENNIS/SPLITS/'+split_id+'/DIGITS/'+split_name+'_paths_equalised.txt') as f:
            data = f.readlines()
        data = [line.rstrip().split() for line in data]
        for line in data:
            if line[0][-18:-14] == video_id:
                equalised[int(line[0][-12:-4])] = int(line[1])

    # get points data for commentary
    if show_commentary:
        with open(DRIVE + 'DATASETS/VIDEO/TENNIS/SPLITS/' + split_id + '/CLASSES/Point_splits.txt') as f:
            data = f.readlines()
        data = [line.rstrip().split() for line in data]

        if model_id == 'MVK_10_02' and (load_epoch == 6 or load_epoch == 10):
            sent_model, best_epoch = 'MSK_01_30',220 # 128 dim, no extra feats
        elif model_id == 'MQK_46_02' and load_epoch == 10:
            sent_model, best_epoch = 'MSK_04_20', 260  # 256 dim, no extra feats
        elif model_id == 'MQK_46_01' and load_epoch == 4:
            sent_model, best_epoch = 'MSK_03_20', 280  # 256 dim, no extra feats
        elif model_id == 'MQK_12_01' and load_epoch == 1:
            sent_model, best_epoch = 'MSK_03_20', 280  # 256 dim, no extra feats,
        else:
            print 'ERROR: CANT FIND SENTENCE MODEL!!'
            return  # sent_model would be undefined below

        # NOTE: split_name is the leftover loop variable from above, i.e. 'test'
        with open(DRIVE + 'MODELS/SENTENCE/KERAS/'+sent_model+'/RESULTS/'+split_name+'_'+str(best_epoch)+'.txt') as f:
            dataC = f.readlines()
        dataC = [line.rstrip() for line in dataC]

        # the results file cycles through: point-id line ('P...'), ground-truth
        # sentence, then predicted sentence
        li = 0
        pid_tmp = ''
        gt_tmp = ''
        p_dict = {}
        for line in dataC:
            if line.startswith('P'):
                li = 2
                pid_tmp = line
            elif li == 2:
                li = 3
                gt_tmp = line
            elif li == 3:
                li = 0  # reset so trailing lines aren't misread as predictions
                pred_tmp = line
                p_dict[pid_tmp] = [gt_tmp, pred_tmp]
                pid_tmp = ''
                gt_tmp = ''

        comm_data = []
        for line in data:
            pid = line[0]
            atts = line[1].split(',')

            if atts[0] == split_name.lower() and atts[1] == video_id:
                comm_data.append([pid, atts[2], atts[3], p_dict[pid][0], p_dict[pid][1]]) # point_id, start, finish, gt_sent, pred_sent

    # Run Video or Images
    capture = cv2.VideoCapture(vid_path)
    if start is not None:
        if NEW_OCV:
            capture.set(cv2.CAP_PROP_POS_FRAMES, start)
        else:
            capture.set(cv.CV_CAP_PROP_POS_FRAMES, start)

    else:
        start = 0
    if end is None:
        if NEW_OCV:
            end = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
        else:
            end = int(capture.get(cv.CV_CAP_PROP_FRAME_COUNT))

    dq = deque()
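    # (dq keeps a rolling window, capped at 1500 frames below, of per-frame
    # probability vectors; it drives the scrolling class timelines)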
    current = start - 1
    # consolidated writer setup (was duplicated across the hard/soft cases)
    suffix = '_hard.avi' if soft is None else '_soft.avi'
    out_name = vid_dest_path + model_id + '_' + str(load_epoch) + '_' + '-' + '-'.join(do_splits) + suffix
    if NEW_OCV:
        fourcc = cv2.VideoWriter_fourcc('F', 'M', 'P', '4')
    else:
        fourcc = cv2.cv.CV_FOURCC('F', 'M', 'P', '4')
    video = cv2.VideoWriter(out_name, fourcc, 25, (1728, 972))

    start_time = time.clock()
    wait_count = 0
    done = 0
    sft = deque()
    if save_timelines:
        out_img_size = 40
        out_img = np.ones((1,out_img_size*2 +5+10+5,3), dtype=np.uint8)*255
        out_min_img = np.ones((1,out_img_size*2 +5+10+5,3), dtype=np.uint8)*255
        cnt = 0
        mid_cnt = 40
        # same base name as the output video, minus the .avi extension
        time_img_name = out_name[:-4]
    while True:
        if wait_count == 0:
            display_frame = np.ones((972, 1728, 3), dtype=np.uint8)*30
            flag, frame = capture.read()  # Flag returns 1 for success, 0 for failure. Frame is the currently processed frame

            if flag == 0:  # Something is wrong with your data, or the end of the video file was reached
                break

            current += 1

            if current > end-1:
                break

            if current not in do:
                continue

        if not os.path.exists("%s%s_F%08d.npy" % (prob_path, video_id, current)):
            dq.append(np.zeros((7)))
            print "%s%s_F%08d.npy does not exist, will wait 5 seconds and try again" % (prob_path, video_id, current)
            wait_count +=1
            # time.sleep(5)
            if wait_count < 2:
                continue
            else:
                wait_count = 0

        else:
            probs = np.load("%s%s_F%08d.npy" % (prob_path, video_id, current))
            dq.append(probs)
            wait_count = 0
        if len(dq) > 1500:
            dq.popleft()

        display_frame[-512:,:910,:] = cv2.resize(frame,(910,512))

        # Hardcoded crop to focus on tennis court

        # for i in range(256):
        #     for j in range(256):
        #         display_frame[2*i:2*i+2,920+2*j:920+2*j+2,:] = frame2[i,j,:]
        # display_frame[-512:,1000:1512,:] = np.repeat(np.repeat(cv2.resize(frame,(256,256)),2, axis=0), 2, axis=1)
        # if is_flow:
        #     display_frame[-512:,1000:1512,:] = cv2.resize(cv2.imread(),(512,512))
        # else:
        #     display_frame[-512:,1000:1512,:] = cv2.resize(frame,(512,512))

        dim = get_input_size(model_id)
        display_frame[-512+int((512-dim)/2.0):-int((512-dim)/2.0),1000+int((512-dim)/2.0):1512-int((512-dim)/2.0),:] = \
            cv2.resize(cv2.imread("%s%s_F%08d.png" % (img_dir_path, video_id, current)),(dim,dim))

        display_frame[-519:-512,:,:] = 200

        # one BGR colour per class timeline
        colours = [[200,200,200],[35,118,211],[34,190,204],[191,185,31],[79,180,30],[31,31,191],[160,26,26]]

        extra = 0
        if soft is not None:
            extra = 1

        # row height per timeline: 7 class rows + combined row + gap (+ soft row)
        vert_slices = int(math.floor((900-512)/(7+1+1+extra)))
        vs_wp = vert_slices-10
        for i in range(7):
            bottom = (i+1)*vert_slices
            for c in range(len(dq)):
                top = int((1-dq[len(dq)-c-1][i])*vs_wp + 10 + i*vert_slices)
                left = 1500-c
                right = 1501-c
                display_frame[min(top,bottom-1):bottom,left:right,:] = colours[i]


            display_frame[bottom-vs_wp:bottom,1510:1515,:] = colours[i]
            cv2.putText(display_frame,labels[i].rstrip(), (1520,int(bottom)-15), cv2.FONT_HERSHEY_SIMPLEX, .4, (200,200,200))

        bottom = 8*vert_slices

        if save_timelines: # write out image file
            frame_img = np.ones((1,out_img_size*2 +5+10+5,3), dtype=np.uint8)*255
            top_ = 0
            bottom_ = 0
            for class_num in range(len(labels)):
                bottom_ = top_
                top_ += (dq[len(dq) - 1][class_num]) * out_img_size

                frame_img[:, int(round(bottom_)):max(int(round(top_)), out_img_size), :] = colours[class_num]

            frame_img[:,(out_img_size + 5):(out_img_size + 15), :] = colours[np.argmax(dq[len(dq) - 1])]

            if soft is not None:
                id = "%s_F%08d.png" % (video_id, current)

                if id in soft.keys():
                    top_ = 0
                    bottom_ = 0
                    # print '---'
                    for class_num in range(len(labels)):
                        if soft[id][class_num] > 0:
                            bottom_ = top_
                            top_ += soft[id][class_num]*out_img_size
                            frame_img[:,out_img_size + 5+15+int(round(bottom_)):out_img_size + 5+15 + max(int(round(top_)),out_img_size), :] = colours[class_num]
                else:
                    frame_img[:,out_img_size + 5+15:out_img_size * 2 + 5+15, :] = colours[all[current]]
            else:
                frame_img[:,out_img_size + 5+15:out_img_size * 2 + 5+15, :] = colours[all[current]]

            frame_img[:, out_img_size + 1 + 15:out_img_size + 4 + 15, :] = [40, 40, 40]

            if cnt % 250 == 0:
                mid_cnt += 35
                frame_img[:, out_img_size + 1 + 15:out_img_size + 4 + 15, :] = [200, 200, 200]

            if cnt % 1500 == 0:
                mid_cnt = 40

            # frame_img[:, out_img_size + 1 + 15:out_img_size+4 + 15, :] = [40,40,40]#[mid_cnt,mid_cnt,mid_cnt]

            cnt += 1
            out_img = np.append(out_img, frame_img, axis=0)
            out_min_img = np.append(out_min_img, frame_img, axis=0)

            if cnt % 1500 == 0:
                cv2.imwrite(time_img_name + '_all.png', np.swapaxes(out_img, 0, 1))

                cv2.imwrite('%s_m%02d.png' % (time_img_name,int(round(cnt/1500))), np.swapaxes(out_min_img, 0, 1))
                out_min_img = np.ones((1, out_img_size * 2 + 5+10+5, 3), dtype=np.uint8) * 20

        if show_commentary:
            for comm in comm_data:
                if int(comm[1]) <= current <= int(comm[2]):
                    # this frame is part of a point so print out sent
                    cv2.putText(display_frame, 'GT:    '+comm[3], (10, int(bottom) + 60), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 200, 0))
                    cv2.putText(display_frame, 'PRED:  '+comm[4], (10, int(bottom) + 90), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 200, 200))

        for c in range(len(dq)):
            best_ind = np.argmax(dq[len(dq)-c-1])
            top = int((1-dq[len(dq)-c-1][best_ind])*vs_wp + 10 + 7*vert_slices)
            left = 1500-c
            right = 1501-c
            display_frame[min(top,bottom-1):bottom,left:right,:] = colours[best_ind]

            display_frame[bottom+5:bottom+10,left:right,:] = colours[best_ind]

            # actual set data ground truth
            if current-c in equalised:
                display_frame[bottom+20:bottom+25,left:right,:] = colours[equalised[current-c]]
            # all GT .. if soft...
            if soft is None:
                if current-c in all:
                    display_frame[bottom+30:bottom+35,left:right,:] = colours[all[current-c]]
            else:
                if c == 0:
                    id = "%s_F%08d.png" % (video_id, current-c)

                    if id in soft.keys():
                        ht = 0
                        mht = 5+vert_slices
                        # print '---'
                        for v in range(len(labels)):
                            if soft[id][v] > 0:
                                htp = ht
                                ht += int(soft[id][v]*mht)
                                # print v
                                display_frame[bottom + 30 + htp:min(bottom + 30 + ht,bottom + 30 +mht), left:right, :] = colours[v]

                        sft.append(display_frame[bottom + 30:bottom + 35 + vert_slices, left:right, :])
                        if len(sft) > 1500:
                            sft.popleft()
                    else:
                        print 'NO KEY FOR: ' + id + ' ... USING HARD CLASS: ' + str(all[current - c])
                        display_frame[bottom + 30:bottom + 35+vert_slices, left:right, :] = colours[all[current - c]]
                else:
                    # print len(sft)
                    if c <= len(sft):
                        display_frame[bottom + 30:bottom + 35 + vert_slices, left:right, :] = sft[len(sft)-1-c]


        cv2.putText(display_frame,'COMBINED', (1510,int(bottom)-15), cv2.FONT_HERSHEY_SIMPLEX, .4, (200,200,200))

        for split in splits:
            if int(split[0]) <= current <= int(split[1]):
                cv2.putText(display_frame,split[2].upper() + ' DATA', (1510,int(bottom)+25), cv2.FONT_HERSHEY_SIMPLEX, .4, (0,200,0))
                break


        video.write(display_frame)
        done += 1
        #print current

        perc = 2  # progress print interval (percent of frames)
        if done > 0:
            if done % max(1, int(perc*(end-start)/100)) == 0:
                tr=(len(do)-done)/((done)/(time.clock()-start_time))
                print '---'*20
                print 'Image: %d / %d (%d); Approx Overall Time Remaining: %02d:%02d:%02d' % (done,len(do), math.ceil(100*(done/float(len(do)))),int((tr/60)/60),int((tr/60)%60),int(tr%60))
            # print '---'*20

        if show:
            cv2.imshow('Display',display_frame)
            # Press the q key to exit
            if cv2.waitKey(1) & 0xFF == ord('q'):
               break

    if save_timelines:
        cv2.imwrite(time_img_name+'_all.png', np.swapaxes(out_img, 0, 1))
    video.release()
    capture.release()
    cv2.destroyAllWindows()