import glob
# cElementTree was deprecated in Python 3.3 and removed in 3.9;
# plain ElementTree uses the C accelerator automatically.
import xml.etree.ElementTree as ET

from Misc import getParamDict

# Convert per-sequence XML annotation files into plain-text annotation files
# for one actor of the MOT dataset.
# NOTE(review): this chunk is truncated in the source under review -- the
# per-frame extraction/writing logic continues beyond the visible span, so
# only the setup and parsing shown there is reproduced here.

root_dir = 'C:/Datasets'
actor_id = 4
start_id = 0
end_id = 59

params = getParamDict()
actors = params['mot_actors']
sequences = params['mot_sequences']
actor = actors[actor_id]

n_frames_list = []
for seq_id in range(start_id, end_id + 1):
    seq_name = sequences[actor][seq_id]
    out_fname = '{:s}/{:s}/Annotations/{:s}.txt'.format(
        root_dir, actor, seq_name)
    # NOTE(review): out_fid is never closed in the visible span; wrap the
    # per-sequence processing in `with open(...)` once the truncated writing
    # code is restored.
    out_fid = open(out_fname, 'w')
    fname = '{:s}/{:s}/Annotations/{:s}.xml'.format(root_dir, actor, seq_name)
    tree = ET.parse(fname)
    root = tree.getroot()
    img_dir = '{:s}/{:s}/Images/{:s}'.format(root_dir, actor, seq_name)
    # Count the frames actually present on disk so the annotation count can
    # be cross-checked against them.
    n_frames = len(glob.glob('{:s}/*.jpg'.format(img_dir)))
    n_frames_list.append(n_frames)
    print('Processing sequence {:d} :: {:s}'.format(seq_id, seq_name))
    for frame_obj in tree.iter('frame'):
        # NOTE(review): 'tsrget_list' is almost certainly a typo for
        # 'target_list'; kept as-is because the truncated downstream code
        # (not visible here) may reference this name -- confirm and rename.
        tsrget_list = frame_obj.find('target_list')
        frame_id = int(frame_obj.attrib['num'])
import os
import sys

import cv2
import numpy as np

from Misc import getParamDict
from Misc import readDistGridParams

if __name__ == '__main__':
    # Driver script iterating over the VTB sequences of the dataset.
    # NOTE(review): this chunk is truncated in the source under review --
    # the per-sequence processing body continues beyond the visible span.
    params_dict = getParamDict()
    param_ids = readDistGridParams()

    pause_seq = 0
    gt_col = (0, 0, 255)  # BGR: red, presumably for ground-truth overlays
    gt_thickness = 1
    sequences = params_dict['sequences']
    db_root_dir = '../Datasets'
    # img_name_fmt = 'img%03d.jpg'
    img_name_fmt = 'frame%05d.jpg'
    opt_gt_ssm = '0'
    use_opt_gt = 0

    # seq_id from the config is immediately shadowed by the loop below;
    # kept for parity with the original flow.
    seq_id = param_ids['seq_id']
    actor = 'VTB'
    sequences = sequences[actor]
    # NOTE(review): hard-coded sequence count -- confirm it matches
    # len(sequences) for the VTB actor.
    n_seq = 100
    for seq_id in range(n_seq):
        seq_name = sequences[seq_id]
        # seq_name = 'nl_mugII_s1'
def getBasicParams(trackers, xv_input_found, extended_db):
    """Build the option lists, labels and default indices for the tracker UI.

    :param trackers: list of available tracker names; must contain 'rkl'
        (used to pick the default tracker) or ValueError is raised.
    :param xv_input_found: if truthy, add the XVision pipeline and its
        input sources alongside OpenCV.
    :param extended_db: if truthy, derive task types and sequences from
        getParamDict(); otherwise use the hard-coded simple/complex/robot
        task lists with light-condition and speed options.
    :return: tuple (params, labels, default_id) where params[i] is the list
        of choices for the option named labels[i], and default_id[i] is the
        index of the default choice within params[i].
    """
    video_pipeline = ['OpenCV']
    cv_sources = ['jpeg', 'mpeg', 'usb camera']
    sources = [cv_sources]
    if xv_input_found:
        video_pipeline.append('XVision')
        xv_sources = ['mpeg', 'usb camera', 'firewire camera']
        sources.append(xv_sources)
    initialization = ['manual', 'ground_truth']
    color_spaces = ['Grayscale', 'RGB', 'HSV', 'YCrCb', 'HLS', 'Lab']
    filters = [
        'none', 'gabor', 'laplacian', 'sobel', 'scharr', 'canny', 'LoG', 'DoG'
    ]
    features = ['none', 'hoc']
    smoothing = ['none', 'box', 'bilateral', 'gauss', 'median']
    # Odd kernel sizes 3..25 as strings; a list comprehension (not map) so
    # that .index() works on Python 3, where map() returns an iterator.
    smoothing_kernel = [str(k) for k in range(3, 26, 2)]
    params = [
        video_pipeline, sources, initialization, color_spaces, filters,
        features, smoothing, smoothing_kernel, trackers
    ]
    labels = [
        'pipeline', 'source', 'initialization', 'color_space', 'filter',
        'feature', 'smoothing', 'smoothing_kernel', 'tracker'
    ]
    default_id = [
        video_pipeline.index('OpenCV'),
        sources[video_pipeline.index('OpenCV')].index('jpeg'),
        initialization.index('ground_truth'),
        color_spaces.index('Grayscale'),
        filters.index('none'),
        features.index('none'),
        smoothing.index('gauss'),
        smoothing_kernel.index(str(5)),
        trackers.index('rkl')
    ]
    if extended_db:
        params_dict = getParamDict()
        actors = params_dict['actors']
        # Index with a range loop (actors is keyed 0..n-1); the loop variable
        # is named to avoid shadowing the builtin `id`.
        task_type = [actors[idx] for idx in range(len(actors))]
        tasks = [params_dict['sequences'][actor] for actor in task_type]
        # list() is required on Python 3, where dict.values() is a view
        # without indexing support.
        tasks = [list(seq.values()) for seq in tasks]
        params.extend([task_type, tasks])
        labels.extend(['type', 'task'])
        default_id.extend([0, 3])
    else:
        task_type = ['simple', 'complex']
        light_conditions = ['nl', 'dl']
        speeds = ['s1', 's2', 's3', 's4', 's5', 'si']
        complex_tasks = ['bus', 'highlighting', 'letter', 'newspaper']
        simple_tasks = [
            'bookI', 'bookII', 'bookIII', 'cereal', 'juice', 'mugI', 'mugII',
            'mugIII'
        ]
        robot_tasks = [
            'robot_bookI', 'robot_bookII', 'robot_bookIII', 'robot_cereal',
            'robot_juice', 'robot_mugI', 'robot_mugII', 'robot_mugIII'
        ]
        tasks = [simple_tasks, complex_tasks, robot_tasks]
        params.extend([task_type, light_conditions, speeds, tasks])
        labels.extend(['type', 'light', 'speed', 'task'])
        default_id.extend([
            task_type.index('simple'),
            light_conditions.index('nl'),
            speeds.index('s3'),
            tasks[task_type.index('simple')].index('juice')
        ])
    return params, labels, default_id