Example #1
# Assumed imports: `al` is the project's AnnotationLib-style annotation module,
# `m` is the tracking script whose main() accepts an argv-style list.
import glob
import multiprocessing
import os

def create_tracking_files():
    # files = glob.glob('/scail/group/deeplearning/driving_data/andriluka/IMAGES/driving_data_q50_data/all_extracted/*.al')
    # files = [f for f in files if f.find('edit') == -1 and f.find('track') == -1]

    files = glob.glob('/scail/group/deeplearning/driving_data/andriluka/IMAGES/driving_data_q50_data/all_extracted/benchmark_seq*_cleanup_trackid_every20.pal')
    absolute_path_to_relative_path(files)
    all_args = []
    for f in files:
        # build an argv-style argument list for the tracking script;
        # split() with no separator avoids an empty trailing token
        all_args.append(('-a %s -o ./' % f).split())

    # run the tracker over every annotation file in parallel, printing progress
    pool = multiprocessing.Pool(processes=9)
    for i, result in enumerate(pool.imap_unordered(m.main, all_args)):
        print "done %0.1f%%" % (float(i + 1) * 100 / len(all_args))
    pool.close()
    pool.join()

    # remove partials
    partials = glob.glob('*partial.pal')
    for p in partials:
        os.remove(p)

    # rewrite absolute image paths in the tracked output to relative dir/file paths
    for f in files:
        tracked_file = os.path.basename(f).replace('.pal','-track.pal')
        imgs = al.parse(tracked_file)
        for img in imgs:
            if img.imageName[0] == '/':
                img.imageName = '/'.join(img.imageName.split('/')[-2:])
        al.save(tracked_file, imgs)
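
The pool section above maps an argv-style entry point over a list of argument vectors and reports progress as results complete. A self-contained sketch of the same pattern, with a stand-in worker since `m.main` is project code:

import multiprocessing

def worker_main(argv):
    # stand-in for m.main; argv looks like ['-a', '<file>', '-o', './']
    return argv[1]

if __name__ == '__main__':
    all_args = [['-a', 'seq%d.pal' % i, '-o', './'] for i in range(5)]
    pool = multiprocessing.Pool(processes=2)
    for i, result in enumerate(pool.imap_unordered(worker_main, all_args)):
        print "done %0.1f%%" % (float(i + 1) * 100 / len(all_args))
    pool.close()
    pool.join()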
Example #2
def absolute_path_to_relative_path(files):
    # Rewrite each absolute image path to its last two components
    # (directory/file) and save the annotation file in place.
    for f in files:
        annotations = al.parse(f)
        for img in annotations:
            if img.imageName[0] == '/':
                img.imageName = '/'.join(img.imageName.split('/')[-2:])
        al.save(f, annotations)
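
Examples #1 and #2 lean on a project-specific annotation module (`al`, also referenced as AnnotationLib below). A minimal sketch of the interface the examples appear to assume; apart from parse, save, imageName, rects, and the rect fields used above, every detail here is a hypothetical stub:

class AnnoRect(object):
    # bounding box in image coordinates; id carries the track id in Example #4
    def __init__(self, x1=0, y1=0, x2=0, y2=0):
        self.x1, self.y1, self.x2, self.y2 = x1, y1, x2, y2
        self.id = -1

class Annotation(object):
    def __init__(self, imageName=''):
        self.imageName = imageName  # path to the frame image
        self.rects = []             # list of AnnoRect

def parse(filename):
    # load a .al/.pal annotation file into a list of Annotation objects
    raise NotImplementedError

def save(filename, annotations):
    # write a list of Annotation objects back to disk
    raise NotImplementedError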
Example #3
                rect.x1 = vatic_boxes[bidx].xtl
                rect.y1 = vatic_boxes[bidx].ytl
                rect.x2 = vatic_boxes[bidx].xbr
                rect.y2 = vatic_boxes[bidx].ybr

                cur_frame = vatic_boxes[bidx].frame
                annolist[cur_frame].rects.append(rect)

    # save annolist
    fname_ext = os.path.basename(opts.vatic_filename)
    fname = os.path.splitext(fname_ext)[0]

    pal_filename = opts.output_dir + "/" + fname + ".pal"
    print "saving ", pal_filename
    AnnotationLib.save(pal_filename, annolist)

    # if not os.path.exists(opts.output_dir):
    #     print "creating ", opts.output_dir;
    #     os.makedirs(opts.output_dir);

    # imglist = glob.glob(opts.input_dir + "/*.jpeg");
    # imglist.sort();

    # idxlist = range(0, len(imglist), opts.imgstep);

    # print idxlist;
Example #4
                rect.id = tidx
                rect.x1 = vatic_boxes[bidx].xtl
                rect.y1 = vatic_boxes[bidx].ytl
                rect.x2 = vatic_boxes[bidx].xbr
                rect.y2 = vatic_boxes[bidx].ybr

                cur_frame = vatic_boxes[bidx].frame
                annolist[cur_frame].rects.append(rect)

    # save annolist
    fname_ext = os.path.basename(opts.vatic_filename)
    fname = os.path.splitext(fname_ext)[0]

    pal_filename = opts.output_dir + "/" + fname + ".pal"
    print "saving ", pal_filename
    AnnotationLib.save(pal_filename, annolist)

    # if not os.path.exists(opts.output_dir):
    #     print "creating ", opts.output_dir;
    #     os.makedirs(opts.output_dir);

    # imglist = glob.glob(opts.input_dir + "/*.jpeg");
    # imglist.sort();

    # idxlist = range(0, len(imglist), opts.imgstep);

    # print idxlist;

    # if idxlist[-1] != len(imglist) - 1:
    #     idxlist.append(len(imglist) - 1);
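
Both fragments above sit inside a double loop over VATIC tracks and their per-frame boxes. A hedged reconstruction of that context: the field accesses match the fragments, but `vatic_tracks`, `frame_filenames`, and the AnnotationLib constructors are assumptions rather than the original code.

annolist = [AnnotationLib.Annotation(imageName=fn) for fn in frame_filenames]

for tidx, track in enumerate(vatic_tracks):
    vatic_boxes = track.boxes  # assumed accessor for the track's boxes
    for bidx in range(len(vatic_boxes)):
        rect = AnnotationLib.AnnoRect()
        rect.id = tidx
        rect.x1 = vatic_boxes[bidx].xtl
        rect.y1 = vatic_boxes[bidx].ytl
        rect.x2 = vatic_boxes[bidx].xbr
        rect.y2 = vatic_boxes[bidx].ybr

        cur_frame = vatic_boxes[bidx].frame
        annolist[cur_frame].rects.append(rect)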
Example #5
# Assumed imports; annotation_from_result, bash_var_to_py, get_hit_name,
# merge_labelact_annolist, and the ATTR_VAL_* constants come from elsewhere
# in the project.
import argparse
import collections
import csv
import os
import subprocess

import AnnotationLib

def main():
    parser = argparse.ArgumentParser()

    parser.add_argument("hits_dir", help="hit directory")
    parser.add_argument("--print_empty", action="store_true", help="print info on empty hits");
    parser.add_argument("--save_worker_results", action="store_true", help="save results for each individual worker");

    #parser.add_argument("output_ext", default=".al", help="output format .idl/.al/.pal")
    #parser.add_argument("--url_prefix", help="path on S3 that should be removed when converting to annolist")   

    args = parser.parse_args()

    #url_prefix = bash_var_to_py("./data_utils_init.sh", "S3_HOST_DIR")
    url_prefix = bash_var_to_py(args.hits_dir + "/hit_params.sh", "S3_HOST_DIR")

    args.output_ext = ".pal"

    if url_prefix[-1] != os.sep:
        url_prefix += os.sep

    hit_name = get_hit_name(args.hits_dir)
    #pal_name = args.hits_dir + '/' + hit_name + '.pal'
    pal_name = args.hits_dir + '/' + hit_name + args.output_ext

    results_filename = args.hits_dir + '/' + hit_name + '.results'
    results_by_worker_dir = '%s/results_by_worker_%s' % (args.hits_dir, hit_name)
    subprocess.call(['mkdir', '-p', results_by_worker_dir])
    try:
        with open(args.hits_dir + '/bad_workers.txt', 'r') as f:
            bad_workerids = set(x.strip() for x in f.readlines())
    except IOError:
        bad_workerids = set()

    with open(results_filename, 'r') as results_file:
        results_list = list(csv.reader(results_file, delimiter='\t'))

        # map column name -> column index using the header row
        invert = lambda d: {v: k for k, v in d.iteritems()}
        columns = invert(dict(enumerate(results_list[0])))

        annotation_list = AnnotationLib.AnnoList()
        hit_type = ""

        for each in results_list[1:]:
            if each[columns['hitstatus']] == 'Reviewable' and each[columns['workerid']] not in bad_workerids:
                cur_hit_type = each[columns['Answer.results']].split(',')[0]
                if not hit_type:
                    hit_type = cur_hit_type
                else:
                    assert hit_type == cur_hit_type

                a = annotation_from_result(each, columns, url_prefix)
                annotation_list.append(a)

        # MA: some hit types require special post-processing
        if hit_type == "label_act":
            annotation_list = merge_labelact_annolist(annotation_list)
            annotation_list.add_attribute("gender", int)
            annotation_list.add_attribute("ptype", int)
            annotation_list.add_attribute("act", int)

            annotation_list.add_attribute_val("gender", "male", ATTR_VAL_GENDER_MALE)
            annotation_list.add_attribute_val("gender", "female", ATTR_VAL_GENDER_FEMALE)

            annotation_list.add_attribute_val("ptype", "sales", ATTR_VAL_PTYPE_SALES)
            annotation_list.add_attribute_val("ptype", "customer", ATTR_VAL_PTYPE_CUST)

            annotation_list.add_attribute_val("act", "interact_with_customer", ATTR_VAL_ACT_SALES_INT)
            annotation_list.add_attribute_val("act", "clean", ATTR_VAL_ACT_SALES_CLEAN)
            annotation_list.add_attribute_val("act", "other_sales", ATTR_VAL_ACT_SALES_OTHER)

            annotation_list.add_attribute_val("act", "queue", ATTR_VAL_ACT_CUST_QUEUE)
            annotation_list.add_attribute_val("act", "interact_with_sales", ATTR_VAL_ACT_CUST_INT)
            annotation_list.add_attribute_val("act", "browse", ATTR_VAL_ACT_CUST_BROWSE)
            annotation_list.add_attribute_val("act", "other_customer", ATTR_VAL_ACT_CUST_OTHER)

        AnnotationLib.save(pal_name, annotation_list)

        if args.save_worker_results:
            for workerid in set(x[columns['workerid']] for x in results_list[1:]):
                if workerid == '':
                    print '%s missing entries' % sum(1 for each in results_list[1:]
                                                     if each[columns['workerid']] == '')
                    continue

                annotation_list = AnnotationLib.AnnoList([annotation_from_result(each, columns, url_prefix)
                                                          for each in results_list[1:]
                                                          if each[columns['hitstatus']] == 'Reviewable'
                                                          and each[columns['workerid']] == workerid])

                output_filename = '%s/%s.pal' % (results_by_worker_dir, workerid)
                #print "saving ", output_filename

                print "worker: {}, number of annotations: {}".format(workerid, len(annotation_list))
                AnnotationLib.save(output_filename, annotation_list)

        if args.print_empty:
            for res in results_list[1:]:
                if res[columns['Answer.results']] == "":
                    print "empty hit: ", res

        # show statistics on empty results (these happen due to an unknown bug in the JavaScript tool)
        empty_res_workers = [each[columns['workerid']] for each in results_list[1:] if each[columns['Answer.results']] == ""]

        print "empty output by workerid: " 
        for workerid, worker_empty_count in collections.Counter(empty_res_workers).items():
            print workerid, worker_empty_count

        num_empty = sum(1 for each in results_list[1:] if each[columns['Answer.results']] == "")
        print "number of empty results: ", num_empty