Example #1
    def __call__(self, args, group):
        print "Checking integrity..."

        # read first frame to get sizes
        path = Video.getframepath(0, args.location)
        try:
            im = Image.open(path)
        except IOError:
            print "Cannot read {0}".format(path)
            return
        width, height = im.size

        print "Searching for last frame..."

        # search for last frame
        toplevel = max(int(x)
            for x in os.listdir(args.location))
        secondlevel = max(int(x)
            for x in os.listdir("{0}/{1}".format(args.location, toplevel)))
        maxframes = max(int(os.path.splitext(x)[0])
            for x in os.listdir("{0}/{1}/{2}"
            .format(args.location, toplevel, secondlevel))) + 1

        print "Found {0} frames.".format(maxframes)

        # can we read the last frame?
        path = Video.getframepath(maxframes - 1, args.location)
        try:
            im = Image.open(path)
        except IOError:
            print "Cannot read {0}".format(path)
            return

        # check last frame sizes
        if im.size[0] != width or im.size[1] != height:
            print "First frame dimensions differ from last frame"
            return

        if session.query(Video).filter(Video.slug == args.slug).count():
            print "Video {0} already exists!".format(args.slug)
            return

        if args.train_with:
            if args.for_training:
                print "A training video cannot require training"
                return
            print "Looking for training video..."
            trainer = session.query(Video)
            trainer = trainer.filter(Video.slug == args.train_with)
            if not trainer.count():
                print ("Training video {0} does not exist!"
                    .format(args.train_with))
                return
            trainer = trainer.one()
        else:
            trainer = None

        # create video
        video = Video(slug = args.slug,
                      location = os.path.realpath(args.location), 
                      width = width,
                      height = height,
                      totalframes = maxframes,
                      skip = args.skip,
                      perobjectbonus = args.per_object_bonus,
                      completionbonus = args.completion_bonus,
                      trainwith = trainer,
                      isfortraining = args.for_training,
                      blowradius = args.blow_radius,
                      action = args.action,
                      pose = args.pose)

        if args.for_training:
            video.trainvalidator = qa.tolerable(args.for_training_overlap,
                                                args.for_training_tolerance,
                                                args.for_training_mistakes)
            print "Training validator is {0}".format(video.trainvalidator)

        session.add(video)

        print "Binding labels and attributes..."

        # create labels and attributes
        labelcache = {}
        attributecache = {}
        lastlabel = None
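        # entries prefixed with "~" become attributes of the most recently created label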
        for labeltext in args.labels:
            if labeltext[0] == "~":
                if lastlabel is None:
                    print "Cannot assign an attribute without a label!"
                    return
                labeltext = labeltext[1:]
                attribute = Attribute(text = labeltext)
                session.add(attribute)
                lastlabel.attributes.append(attribute)
                attributecache[labeltext] = attribute
            else:
                label = Label(text = labeltext)
                session.add(label)
                video.labels.append(label)
                labelcache[labeltext] = label
                lastlabel = label

        print "Creating symbolic link..."
        symlink = "public/frames/{0}".format(video.slug)
        try:
            os.remove(symlink)
        except:
            pass
        os.symlink(video.location, symlink)

        print "Creating segments..."
        # create shots and jobs

        if args.for_training:
            segment = Segment(video = video)
            if args.for_training_start:
                segment.start = args.for_training_start
                if segment.start < 0:
                    segment.start = 0
            else:
                segment.start = 0
            if args.for_training_stop:
                segment.stop = args.for_training_stop
                if segment.stop > video.totalframes - 1:
                    segment.stop = video.totalframes - 1
            else:
                segment.stop = video.totalframes - 1
            job = Job(segment = segment, group = group, ready = False)
            session.add(segment)
            session.add(job)
        elif args.use_frames:
            with open(args.use_frames) as useframes:
                for line in useframes:
                    ustart, ustop = line.split()
                    ustart, ustop = int(ustart), int(ustop)
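                    # split [ustart, ustop) into roughly equal segments,
                    # each no longer than args.length (before the overlap is added)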
                    validlength = float(ustop - ustart)
                    numsegments = math.ceil(validlength / args.length)
                    segmentlength = math.ceil(validlength / numsegments)

                    for start in range(ustart, ustop, int(segmentlength)):
                        stop = min(start + segmentlength + args.overlap + 1,
                                   ustop)
                        segment = Segment(start = start,
                                          stop = stop, 
                                          video = video)
                        job = Job(segment = segment, group = group)
                        session.add(segment)
                        session.add(job)
        else:
            startframe = args.start_frame
            stopframe = args.stop_frame
            if not stopframe:
                stopframe = video.totalframes - 1
            for start in range(startframe, stopframe, args.length):
                stop = min(start + args.length + args.overlap + 1,
                           stopframe)
                segment = Segment(start = start,
                                    stop = stop,
                                    video = video)
                job = Job(segment = segment, group = group)
                session.add(segment)
                session.add(job)

        if args.per_object_bonus:
            group.schedules.append(
                PerObjectBonus(amount = args.per_object_bonus))
        if args.completion_bonus:
            group.schedules.append(
                CompletionBonus(amount = args.completion_bonus))

        session.add(group)

        if args.for_training and args.for_training_data:
            print ("Loading training ground truth annotations from {0}"
                        .format(args.for_training_data))
            with open(args.for_training_data, "r") as file:
                pathcache = {}
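                # each line: id xtl ytl xbr ybr frame outside occluded generated label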
                for line in file:
                    (id, xtl, ytl, xbr, ybr,
                     frame, outside, occluded, generated,
                     label) = line.split(" ")

                    if int(generated):
                        continue

                    if id not in pathcache:
                        print "Imported new path {0}".format(id)
                        label = labelcache[label.strip()[1:-1]]
                        pathcache[id] = Path(job = job, label = label)

                    box = Box(path = pathcache[id])
                    box.xtl = int(xtl)
                    box.ytl = int(ytl)
                    box.xbr = int(xbr)
                    box.ybr = int(ybr)
                    box.frame = int(frame)
                    box.outside = int(outside)
                    box.occluded = int(occluded)
                    pathcache[id].boxes.append(box)

        session.commit()

        if args.for_training:
            if args.for_training_data:
                print "Video and ground truth loaded."
            else:
                print "Video loaded and ready for ground truth:"
                print ""
                print "\t{0}".format(job.offlineurl(config.localhost))
                print ""
                print "Visit this URL to provide training with ground truth."
        else:
            print "Video loaded and ready for publication."
Example #2
    def __call__(self, args, group):
        print "Checking integrity..."

        # read first frame to get sizes
        path = Video.getframepath(0, args.location)
        try:
            im = Image.open(path)
        except IOError:
            print "Cannot read {0}".format(path)
            return
        width, height = im.size

        print "Searching for last frame..."

        # search for last frame
        toplevel = max(int(x) for x in os.listdir(args.location))
        secondlevel = max(
            int(x)
            for x in os.listdir("{0}/{1}".format(args.location, toplevel)))
        maxframes = max(
            int(os.path.splitext(x)[0])
            for x in os.listdir("{0}/{1}/{2}".format(args.location, toplevel,
                                                     secondlevel))) + 1

        print "Found {0} frames.".format(maxframes)

        # can we read the last frame?
        path = Video.getframepath(maxframes - 1, args.location)
        try:
            im = Image.open(path)
        except IOError:
            print "Cannot read {0}".format(path)
            return

        # check last frame sizes
        if im.size[0] != width or im.size[1] != height:
            print "First frame dimensions differ from last frame"
            return

        if session.query(Video).filter(Video.slug == args.slug).count():
            print "Video {0} already exists!".format(args.slug)
            return

        if args.train_with:
            if args.for_training:
                print "A training video cannot require training"
                return
            print "Looking for training video..."
            trainer = session.query(Video)
            trainer = trainer.filter(Video.slug == args.train_with)
            if not trainer.count():
                print("Training video {0} does not exist!".format(
                    args.train_with))
                return
            trainer = trainer.one()
        else:
            trainer = None

        # create video
        video = Video(slug=args.slug,
                      location=os.path.realpath(args.location),
                      width=width,
                      height=height,
                      totalframes=maxframes,
                      skip=args.skip,
                      perobjectbonus=args.per_object_bonus,
                      completionbonus=args.completion_bonus,
                      trainwith=trainer,
                      isfortraining=args.for_training,
                      blowradius=args.blow_radius)

        if args.for_training:
            video.trainvalidator = qa.tolerable(args.for_training_overlap,
                                                args.for_training_tolerance,
                                                args.for_training_mistakes)
            print "Training validator is {0}".format(video.trainvalidator)

        session.add(video)

        print "Binding labels and attributes..."

        # create labels and attributes
        labelcache = {}
        attributecache = {}
        lastlabel = None
        for labeltext in args.labels:
            if labeltext[0] == "~":
                if lastlabel is None:
                    print "Cannot assign an attribute without a label!"
                    return
                labeltext = labeltext[1:]
                attribute = Attribute(text=labeltext)
                session.add(attribute)
                lastlabel.attributes.append(attribute)
                attributecache[labeltext] = attribute
            else:
                label = Label(text=labeltext)
                session.add(label)
                video.labels.append(label)
                labelcache[labeltext] = label
                lastlabel = label

        print "Creating symbolic link..."
        symlink = "public/frames/{0}".format(video.slug)
        try:
            os.remove(symlink)
        except:
            pass
        os.symlink(video.location, symlink)

        print "Creating segments..."
        # create shots and jobs

        if args.for_training:
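            # a training video gets a single segment, clamped to [0, totalframes - 1]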
            segment = Segment(video=video)
            if args.for_training_start:
                segment.start = args.for_training_start
                if segment.start < 0:
                    segment.start = 0
            else:
                segment.start = 0
            if args.for_training_stop:
                segment.stop = args.for_training_stop
                if segment.stop > video.totalframes - 1:
                    segment.stop = video.totalframes - 1
            else:
                segment.stop = video.totalframes - 1
            job = Job(segment=segment, group=group, ready=False)
            session.add(segment)
            session.add(job)
        elif args.use_frames:
            with open(args.use_frames) as useframes:
                for line in useframes:
                    ustart, ustop = line.split()
                    ustart, ustop = int(ustart), int(ustop)
                    validlength = float(ustop - ustart)
                    numsegments = math.ceil(validlength / args.length)
                    segmentlength = math.ceil(validlength / numsegments)

                    for start in range(ustart, ustop, int(segmentlength)):
                        stop = min(start + segmentlength + args.overlap + 1,
                                   ustop)
                        segment = Segment(start=start, stop=stop, video=video)
                        job = Job(segment=segment, group=group)
                        session.add(segment)
                        session.add(job)
        else:
            startframe = args.start_frame
            stopframe = args.stop_frame
            if not stopframe:
                stopframe = video.totalframes - 1
            for start in range(startframe, stopframe, args.length):
                stop = min(start + args.length + args.overlap + 1, stopframe)
                segment = Segment(start=start, stop=stop, video=video)
                job = Job(segment=segment, group=group)
                session.add(segment)
                session.add(job)

        if args.per_object_bonus:
            group.schedules.append(
                PerObjectBonus(amount=args.per_object_bonus))
        if args.completion_bonus:
            group.schedules.append(
                CompletionBonus(amount=args.completion_bonus))

        session.add(group)

        if args.for_training and args.for_training_data:
            print("Loading training ground truth annotations from {0}".format(
                args.for_training_data))
            with open(args.for_training_data, "r") as file:
                pathcache = {}
                for line in file:
                    (id, xtl, ytl, xbr, ybr, frame, outside, occluded,
                     generated, label) = line.split(" ")

                    if int(generated):
                        continue

                    if id not in pathcache:
                        print "Imported new path {0}".format(id)
                        label = labelcache[label.strip()[1:-1]]
                        pathcache[id] = Path(job=job, label=label)

                    box = Box(path=pathcache[id])
                    box.xtl = int(xtl)
                    box.ytl = int(ytl)
                    box.xbr = int(xbr)
                    box.ybr = int(ybr)
                    box.frame = int(frame)
                    box.outside = int(outside)
                    box.occluded = int(occluded)
                    pathcache[id].boxes.append(box)

        session.commit()

        if args.for_training:
            if args.for_training_data:
                print "Video and ground truth loaded."
            else:
                print "Video loaded and ready for ground truth:"
                print ""
                print "\t{0}".format(job.offlineurl(config.localhost))
                print ""
                print "Visit this URL to provide training with ground truth."
        else:
            print "Video loaded and ready for publication."
Example #3
File: cli.py Project: mmisono/vatic
    def __call__(self, args):
        video = session.query(Video).filter(Video.slug == args.slug)
        if video.count() == 0:
            print "Video {0} does not exist!".format(args.slug)
            raise SystemExit()
        video = video.one()

        print "Parsing text data"
        data = {}
        if args.json:
            data = self.getdatajson(args.labelfile)
        else:
            data = self.getdatatext(args.labelfile, video.totalframes)

        scale = args.scale
        if args.dimensions or args.original_video or args.original_frame:
            print "Computing scale"
            if args.original_video:
                w, h = ffmpeg.extract(args.original_video).next().size
            elif args.original_frame:
                w, h = Image.open(args.original_frame).size
            else:
                w, h = args.dimensions.split("x")
            w = float(w)
            h = float(h)
            s = float(video.width) / w
            if s * h > video.height:
                s = float(video.height) / h
            scale = s
            print "Scale = {0}".format(scale)

        segmentcount = 1
        for segment in video.segments:
            print "Segment {0} of {1}".format(segmentcount, len(video.segments))
            segmentcount += 1
            for job in segment.jobs:
                for boxid in data:
                    label = data[boxid]['label']
                    boxes = data[boxid]['boxes']

                    query = session.query(Label).filter(Label.videoid == video.id).filter(Label.text == label)
                    if query.count() == 0:
                        continue
                    label = query.one()
 
                    newpath = Path(label=label)
                    visible = False
                    for frame, boxdata in boxes.iteritems():
                        frame = int(frame)
                        if frame < segment.start or segment.stop <= frame or (frame % video.blowradius != 0):
                            continue
                        newbox = Box(path=newpath)
                        #newbox.xtl = max(boxdata['xtl'], 0)
                        #newbox.ytl = max(boxdata['ytl'], 0)
                        #newbox.xbr = max(boxdata['xbr'], 0)
                        #newbox.ybr = max(boxdata['ybr'], 0)
                        newbox.xtl = boxdata['xtl']
                        newbox.ytl = boxdata['ytl']
                        newbox.xbr = boxdata['xbr']
                        newbox.ybr = boxdata['ybr']

                        newbox.occluded = boxdata['occluded']
                        newbox.outside = boxdata['outside']
                        newbox.generated = boxdata['generated']
                        newbox.frame = frame

                        scalebox = newbox.getbox()
                        scalebox = scalebox.transform(scale)
                        newbox.xtl = scalebox.xtl
                        newbox.ytl = scalebox.ytl
                        newbox.xbr = scalebox.xbr
                        newbox.ybr = scalebox.ybr

                        if not newbox.outside:
                            visible = True

                    if visible:
                        job.paths.append(newpath)

                session.add(job)
        session.commit()
Example #4
    def __call__(self, args):
        video = session.query(Video).filter(Video.slug == args.slug)
        if video.count() == 0:
            print "Video {0} does not exist!".format(args.slug)
            raise SystemExit()
        video = video.one()

        print "Parsing text data"
        data = {}
        if args.json:
            data = self.getdatajson(args.labelfile)
        else:
            data = self.getdatatext(args.labelfile, video.totalframes)

        scale = args.scale
        if args.dimensions or args.original_video or args.original_frame:
            print "Computing scale"
            if args.original_video:
                w, h = ffmpeg.extract(args.original_video).next().size
            elif args.original_frame:
                w, h = Image.open(args.original_frame).size
            else:
                w, h = args.dimensions.split("x")
            w = float(w)
            h = float(h)
            s = float(video.width) / w
            if s * h > video.height:
                s = float(video.height) / h
            scale = s
            print "Scale = {0}".format(scale)

        segmentcount = 1
        for segment in video.segments:
            print "Segment {0} of {1}".format(segmentcount,
                                              len(video.segments))
            segmentcount += 1
            for job in segment.jobs:
                for boxid in data:
                    label = data[boxid]['label']
                    boxes = data[boxid]['boxes']

                    query = session.query(Label).filter(
                        Label.videoid == video.id).filter(Label.text == label)
                    if query.count() == 0:
                        continue
                    label = query.one()

                    newpath = Path(label=label)
                    visible = False
                    for frame, boxdata in boxes.iteritems():
                        frame = int(frame)
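                        # skip frames outside this segment or off the blow-radius stride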
                        if frame < segment.start or segment.stop <= frame or (
                                frame % video.blowradius != 0):
                            continue
                        newbox = Box(path=newpath)
                        #newbox.xtl = max(boxdata['xtl'], 0)
                        #newbox.ytl = max(boxdata['ytl'], 0)
                        #newbox.xbr = max(boxdata['xbr'], 0)
                        #newbox.ybr = max(boxdata['ybr'], 0)
                        newbox.xtl = boxdata['xtl']
                        newbox.ytl = boxdata['ytl']
                        newbox.xbr = boxdata['xbr']
                        newbox.ybr = boxdata['ybr']

                        newbox.occluded = boxdata['occluded']
                        newbox.outside = boxdata['outside']
                        newbox.generated = boxdata['generated']
                        newbox.frame = frame

                        scalebox = newbox.getbox()
                        scalebox = scalebox.transform(scale)
                        newbox.xtl = scalebox.xtl
                        newbox.ytl = scalebox.ytl
                        newbox.xbr = scalebox.xbr
                        newbox.ybr = scalebox.ybr

                        if not newbox.outside:
                            visible = True

                    if visible:
                        job.paths.append(newpath)

                session.add(job)
        session.commit()
Example #5
    def __call__(self, args, group):
        print "Checking integrity..."

        # read first frame to get sizes
        path = Video.getframepath(0, args.location)
        try:
            im = Image.open(path)
        except IOError:
            print "Cannot read {0}".format(path)
            return
        width, height = im.size

        print "Searching for last frame..."

        # search for last frame
        toplevel = max(int(x) for x in os.listdir(args.location))
        secondlevel = max(
            int(x)
            for x in os.listdir("{0}/{1}".format(args.location, toplevel)))
        maxframes = max(
            int(os.path.splitext(x)[0])
            for x in os.listdir("{0}/{1}/{2}".format(args.location, toplevel,
                                                     secondlevel))) + 1

        print "Found {0} frames.".format(maxframes)

        # can we read the last frame?
        path = Video.getframepath(maxframes - 1, args.location)
        try:
            im = Image.open(path)
        except IOError:
            print "Cannot read {0}".format(path)
            return

        # check last frame sizes
        if im.size[0] != width or im.size[1] != height:
            print "First frame dimensions differ from last frame"
            return

        if session.query(Video).filter(Video.slug == args.slug).count():
            print "Video {0} already exists!".format(args.slug)
            return

        if args.train_with:
            if args.for_training:
                print "A training video cannot require training"
                return
            print "Looking for training video..."
            trainer = session.query(Video)
            trainer = trainer.filter(Video.slug == args.train_with)
            if not trainer.count():
                print("Training video {0} does not exist!".format(
                    args.train_with))
                return
            trainer = trainer.one()
        else:
            trainer = None

        # create video
        video = Video(slug=args.slug,
                      location=os.path.realpath(args.location),
                      width=width,
                      height=height,
                      totalframes=maxframes,
                      skip=args.skip,
                      perobjectbonus=args.per_object_bonus,
                      completionbonus=args.completion_bonus,
                      trainwith=trainer,
                      isfortraining=args.for_training,
                      blowradius=args.blow_radius)

        if args.for_training:
            video.trainvalidator = qa.tolerable(args.for_training_overlap,
                                                args.for_training_tolerance,
                                                args.for_training_mistakes)
            print "Training validator is {0}".format(video.trainvalidator)

        session.add(video)

        print "Binding labels and attributes..."

        # create labels and attributes
        labelcache = {}
        attributecache = {}
        lastlabel = None
        for labeltext in args.labels:
            if labeltext[0] == "~":
                if lastlabel is None:
                    print "Cannot assign an attribute without a label!"
                    return
                labeltext = labeltext[1:]
                attribute = Attribute(text=labeltext)
                session.add(attribute)
                lastlabel.attributes.append(attribute)
                attributecache[labeltext] = attribute
            else:
                label = Label(text=labeltext)
                session.add(label)
                video.labels.append(label)
                labelcache[labeltext] = label
                lastlabel = label

        print "Creating symbolic link..."
        symlink = "public/frames/{0}".format(video.slug)
        try:
            os.remove(symlink)
        except:
            pass
        os.symlink(video.location, symlink)

        print "Creating segments..."
        # create shots and jobs
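        # keep every job and segment so preloaded annotations can be assigned to them below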

        job_list = []
        segment_list = []

        if args.for_training:
            segment = Segment(video=video)
            if args.for_training_start:
                segment.start = args.for_training_start
                if segment.start < 0:
                    segment.start = 0
            else:
                segment.start = 0
            if args.for_training_stop:
                segment.stop = args.for_training_stop
                if segment.stop > video.totalframes - 1:
                    segment.stop = video.totalframes - 1
            else:
                segment.stop = video.totalframes - 1
            job = Job(segment=segment, group=group, ready=False)
            job_list.append(job)
            segment_list.append(segment)
            session.add(segment)
            session.add(job)
        elif args.use_frames:
            with open(args.use_frames) as useframes:
                for line in useframes:
                    ustart, ustop = line.split()
                    ustart, ustop = int(ustart), int(ustop)
                    validlength = float(ustop - ustart)
                    numsegments = math.ceil(validlength / args.length)
                    segmentlength = math.ceil(validlength / numsegments)

                    for start in range(ustart, ustop, int(segmentlength)):
                        stop = min(start + segmentlength + args.overlap + 1,
                                   ustop)
                        segment = Segment(start=start, stop=stop, video=video)
                        job = Job(segment=segment, group=group)
                        job_list.append(job)
                        segment_list.append(segment)

                        session.add(segment)
                        session.add(job)
        else:
            startframe = args.start_frame
            stopframe = args.stop_frame
            if not stopframe:
                stopframe = video.totalframes - 1
            for start in range(startframe, stopframe, args.length):
                stop = min(start + args.length + args.overlap + 1, stopframe)
                segment = Segment(start=start, stop=stop, video=video)
                job = Job(segment=segment, group=group)
                job_list.append(job)
                segment_list.append(segment)

                session.add(segment)
                session.add(job)

        if args.per_object_bonus:
            group.schedules.append(
                PerObjectBonus(amount=args.per_object_bonus))
        if args.completion_bonus:
            group.schedules.append(
                CompletionBonus(amount=args.completion_bonus))

        session.add(group)

        if args.for_training and args.for_training_data:
            print("Loading training ground truth annotations from {0}".format(
                args.for_training_data))
            with open(args.for_training_data, "r") as file:
                pathcache = {}
                for line in file:
                    (id, xtl, ytl, xbr, ybr, frame, outside, occluded,
                     generated, label) = line.split(" ")

                    if int(generated):
                        continue

                    if id not in pathcache:
                        print "Imported new path {0}".format(id)
                        label = labelcache[label.strip()[1:-1]]
                        pathcache[id] = Path(job=job, label=label)

                    box = Box(path=pathcache[id])
                    box.xtl = int(xtl)
                    box.ytl = int(ytl)
                    box.xbr = int(xbr)
                    box.ybr = int(ybr)
                    box.frame = int(frame)
                    box.outside = int(outside)
                    box.occluded = int(occluded)
                    pathcache[id].boxes.append(box)

        session.commit()

        # Save the annotated file in the database
        if args.annot is not None:
            with open(args.annot, 'r') as annot_file:
                annotated_tracks = json.load(annot_file)

            # Scale annotations if annot-video-height is given as argument
            annot_scalar = 1.0
            if args.annot_video_height is not None:
                annot_scalar = video.height / float(
                    args.annot_video_height) * 1.5
                print('Scale factor: {}'.format(annot_scalar))

            # Scale bboxes and convert labels
            converted_tracks = []
            for a_labels, a_tracks, a_attribs in annotated_tracks:
                scaled_a_tracks = {}
                keep_tracks = a_tracks.keys()

                # Blow-radius for annotations
                if args.annot_blow_radius > 0:
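                    # keep only frames spaced more than annot_blow_radius apart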
                    keep_tracks = sorted(map(int, keep_tracks))
                    prev_frame_id = keep_tracks[0]
                    blown_tracks = [keep_tracks[0]]
                    for frame_id in keep_tracks:
                        if frame_id > prev_frame_id + args.annot_blow_radius:
                            prev_frame_id = frame_id
                            blown_tracks.append(frame_id)
                    keep_tracks = map(unicode, blown_tracks)

                # Conversion
                for track_id, track_data in a_tracks.iteritems():
                    if track_id in keep_tracks:
                        scaled_track = [
                            x * annot_scalar for x in track_data[:4]
                        ]
                        scaled_track.extend(track_data[4:])
                        scaled_a_tracks[track_id] = scaled_track
                converted_tracks.append(
                    [labelcache[a_labels].id, scaled_a_tracks, a_attribs])
            for j, s in zip(job_list, segment_list):
                job_data = []
                for a_labels, a_tracks, a_attribs in converted_tracks:
                    # Sort by track number
                    sorted_a_tracks = natural_sort(a_tracks.keys())
                    sorted_a_attribs = natural_sort(a_attribs.keys())

                    job_tracks = {}
                    job_attribs = {k.id: {} for k in attributecache.values()}
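                    # keep only the track entries and attribute flags whose frame keys fall inside this segment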
                    for track_id in sorted_a_tracks:
                        track_data = a_tracks[track_id]
                        track_id = int(track_id)
                        if s.start <= track_id <= s.stop:
                            job_tracks[track_id] = track_data
                        if track_id > s.stop:
                            break
                    for track_id in sorted_a_attribs:
                        attrib_data = a_attribs[track_id]
                        track_id = int(track_id)
                        if s.start <= track_id <= s.stop:
                            for k in job_attribs.keys():
                                job_attribs[k][track_id] = 0
                            job_attribs[
                                attributecache[attrib_data].id][track_id] = 1
                        if track_id > s.stop:
                            break
                    job_data.append([a_labels, job_tracks, job_attribs])
                savejob(j.id, job_data)

        if args.for_training:
            if args.for_training_data:
                print "Video and ground truth loaded."
            else:
                print "Video loaded and ready for ground truth:"
                print ""
                print "\t{0}".format(job.offlineurl(config.localhost))
                print ""
                print "Visit this URL to provide training with ground truth."
        else:
            print "Video loaded and ready for publication."