import numpy as np
import pycocotools.mask as cocomask
from time import perf_counter

# The remaining helpers (import_detections_for_sequence, load_optical_flow,
# track_single_sequence, make_disjoint, export_tracking_result_in_kitti_format,
# the visualize_* functions, imresize, log, DataKeys and Extractions) are
# assumed to come from the surrounding tracking codebase.


def process_sequence(seq,
                     detections_path,
                     add_masks,
                     tracker_options,
                     optical_flow_path,
                     temp_out,
                     n_timesteps=None,
                     start_time_at_1=False):
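    """Track one sequence from precomputed detections and export the result.

    Loads detections (and, if any IoU term needs it, optical flow) for
    sequence `seq`, runs the tracker, makes overlapping masks disjoint and
    writes the tracks to `temp_out` in KITTI tracking format.
    """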
    assert n_timesteps is not None, "n_timesteps must be given for sequence %s" % seq
    det_boxes, det_scores, reid_features, det_classes, det_masks = \
      import_detections_for_sequence(seq, n_timesteps, detections_path, "", 0, add_masks)

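    # Optical flow is only needed when a mask/bbox IoU association term is
    # active; presumably it is used to warp detections between frames.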
    if tracker_options["mask_iou_weight_car"] > 0.0 or \
        tracker_options["mask_iou_weight_pedestrian"] > 0.0 or \
        tracker_options["bbox_iou_weight_car"] > 0.0 or \
        tracker_options["bbox_iou_weight_pedestrian"] > 0.0:
        optical_flow = load_optical_flow(seq, optical_flow_path)
    else:
        optical_flow = None

    hyp_tracks = track_single_sequence(tracker_options,
                                       det_boxes,
                                       det_scores,
                                       reid_features,
                                       det_classes,
                                       det_masks,
                                       optical_flow=optical_flow)
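    # Masks of different tracks may overlap; make them disjoint, resolving
    # conflicts in favor of the higher detection score.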
    hyp_tracks = make_disjoint(hyp_tracks, "score")
    export_tracking_result_in_kitti_format(seq,
                                           hyp_tracks,
                                           add_masks,
                                           "",
                                           temp_out,
                                           start_time_at_1=start_time_at_1)
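
# Example driver for process_sequence (a minimal sketch: the sequence name,
# paths and timestep count are illustrative placeholders, and tracker_options
# shows only the IoU-weight keys referenced above):
#
#   tracker_options = {"mask_iou_weight_car": 1.0,
#                      "mask_iou_weight_pedestrian": 1.0,
#                      "bbox_iou_weight_car": 0.0,
#                      "bbox_iou_weight_pedestrian": 0.0}
#   process_sequence("0002", "/path/to/detections", add_masks=True,
#                    tracker_options=tracker_options,
#                    optical_flow_path="/path/to/flow", temp_out="/tmp/out",
#                    n_timesteps=154)
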
    # NOTE: _forward_video is a method of the tracking forwarder class; it is
    # shown here after the standalone process_sequence helper, not nested in it.
    def _forward_video(self, n_timesteps, tag):
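        """Forward one video: obtain detections (from the network or from
        disk), optionally run the tracker, then export and/or visualize."""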
        print("Forwarding video...", file=log.v5)
        print(tag, file=log.v5)
        if self.import_detections:
            assert not self.export_embeddings
            assert not self.export_detections
            image_crops = None
            imgs = []
            print("Loading forwarded detections from file...", file=log.v5)
            time_start = perf_counter()
            det_boxes, det_scores, reid_features, det_classes, det_masks = \
              import_detections_for_sequence(tag, n_timesteps, self.config.string("detections_import_path", ""),
                                             self.config.string("model"), self.engine.start_epoch, self.add_masks)
            print("Done.", file=log.v5)
            if self.visualize_detections or self.visualize_tracks:
                print("Loading images for visualization...", file=log.v5)
                batch_size = self.val_data.get_batch_size()
                for t_start in range(0, n_timesteps, batch_size):
                    feed_dict = self.val_data.get_feed_dict_for_next_step()
                    for j in range(batch_size):
                        if t_start + j >= n_timesteps:
                            continue
                        imgs.append(feed_dict[self.val_data._placeholders[
                            DataKeys.IMAGES][j]])
                print("Done.", file=log.v5)
        else:
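            # No precomputed detections: run the network over the sequence and
            # collect boxes, scores, classes, masks and ReID features per step.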
            recurrent_state = None
            det_boxes = []
            det_scores = []
            det_classes = []
            det_masks = []
            reid_features = []
            imgs = []
            image_crops = []
            batch_size = self.val_data.get_batch_size()
            time_start = perf_counter()
            for t_start in range(0, n_timesteps, batch_size):
                print(t_start + 1, "/", n_timesteps, file=log.v5)
                recurrent_state, measures, extractions = self._forward_timestep(
                    recurrent_state)

                for j in range(batch_size):
                    t = t_start + j
                    if t >= n_timesteps:
                        continue
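                    # Each extraction is nested as an outer list over output
                    # copies; [0] unwraps the single copy and j indexes the
                    # entry within the batch.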
                    assert len(extractions[Extractions.DET_BOXES][0]) == batch_size, \
                        len(extractions[Extractions.DET_BOXES][0])
                    det_boxes_t = extractions[Extractions.DET_BOXES][0][j]
                    det_scores_t = extractions[Extractions.DET_PROBS][0][j]
                    reid_features_t = extractions[
                        Extractions.REID_FEATURES][0][j]
                    det_classes_t = extractions[Extractions.DET_LABELS][0][j]
                    if self.add_masks:
                        if len(det_boxes_t) == 0:
                            det_masks_t = []
                        else:
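                            # Split the stacked masks into one array per
                            # detection and RLE-encode each with pycocotools.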
                            det_masks_t = [
                                cocomask.encode(
                                    np.asfortranarray(m.squeeze(axis=0),
                                                      dtype=np.uint8))
                                for m in np.vsplit(
                                    extractions[Extractions.DET_MASKS][0][j],
                                    len(det_boxes_t))
                            ]
                    else:
                        det_masks_t = [None] * len(det_boxes_t)
                    det_boxes.append(det_boxes_t)
                    det_scores.append(det_scores_t)
                    reid_features.append(reid_features_t)
                    det_classes.append(det_classes_t)
                    det_masks.append(det_masks_t)
                    if self.visualize_detections or self.visualize_tracks or self.export_embeddings:
                        assert DataKeys.RAW_IMAGES in extractions, \
                            "Can't extract raw images, maybe the images in the batch have different sizes?"
                        img_t = extractions[DataKeys.RAW_IMAGES][0][j]
                        imgs.append(img_t)
                        if self.export_embeddings:
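                            # Crop each detected box, resize it to 50x50 and
                            # scale to [0, 1] for use as embedding thumbnails.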
                            det_boxes_t_i = det_boxes_t.astype(dtype=np.int32)
                            for box in det_boxes_t_i:
                                img_crop = imresize(img_t[box[1]:box[3],
                                                          box[0]:box[2], :],
                                                    size=(50, 50))
                                image_crops.append(img_crop / 255.0)

        time_stop_fwd = perf_counter()
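        # Tracking stage: optical flow is loaded only when a mask/bbox IoU
        # association term is active (same gating as in process_sequence).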
        if self.do_tracking:
            if self.tracker_options["mask_iou_weight_car"] > 0.0 or \
              self.tracker_options["mask_iou_weight_pedestrian"] > 0.0 or \
              self.tracker_options["bbox_iou_weight_car"] > 0.0 or \
              self.tracker_options["bbox_iou_weight_pedestrian"] > 0.0:
                optical_flow = load_optical_flow(tag, self.optical_flow_path)
            else:
                optical_flow = None
            hyp_tracks = track_single_sequence(self.tracker_options,
                                               det_boxes,
                                               det_scores,
                                               reid_features,
                                               det_classes,
                                               det_masks,
                                               optical_flow=optical_flow)
            if self.add_masks:
                hyp_tracks = self.make_disjoint_helper(hyp_tracks)
            time_stop_track = perf_counter()
            print("Time for tracking (s):",
                  time_stop_track - time_stop_fwd,
                  "FPS for tracking including forwarding:",
                  n_timesteps / (time_stop_track - time_start),
                  file=log.v5)
            print("Exporting tracking results", file=log.v5)
            time_starts_at_1 = bool(getattr(self.val_data, "time_starts_at_1", False))
            if time_starts_at_1:
                print("Starting time at 1 for exporting", file=log.v1)
            export_tracking_result_in_kitti_format(
                tag,
                hyp_tracks,
                self.add_masks,
                self.config.string("model"),
                start_time_at_1=time_starts_at_1)
            if self.visualize_tracks:
                print("Visualizing tracks", file=log.v5)
                visualize_tracks(tag, hyp_tracks, imgs, self.add_masks,
                                 self.interactive_visualization,
                                 self.config.string("model"))
        print("Time for forwarding (s):",
              time_stop_fwd - time_start,
              "FPS for forwarding (w/o tracking):",
              n_timesteps / (time_stop_fwd - time_start),
              file=log.v5)
        if self.export_detections:
            print("Exporting detections", file=log.v5)
            export_detections_for_sequence(tag, det_boxes, det_scores,
                                           reid_features, det_classes,
                                           det_masks,
                                           self.config.string("model"),
                                           self.engine.start_epoch,
                                           self.add_masks)
        if self.export_embeddings:
            print("Exporting embeddings", file=log.v5)
            # Stash crops, embeddings and labels for later export to a
            # TensorBoard embedding-projector checkpoint.
            image_crops = np.stack(image_crops, axis=0)
            embeddings = np.concatenate(reid_features, axis=0)
            labels = np.concatenate(det_classes, axis=0)
            self.embeddings[tag] = [image_crops, embeddings, labels]
        if self.visualize_detections:
            print("Visualizing detections", file=log.v5)
            visualize_detections_for_sequence(tag, det_boxes, det_scores,
                                              det_classes, det_masks, imgs,
                                              self.add_masks,
                                              self.interactive_visualization,
                                              self.config.string("model"))