def calculate_reprojection_errors(h5_filename=None,
                                  output_h5_filename=None,
                                  kalman_filename=None,
                                  from_source=None,
                                  start=None,
                                  stop=None,
                                  show_progress=False,
                                  show_progress_json=False,
                                  ):
    assert from_source in ['ML_estimates','smoothed']
    if os.path.exists( output_h5_filename ):
        raise RuntimeError(
            "will not overwrite old file '%s'"%output_h5_filename)

    out = {'camn':[],
           'frame':[],
           'obj_id':[],
           'dist':[],
           'z':[],
           }

    ca = core_analysis.get_global_CachingAnalyzer()
    with ca.kalman_analysis_context( kalman_filename,
                                     data2d_fname=h5_filename ) as h5_context:
        R = h5_context.get_reconstructor()
        ML_estimates_2d_idxs = h5_context.load_entire_table('ML_estimates_2d_idxs')
        use_obj_ids = h5_context.get_unique_obj_ids()

        extra = h5_context.get_extra_info()

        if from_source=='smoothed':
            dynamic_model_name = extra['dynamic_model_name']
            if dynamic_model_name.startswith('EKF '):
                dynamic_model_name = dynamic_model_name[4:]

        fps = h5_context.get_fps()
        camn2cam_id, cam_id2camns = h5_context.get_caminfo_dicts()

        # associate framenumbers with timestamps using 2d .h5 file
        data2d = h5_context.load_entire_table('data2d_distorted',
                                              from_2d_file=True )
        data2d_idxs = np.arange(len(data2d))
        h5_framenumbers = data2d['frame']
        h5_frame_qfi = result_utils.QuickFrameIndexer(h5_framenumbers)

        if show_progress:
            string_widget = StringWidget()
            objs_per_sec_widget = progressbar.FileTransferSpeed(unit='obj_ids ')
            widgets=[string_widget, objs_per_sec_widget,
                     progressbar.Percentage(), progressbar.Bar(), progressbar.ETA()]
            pbar=progressbar.ProgressBar(widgets=widgets,maxval=len(use_obj_ids)).start()

        for obj_id_enum,obj_id in enumerate(use_obj_ids):
            if show_progress:
                string_widget.set_string( '[obj_id: % 5d]'%obj_id )
                pbar.update(obj_id_enum)
            if show_progress_json and obj_id_enum%100==0:
                rough_percent_done = float(obj_id_enum)/len(use_obj_ids)*100.0
                result_utils.do_json_progress(rough_percent_done)

            obj_3d_rows = h5_context.load_dynamics_free_MLE_position(obj_id)

            if from_source=='smoothed':

                smoothed_rows = None
                try:
                    smoothed_rows = h5_context.load_data(
                        obj_id,
                        use_kalman_smoothing=True,
                        dynamic_model_name = dynamic_model_name,
                        frames_per_second=fps,
                        )
                except core_analysis.NotEnoughDataToSmoothError as err:
                    # OK, we don't have data from this obj_id
                    pass
                except core_analysis.DiscontiguousFramesError:
                    pass

            for this_3d_row in obj_3d_rows:
                # iterate over each sample in the current camera
                framenumber = this_3d_row['frame']
                if start is not None:
                    if not framenumber >= start:
                        continue
                if stop is not None:
                    if not framenumber <= stop:
                        continue
                h5_2d_row_idxs = h5_frame_qfi.get_frame_idxs(framenumber)
                if len(h5_2d_row_idxs) == 0:
                    # At the start, there may be 3d data without 2d data.
                    continue

                if from_source=='ML_estimates':
                    X3d = this_3d_row['x'], this_3d_row['y'], this_3d_row['z']
                elif from_source=='smoothed':
                    if smoothed_rows is None:
                        X3d = np.nan, np.nan, np.nan
                    else:
                        this_smoothed_rows = smoothed_rows[ smoothed_rows['frame']==framenumber ]
                        assert len(this_smoothed_rows) <= 1
                        if len(this_smoothed_rows) == 0:
                            X3d = np.nan, np.nan, np.nan
                        else:
                            X3d = this_smoothed_rows['x'][0], this_smoothed_rows['y'][0], this_smoothed_rows['z'][0]

                # If there was a 3D ML estimate, there must be 2D data.

                frame2d = data2d[h5_2d_row_idxs]
                frame2d_idxs = data2d_idxs[h5_2d_row_idxs]

                obs_2d_idx = this_3d_row['obs_2d_idx']
                kobs_2d_data = ML_estimates_2d_idxs[int(obs_2d_idx)]

                # Parse VLArray.
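                # Rows interleave camera numbers with per-camera point
                # indices: [camn0, idx0, camn1, idx1, ...].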
                this_camns = kobs_2d_data[0::2]
                this_camn_idxs = kobs_2d_data[1::2]

                # Now, for each camera viewing this object at this
                # frame, extract images.
                for camn, camn_pt_no in zip(this_camns, this_camn_idxs):
                    try:
                        cam_id = camn2cam_id[camn]
                    except KeyError:
                        warnings.warn('camn %d not found'%(camn, ))
                        continue

                    # find 2D point corresponding to object
                    cond = ((frame2d['camn']==camn) &
                            (frame2d['frame_pt_idx']==camn_pt_no))
                    idxs = np.nonzero(cond)[0]
                    if len(idxs)==0:
                        #no frame for that camera (start or stop of file)
                        continue
                    elif len(idxs)>1:
                        print "MEGA WARNING MULTIPLE 2D POINTS\n", camn, camn_pt_no,"\n\n"
                        continue

                    idx = idxs[0]

                    frame2d_row = frame2d[idx]
                    x2d_real = frame2d_row['x'], frame2d_row['y']
                    x2d_reproj = R.find2d( cam_id, X3d, distorted = True )
                    dist = np.sqrt(np.sum((x2d_reproj - x2d_real)**2))

                    out['camn'].append(camn)
                    out['frame'].append(framenumber)
                    out['obj_id'].append(obj_id)
                    out['dist'].append(dist)
                    out['z'].append( X3d[2] )

    # convert to numpy arrays
    for k in out:
        out[k] = np.array(out[k])
    reprojection = pandas.DataFrame(out)
    del out  # free memory

    # new tables
    camns = []
    cam_ids = []
    for camn in camn2cam_id:
        camns.append(camn)
        cam_ids.append(camn2cam_id[camn])
    cam_table = {'camn': np.array(camns),
                 'cam_id': np.array(cam_ids),
                 }
    cam_df = pandas.DataFrame(cam_table)

    # save to disk
    store = pandas.HDFStore(output_h5_filename)
    store.append('reprojection', reprojection,
                 data_columns=reprojection.columns)
    store.append('cameras', cam_df)
    store.close()
    if show_progress_json:
        result_utils.do_json_progress(100)
Example #2
def calculate_reprojection_errors(
    h5_filename=None,
    output_h5_filename=None,
    kalman_filename=None,
    from_source=None,
    start=None,
    stop=None,
    show_progress=False,
    show_progress_json=False,
):
    assert from_source in ["ML_estimates", "smoothed"]
    if os.path.exists(output_h5_filename):
        raise RuntimeError("will not overwrite old file '%s'" %
                           output_h5_filename)

    out = {
        "camn": [],
        "frame": [],
        "obj_id": [],
        "dist": [],
        "z": [],
    }

    ca = core_analysis.get_global_CachingAnalyzer()
    with ca.kalman_analysis_context(kalman_filename,
                                    data2d_fname=h5_filename) as h5_context:
        R = h5_context.get_reconstructor()
        ML_estimates_2d_idxs = h5_context.load_entire_table(
            "ML_estimates_2d_idxs")
        use_obj_ids = h5_context.get_unique_obj_ids()

        extra = h5_context.get_extra_info()

        if from_source == "smoothed":
            dynamic_model_name = extra["dynamic_model_name"]
            if dynamic_model_name.startswith("EKF "):
                dynamic_model_name = dynamic_model_name[4:]

        fps = h5_context.get_fps()
        camn2cam_id, cam_id2camns = h5_context.get_caminfo_dicts()

        # associate framenumbers with timestamps using 2d .h5 file
        data2d = h5_context.load_entire_table("data2d_distorted",
                                              from_2d_file=True)
        data2d_idxs = np.arange(len(data2d))
        h5_framenumbers = data2d["frame"]
        h5_frame_qfi = result_utils.QuickFrameIndexer(h5_framenumbers)

        if show_progress:
            string_widget = StringWidget()
            objs_per_sec_widget = progressbar.FileTransferSpeed(
                unit="obj_ids ")
            widgets = [
                string_widget,
                objs_per_sec_widget,
                progressbar.Percentage(),
                progressbar.Bar(),
                progressbar.ETA(),
            ]
            pbar = progressbar.ProgressBar(widgets=widgets,
                                           maxval=len(use_obj_ids)).start()

        for obj_id_enum, obj_id in enumerate(use_obj_ids):
            if show_progress:
                string_widget.set_string("[obj_id: % 5d]" % obj_id)
                pbar.update(obj_id_enum)
            if show_progress_json and obj_id_enum % 100 == 0:
                rough_percent_done = float(obj_id_enum) / len(
                    use_obj_ids) * 100.0
                result_utils.do_json_progress(rough_percent_done)

            obj_3d_rows = h5_context.load_dynamics_free_MLE_position(obj_id)

            if from_source == "smoothed":

                smoothed_rows = None
                try:
                    smoothed_rows = h5_context.load_data(
                        obj_id,
                        use_kalman_smoothing=True,
                        dynamic_model_name=dynamic_model_name,
                        frames_per_second=fps,
                    )
                except core_analysis.NotEnoughDataToSmoothError as err:
                    # OK, we don't have data from this obj_id
                    pass
                except core_analysis.DiscontiguousFramesError:
                    pass

            for this_3d_row in obj_3d_rows:
                # iterate over each sample in the current camera
                framenumber = this_3d_row["frame"]
                if start is not None:
                    if not framenumber >= start:
                        continue
                if stop is not None:
                    if not framenumber <= stop:
                        continue
                h5_2d_row_idxs = h5_frame_qfi.get_frame_idxs(framenumber)
                if len(h5_2d_row_idxs) == 0:
                    # At the start, there may be 3d data without 2d data.
                    continue

                if from_source == "ML_estimates":
                    X3d = this_3d_row["x"], this_3d_row["y"], this_3d_row["z"]
                elif from_source == "smoothed":
                    if smoothed_rows is None:
                        X3d = np.nan, np.nan, np.nan
                    else:
                        this_smoothed_rows = smoothed_rows[
                            smoothed_rows["frame"] == framenumber]
                        assert len(this_smoothed_rows) <= 1
                        if len(this_smoothed_rows) == 0:
                            X3d = np.nan, np.nan, np.nan
                        else:
                            X3d = (
                                this_smoothed_rows["x"][0],
                                this_smoothed_rows["y"][0],
                                this_smoothed_rows["z"][0],
                            )

                # If there was a 3D ML estimate, there must be 2D data.

                frame2d = data2d[h5_2d_row_idxs]
                frame2d_idxs = data2d_idxs[h5_2d_row_idxs]

                obs_2d_idx = this_3d_row["obs_2d_idx"]
                kobs_2d_data = ML_estimates_2d_idxs[int(obs_2d_idx)]

                # Parse VLArray.
                this_camns = kobs_2d_data[0::2]
                this_camn_idxs = kobs_2d_data[1::2]

                # Now, for each camera viewing this object at this
                # frame, extract images.
                for camn, camn_pt_no in zip(this_camns, this_camn_idxs):
                    try:
                        cam_id = camn2cam_id[camn]
                    except KeyError:
                        warnings.warn("camn %d not found" % (camn, ))
                        continue

                    # find 2D point corresponding to object
                    cond = ((frame2d["camn"] == camn) &
                            (frame2d["frame_pt_idx"] == camn_pt_no))
                    idxs = np.nonzero(cond)[0]
                    if len(idxs) == 0:
                        # no frame for that camera (start or stop of file)
                        continue
                    elif len(idxs) > 1:
                        print(
                            "MEGA WARNING MULTIPLE 2D POINTS\n",
                            camn,
                            camn_pt_no,
                            "\n\n",
                        )
                        continue

                    idx = idxs[0]

                    frame2d_row = frame2d[idx]
                    x2d_real = frame2d_row["x"], frame2d_row["y"]
                    x2d_reproj = R.find2d(cam_id, X3d, distorted=True)
                    dist = np.sqrt(np.sum((x2d_reproj - x2d_real)**2))

                    out["camn"].append(camn)
                    out["frame"].append(framenumber)
                    out["obj_id"].append(obj_id)
                    out["dist"].append(dist)
                    out["z"].append(X3d[2])

    # convert to numpy arrays
    for k in out:
        out[k] = np.array(out[k])
    reprojection = pandas.DataFrame(out)
    del out  # free memory

    # new tables
    camns = []
    cam_ids = []
    for camn in camn2cam_id:
        camns.append(camn)
        cam_ids.append(camn2cam_id[camn])
    cam_table = {
        "camn": np.array(camns),
        "cam_id": np.array(cam_ids),
    }
    cam_df = pandas.DataFrame(cam_table)

    # save to disk
    store = pandas.HDFStore(output_h5_filename)
    store.append("reprojection",
                 reprojection,
                 data_columns=reprojection.columns)
    store.append("cameras", cam_df)
    store.close()
    if show_progress_json:
        result_utils.do_json_progress(100)
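
A minimal usage sketch for the function above (not part of the original
source): the filenames are hypothetical, and the module context assumed
above (os, np, pandas, core_analysis, result_utils) must be importable.

calculate_reprojection_errors(
    h5_filename="data2d.h5",          # 2D detections file (hypothetical)
    kalman_filename="kalman.h5",      # 3D tracking file (hypothetical)
    output_h5_filename="reproj.h5",   # must not already exist
    from_source="smoothed",           # or "ML_estimates"
    show_progress=True,
)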
Example #3

def do_it(
    filename,
    efilename,
    use_nth_observation=None,
    h5_2d_data_filename=None,
    use_kalman_data=True,
    start=None,
    stop=None,
    options=None,
):

    if h5_2d_data_filename is None:
        h5_2d_data_filename = filename

    calib_dir = filename + ".recal"
    if not os.path.exists(calib_dir):
        os.makedirs(calib_dir)

    results = result_utils.get_results(filename, mode="r+")

    if use_kalman_data:
        mylocals = {}
        myglobals = {}
        with open(efilename) as f:
            exec(compile(f.read(), efilename, "exec"), myglobals, mylocals)

        use_obj_ids = mylocals["long_ids"]
        if "bad" in mylocals:
            use_obj_ids = set(use_obj_ids)
            bad = set(mylocals["bad"])
            use_obj_ids = list(use_obj_ids.difference(bad))
        kobs = results.root.ML_estimates
        kobs_2d = results.root.ML_estimates_2d_idxs

    h5_2d_data = result_utils.get_results(h5_2d_data_filename, mode="r+")

    camn2cam_id, cam_id2camns = result_utils.get_caminfo_dicts(h5_2d_data)

    cam_ids = list(cam_id2camns.keys())
    cam_ids.sort()

    data2d = h5_2d_data.root.data2d_distorted
    # use_idxs = numpy.arange(data2d.nrows)
    frames = data2d.cols.frame[:]
    qfi = result_utils.QuickFrameIndexer(frames)

    npoints_by_ncams = {}

    npoints_by_cam_id = {}
    for cam_id in cam_ids:
        npoints_by_cam_id[cam_id] = 0

    IdMat = []
    points = []

    if use_kalman_data:
        row_keys = []
        if start is not None or stop is not None:
            print("start, stop", start, stop)
            print(
                "WARNING: currently ignoring start/stop because Kalman data is being used"
            )
        for obj_id_enum, obj_id in enumerate(use_obj_ids):
            row_keys.append((len(points), obj_id))
            #            print 'obj_id %d (%d of %d)'%(obj_id, obj_id_enum+1, len(use_obj_ids))
            this_obj_id = obj_id
            k_use_idxs = kobs.get_where_list("obj_id==this_obj_id")
            obs_2d_idxs = kobs.read_coordinates(k_use_idxs, field="obs_2d_idx")
            kframes = kobs.read_coordinates(k_use_idxs, field="frame")
            kframes_use = kframes[::use_nth_observation]
            obs_2d_idxs_use = obs_2d_idxs[::use_nth_observation]

            widgets = [
                "obj_id % 5d (% 3d of % 3d) " %
                (obj_id, obj_id_enum + 1, len(use_obj_ids)),
                progressbar.Percentage(),
                " ",
                progressbar.Bar(),
                " ",
                progressbar.ETA(),
            ]

            pbar = progressbar.ProgressBar(widgets=widgets,
                                           maxval=len(kframes_use)).start()

            for n_kframe, (kframe, obs_2d_idx) in enumerate(
                    zip(kframes_use, obs_2d_idxs_use)):
                pbar.update(n_kframe)
                if 0:
                    k_use_idx = k_use_idxs[n_kframe * use_nth_observation]
                    print(kobs.read_coordinates(numpy.array([k_use_idx])))
                if PT.__version__ <= "1.3.3":
                    obs_2d_idx_find = int(obs_2d_idx)
                    kframe_find = int(kframe)
                else:
                    obs_2d_idx_find = obs_2d_idx
                    kframe_find = kframe
                obj_id_save = int(obj_id)  # convert from possible numpy scalar

                # sys.stdout.write('  reading frame data...')
                # sys.stdout.flush()
                obs_2d_idx_find_next = obs_2d_idx_find + numpy.uint64(1)
                kobs_2d_data = kobs_2d.read(start=obs_2d_idx_find,
                                            stop=obs_2d_idx_find_next)
                # sys.stdout.write('done\n')
                # sys.stdout.flush()

                assert len(kobs_2d_data) == 1
                kobs_2d_data = kobs_2d_data[0]
                this_camns = kobs_2d_data[0::2]
                this_camn_idxs = kobs_2d_data[1::2]

                # sys.stdout.write('  doing frame selections...')
                # sys.stdout.flush()
                if 1:
                    this_use_idxs = qfi.get_frame_idxs(kframe_find)
                elif 0:
                    this_use_idxs = numpy.nonzero(frames == kframe_find)[0]
                else:
                    this_use_idxs = data2d.get_where_list("frame==kframe_find")
                # sys.stdout.write('done\n')
                # sys.stdout.flush()

                if PT.__version__ <= "1.3.3":
                    this_use_idxs = [int(t) for t in this_use_idxs]

                d2d = data2d.read_coordinates(this_use_idxs)
                if len(this_camns) < options.min_num_points:
                    # not enough points to contribute to calibration
                    continue

                npoints_by_ncams[len(this_camns)] = (
                    npoints_by_ncams.get(len(this_camns), 0) + 1)

                IdMat_row, points_row = create_new_row(
                    d2d,
                    this_camns,
                    this_camn_idxs,
                    cam_ids,
                    camn2cam_id,
                    npoints_by_cam_id,
                )

                IdMat.append(IdMat_row)
                points.append(points_row)
            ##             print 'running total of points','-'*20
            ##             for cam_id in cam_ids:
            ##                 print 'cam_id %s: %d points'%(cam_id,npoints_by_cam_id[cam_id])
            ##             print
            pbar.finish()

    if start is None:
        start = 0
    if stop is None:
        stop = int(frames.max())

    if not use_kalman_data:
        row_keys = None
        count = 0
        for frameno in range(start, stop + 1, use_nth_observation):
            this_use_idxs = qfi.get_frame_idxs(frameno)

            d2d = data2d.read_coordinates(this_use_idxs)
            d2d = d2d[~numpy.isnan(d2d["x"])]
            this_camns = d2d["camn"]

            unique_camns = numpy.unique(this_camns)
            if len(this_camns) != len(unique_camns):
                # ambiguity - a camera has > 1 point
                continue
            this_camn_idxs = numpy.array([0] * len(this_camns))

            if len(this_camns) < options.min_num_points:
                # not enough points to contribute to calibration
                continue

            npoints_by_ncams[len(this_camns)] = (
                npoints_by_ncams.get(len(this_camns), 0) + 1)
            count += 1

            IdMat_row, points_row = create_new_row(d2d, this_camns,
                                                   this_camn_idxs, cam_ids,
                                                   camn2cam_id,
                                                   npoints_by_cam_id)
            IdMat.append(IdMat_row)
            points.append(points_row)
    print("%d points" % len(IdMat))

    print("by camera id:")
    for cam_id in cam_ids:
        print(" %s: %d" % (cam_id, npoints_by_cam_id[cam_id]))
    print("by n points:")
    max_npoints = 0
    for ncams in npoints_by_ncams:
        print(" %d: %d" % (ncams, npoints_by_ncams[ncams]))
        max_npoints = max(max_npoints, npoints_by_ncams[ncams])
    print()

    if max_npoints < 10:
        print("not enough points, aborting", file=sys.stderr)
        results.close()
        h5_2d_data.close()
        sys.exit(1)

    IdMat = numpy.array(IdMat, dtype=numpy.uint8).T
    points = numpy.array(points, dtype=numpy.float32).T

    # resolution
    Res = []
    for cam_id in cam_ids:
        image_table = results.root.images
        arr = getattr(image_table, cam_id)
        imsize = arr.shape[1], arr.shape[0]
        Res.append(imsize)
    Res = numpy.array(Res)

    cam_centers = []
    cam_calibrations = []

    if options.camera_center_reconstructor:
        creconstructor = flydra_core.reconstruct.Reconstructor(
            cal_source=options.camera_center_reconstructor)
        cam_centers = numpy.asarray([
            creconstructor.get_camera_center(cam_id)[:, 0]
            for cam_id in cam_ids
        ])
        flydra_core.reconstruct.save_ascii_matrix(
            cam_centers, os.path.join(calib_dir, "original_cam_centers.dat"))

    intrinsics_reconstructor = options.undistort_intrinsics_reconstructor
    if intrinsics_reconstructor and os.path.exists(intrinsics_reconstructor):
        tdir = tempfile.mkdtemp()
        reconstructor = flydra_core.reconstruct.Reconstructor(
            cal_source=intrinsics_reconstructor)
        for i, cam_id in enumerate(cam_ids):
            fname = os.path.join(tdir, "%s.rad" % cam_id)
            scc = reconstructor.get_SingleCameraCalibration(cam_id)
            scc.helper.save_to_rad_file(fname)
            cam_calibrations.append(fname)

    intrinsics_yaml = options.undistort_intrinsics_yaml
    if intrinsics_yaml and os.path.exists(intrinsics_yaml):
        for cam_id in cam_ids:
            fname = os.path.join(intrinsics_yaml, "%s.yaml" % cam_id)
            cam_calibrations.append(fname)

    undo_radial_distortion = len(cam_calibrations) == len(cam_ids)

    mcsc = MultiCamSelfCal(calib_dir)
    mcsc.create_calibration_directory(
        cam_ids=cam_ids,
        IdMat=IdMat,
        points=points,
        Res=Res,
        cam_calibrations=cam_calibrations,
        cam_centers=cam_centers,
        radial_distortion=undo_radial_distortion,
        square_pixels=1,
        num_cameras_fill=options.num_cameras_fill,
    )

    results.close()
    h5_2d_data.close()

    if row_keys is not None:
        row_keys = numpy.array(row_keys)
        flydra_core.reconstruct.save_ascii_matrix(
            row_keys,
            os.path.join(calib_dir, "obj_ids_zero_indexed.dat"),
            isint=True)

    if options.run_mcsc:
        caldir = mcsc.execute(silent=False)
        print("\nfinished: result in ", caldir)
        if options.output_xml:
            fname = os.path.join(caldir, "reconstructor.xml")
            recon = flydra_core.reconstruct.Reconstructor(cal_source=caldir)
            recon.save_to_xml_filename(fname)
            print("\nfinished: new reconstructor in", fname)
Example #4
def retrack_reuse_data_association(
    h5_filename=None,
    output_h5_filename=None,
    kalman_filename=None,
    start=None,
    stop=None,
    less_ram=False,
    show_progress=False,
    show_progress_json=False,
):
    if os.path.exists(output_h5_filename):
        raise RuntimeError("will not overwrite old file '%s'" %
                           output_h5_filename)

    ca = core_analysis.get_global_CachingAnalyzer()
    with ca.kalman_analysis_context(kalman_filename,
                                    data2d_fname=h5_filename) as h5_context:
        R = h5_context.get_reconstructor()
        if less_ram:
            ML_estimates_2d_idxs = h5_context.get_pytable_node(
                'ML_estimates_2d_idxs')
        else:
            ML_estimates_2d_idxs = h5_context.load_entire_table(
                'ML_estimates_2d_idxs')
        use_obj_ids = h5_context.get_unique_obj_ids()
        extra = h5_context.get_extra_info()
        dt = 1.0 / extra['frames_per_second']
        dynamic_model_name = extra['dynamic_model_name']
        kalman_model = dynamic_models.get_kalman_model(name=dynamic_model_name,
                                                       dt=dt)
        kalman_model['max_frames_skipped'] = 2**62  # close to max i64

        fps = extra['frames_per_second']
        camn2cam_id, cam_id2camns = h5_context.get_caminfo_dicts()

        parsed = h5_context.read_textlog_header()
        if 'trigger_CS3' not in parsed:
            parsed['trigger_CS3'] = 'unknown'

        textlog_save_lines = [
            'retrack_reuse_data_association running at %s fps, (top %s, trigger_CS3 %s, flydra_version %s)'
            %
            (str(fps), str(parsed.get('top', 'unknown')),
             str(parsed['trigger_CS3']), flydra_analysis.version.__version__),
            'original file: %s' % (kalman_filename, ),
            'dynamic model: %s' % (dynamic_model_name, ),
            'reconstructor file: %s' % (kalman_filename, ),
        ]

        with open_file_safe(output_h5_filename,
                            mode="w",
                            title="tracked Flydra data file",
                            delete_on_error=True) as output_h5:

            h5saver = KalmanSaver(
                output_h5,
                R,
                cam_id2camns=cam_id2camns,
                min_observations_to_save=0,
                textlog_save_lines=textlog_save_lines,
                dynamic_model_name=dynamic_model_name,
                dynamic_model=kalman_model,
            )

            # associate framenumbers with timestamps using 2d .h5 file
            if less_ram:
                data2d = h5_context.get_pytable_node('data2d_distorted',
                                                     from_2d_file=True)
                h5_framenumbers = data2d.cols.frame[:]
            else:
                data2d = h5_context.load_entire_table('data2d_distorted',
                                                      from_2d_file=True)
                h5_framenumbers = data2d['frame']
            h5_frame_qfi = result_utils.QuickFrameIndexer(h5_framenumbers)

            if show_progress:
                string_widget = StringWidget()
                objs_per_sec_widget = progressbar.FileTransferSpeed(
                    unit='obj_ids ')
                widgets = [
                    string_widget, objs_per_sec_widget,
                    progressbar.Percentage(),
                    progressbar.Bar(),
                    progressbar.ETA()
                ]
                pbar = progressbar.ProgressBar(
                    widgets=widgets, maxval=len(use_obj_ids)).start()

            for obj_id_enum, obj_id in enumerate(use_obj_ids):
                if show_progress:
                    string_widget.set_string('[obj_id: % 5d]' % obj_id)
                    pbar.update(obj_id_enum)
                if show_progress_json and obj_id_enum % 100 == 0:
                    rough_percent_done = float(obj_id_enum) / len(
                        use_obj_ids) * 100.0
                    result_utils.do_json_progress(rough_percent_done)

                tro = None
                first_frame_per_obj = True
                obj_3d_rows = h5_context.load_dynamics_free_MLE_position(
                    obj_id)
                for this_3d_row in obj_3d_rows:
                    # iterate over each sample in the current camera
                    framenumber = this_3d_row['frame']
                    if start is not None:
                        if not framenumber >= start:
                            continue
                    if stop is not None:
                        if not framenumber <= stop:
                            continue
                    h5_2d_row_idxs = h5_frame_qfi.get_frame_idxs(framenumber)
                    if len(h5_2d_row_idxs) == 0:
                        # At the start, there may be 3d data without 2d data.
                        continue

                    # If there was a 3D ML estimate, there must be 2D data.

                    frame2d = data2d[h5_2d_row_idxs]

                    obs_2d_idx = this_3d_row['obs_2d_idx']
                    kobs_2d_data = ML_estimates_2d_idxs[int(obs_2d_idx)]

                    # Parse VLArray.
                    this_camns = kobs_2d_data[0::2]
                    this_camn_idxs = kobs_2d_data[1::2]

                    # Now, for each camera viewing this object at this
                    # frame, extract images.
                    observation_camns = []
                    observation_idxs = []
                    data_dict = {}
                    used_camns_and_idxs = []
                    cam_ids_and_points2d = []

                    for camn, frame_pt_idx in zip(this_camns, this_camn_idxs):
                        try:
                            cam_id = camn2cam_id[camn]
                        except KeyError:
                            warnings.warn('camn %d not found' % (camn, ))
                            continue

                        # find 2D point corresponding to object
                        cond = ((frame2d['camn'] == camn) &
                                (frame2d['frame_pt_idx'] == frame_pt_idx))
                        idxs = np.nonzero(cond)[0]
                        if len(idxs) == 0:
                            #no frame for that camera (start or stop of file)
                            continue
                        elif len(idxs) > 1:
                            print "MEGA WARNING MULTIPLE 2D POINTS\n", camn, frame_pt_idx, "\n\n"
                            continue

                        idx = idxs[0]

                        frame2d_row = frame2d[idx]
                        x2d_real = frame2d_row['x'], frame2d_row['y']
                        pt_undistorted = R.undistort(cam_id, x2d_real)
                        x2d_area = frame2d_row['area']

                        observation_camns.append(camn)
                        observation_idxs.append(idx)
                        candidate_point_list = []
                        data_dict[camn] = candidate_point_list
                        used_camns_and_idxs.append((camn, frame_pt_idx, None))

                        # with no orientation
                        observed_2d = (pt_undistorted[0], pt_undistorted[1],
                                       x2d_area)

                        cam_ids_and_points2d.append((cam_id, observed_2d))

                    if first_frame_per_obj:
                        if len(cam_ids_and_points2d) < 2:
                            warnings.warn(
                                'some 2D data seems to be missing, cannot completely reconstruct'
                            )
                        else:
                            X3d = R.find3d(
                                cam_ids_and_points2d,
                                return_line_coords=False,
                                simulate_via_tracking_dynamic_model=kalman_model
                            )

                            # first frame
                            tro = TrackedObject(
                                R,
                                obj_id,
                                framenumber,
                                X3d,  # obs0_position
                                None,  # obs0_Lcoords
                                observation_camns,  # first_observation_camns
                                observation_idxs,  # first_observation_idxs
                                kalman_model=kalman_model,
                            )
                            del X3d
                            first_frame_per_obj = False
                    else:
                        tro.calculate_a_posteriori_estimate(
                            framenumber,
                            data_dict,
                            camn2cam_id,
                            skip_data_association=True,
                            original_camns_and_idxs=used_camns_and_idxs,
                            original_cam_ids_and_points2d=cam_ids_and_points2d,
                        )

                # done with all data for this obj_id
                if tro is not None:
                    tro.kill()
                    h5saver.save_tro(tro, force_obj_id=obj_id)
    if show_progress_json:
        result_utils.do_json_progress(100)
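
A minimal usage sketch for the function above (not part of the original
source), with hypothetical filenames:

retrack_reuse_data_association(
    h5_filename="data2d.h5",            # 2D detections file (hypothetical)
    kalman_filename="kalman.h5",        # previously tracked file (hypothetical)
    output_h5_filename="retracked.h5",  # must not already exist
    less_ram=True,                      # stream tables instead of loading into RAM
    show_progress=True,
)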
Example #5
def doit(output_h5_filename=None,
         kalman_filename=None,
         data2d_filename=None,
         start=None,
         stop=None,
         gate_angle_threshold_degrees=40.0,
         area_threshold_for_orientation=0.0,
         obj_only=None,
         options=None):
    gate_angle_threshold_radians = gate_angle_threshold_degrees * D2R

    if options.show:
        import matplotlib.pyplot as plt
        import matplotlib.ticker as mticker

    M = SymobolicModels()
    x = sympy.DeferredVector('x')
    G_symbolic = M.get_observation_model(x)
    dx_symbolic = M.get_process_model(x)

    if 0:
        print('G_symbolic')
        sympy.pprint(G_symbolic)
        print()

    G_linearized = [G_symbolic.diff(x[i]) for i in range(7)]
    if 0:
        print('G_linearized')
        for i in range(len(G_linearized)):
            sympy.pprint(G_linearized[i])
        print()

    arg_tuple_x = (M.P00, M.P01, M.P02, M.P03, M.P10, M.P11, M.P12, M.P13,
                   M.P20, M.P21, M.P22, M.P23, M.Ax, M.Ay, M.Az, x)

    xm = sympy.DeferredVector('xm')
    arg_tuple_x_xm = (M.P00, M.P01, M.P02, M.P03, M.P10, M.P11, M.P12, M.P13,
                      M.P20, M.P21, M.P22, M.P23, M.Ax, M.Ay, M.Az, x, xm)

    eval_G = lambdify(arg_tuple_x, G_symbolic, 'numpy')
    eval_linG = lambdify(arg_tuple_x, G_linearized, 'numpy')

    # coord shift of observation model
    phi_symbolic = M.get_observation_model(xm)

    # H = G - phi
    H_symbolic = G_symbolic - phi_symbolic
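    # H recentres the observation model on the reference state xm; the
    # innovation computed below is then angle_diff(theta_measured, phi(xm))
    # taken mod pi, rather than a raw angle comparison.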

    # We still take derivative wrt x (not xm).
    H_linearized = [H_symbolic.diff(x[i]) for i in range(7)]

    eval_phi = lambdify(arg_tuple_x_xm, phi_symbolic, 'numpy')
    eval_H = lambdify(arg_tuple_x_xm, H_symbolic, 'numpy')
    eval_linH = lambdify(arg_tuple_x_xm, H_linearized, 'numpy')

    if 0:
        print('dx_symbolic')
        sympy.pprint(dx_symbolic)
        print()

    eval_dAdt = drop_dims(lambdify(x, dx_symbolic, 'numpy'))

    debug_level = 0
    if debug_level:
        np.set_printoptions(linewidth=130, suppress=True)

    if os.path.exists(output_h5_filename):
        raise RuntimeError("will not overwrite old file '%s'" %
                           output_h5_filename)

    ca = core_analysis.get_global_CachingAnalyzer()
    with open_file_safe(output_h5_filename, mode='w') as output_h5:

        with open_file_safe(kalman_filename, mode='r') as kh5:
            with open_file_safe(data2d_filename, mode='r') as h5:
                for input_node in kh5.root._f_iter_nodes():
                    # copy everything from source to dest
                    input_node._f_copy(output_h5.root, recursive=True)

                try:
                    dest_table = output_h5.root.ML_estimates
                except tables.exceptions.NoSuchNodeError as err1:
                    # backwards compatibility
                    try:
                        dest_table = output_h5.root.kalman_observations
                    except tables.exceptions.NoSuchNodeError as err2:
                        raise err1
                for colname in ['hz_line%d' % i for i in range(6)]:
                    clear_col(dest_table, colname)
                dest_table.flush()

                if options.show:
                    fig1 = plt.figure()
                    ax1 = fig1.add_subplot(511)
                    ax2 = fig1.add_subplot(512, sharex=ax1)
                    ax3 = fig1.add_subplot(513, sharex=ax1)
                    ax4 = fig1.add_subplot(514, sharex=ax1)
                    ax5 = fig1.add_subplot(515, sharex=ax1)
                    ax1.xaxis.set_major_formatter(
                        mticker.FormatStrFormatter("%d"))

                    min_frame_range = np.inf
                    max_frame_range = -np.inf

                reconst = reconstruct.Reconstructor(kh5)

                camn2cam_id, cam_id2camns = result_utils.get_caminfo_dicts(h5)
                fps = result_utils.get_fps(h5)
                dt = 1.0 / fps

                used_camn_dict = {}

                # associate framenumbers with timestamps using 2d .h5 file
                data2d = h5.root.data2d_distorted[:]  # load to RAM
                if start is not None:
                    data2d = data2d[data2d['frame'] >= start]
                if stop is not None:
                    data2d = data2d[data2d['frame'] <= stop]
                data2d_idxs = np.arange(len(data2d))
                h5_framenumbers = data2d['frame']
                h5_frame_qfi = result_utils.QuickFrameIndexer(h5_framenumbers)

                ML_estimates_2d_idxs = (kh5.root.ML_estimates_2d_idxs[:])

                all_kobs_obj_ids = dest_table.read(field='obj_id')
                all_kobs_frames = dest_table.read(field='frame')
                use_obj_ids = np.unique(all_kobs_obj_ids)
                if obj_only is not None:
                    use_obj_ids = obj_only

                if hasattr(kh5.root.kalman_estimates.attrs,
                           'dynamic_model_name'):
                    dynamic_model = kh5.root.kalman_estimates.attrs.dynamic_model_name
                    if dynamic_model.startswith('EKF '):
                        dynamic_model = dynamic_model[4:]
                else:
                    dynamic_model = 'mamarama, units: mm'
                    warnings.warn(
                        'could not determine dynamic model name, using "%s"' %
                        dynamic_model)

                for obj_id_enum, obj_id in enumerate(use_obj_ids):
                    # Use data association step from kalmanization to load potentially
                    # relevant 2D orientations, but discard previous 3D orientation.
                    if obj_id_enum % 100 == 0:
                        print('obj_id %d (%d of %d)' % (obj_id, obj_id_enum,
                                                        len(use_obj_ids)))
                    if options.show:
                        all_xhats = []
                        all_ori = []

                    output_row_obj_id_cond = all_kobs_obj_ids == obj_id

                    obj_3d_rows = ca.load_dynamics_free_MLE_position(
                        obj_id, kh5)
                    if start is not None:
                        obj_3d_rows = obj_3d_rows[
                            obj_3d_rows['frame'] >= start]
                    if stop is not None:
                        obj_3d_rows = obj_3d_rows[obj_3d_rows['frame'] <= stop]

                    try:
                        smoothed_3d_rows = ca.load_data(
                            obj_id,
                            kh5,
                            use_kalman_smoothing=True,
                            frames_per_second=fps,
                            dynamic_model_name=dynamic_model)
                    except core_analysis.NotEnoughDataToSmoothError:
                        continue

                    smoothed_frame_qfi = result_utils.QuickFrameIndexer(
                        smoothed_3d_rows['frame'])

                    slopes_by_camn_by_frame = collections.defaultdict(dict)
                    x0d_by_camn_by_frame = collections.defaultdict(dict)
                    y0d_by_camn_by_frame = collections.defaultdict(dict)
                    pt_idx_by_camn_by_frame = collections.defaultdict(dict)
                    min_frame = np.inf
                    max_frame = -np.inf

                    start_idx = None
                    for this_idx, this_3d_row in enumerate(obj_3d_rows):
                        # iterate over each sample in the current camera
                        framenumber = this_3d_row['frame']

                        if not np.isnan(this_3d_row['hz_line0']):
                            # We have a valid initial 3d orientation guess.
                            if framenumber < min_frame:
                                min_frame = framenumber
                                assert start_idx is None, "frames out of order?"
                                start_idx = this_idx

                        max_frame = max(max_frame, framenumber)
                        h5_2d_row_idxs = h5_frame_qfi.get_frame_idxs(
                            framenumber)

                        frame2d = data2d[h5_2d_row_idxs]
                        frame2d_idxs = data2d_idxs[h5_2d_row_idxs]

                        obs_2d_idx = this_3d_row['obs_2d_idx']
                        kobs_2d_data = ML_estimates_2d_idxs[int(obs_2d_idx)]

                        # Parse VLArray.
                        this_camns = kobs_2d_data[0::2]
                        this_camn_idxs = kobs_2d_data[1::2]

                        # Now, for each camera viewing this object at this
                        # frame, extract images.
                        for camn, camn_pt_no in zip(this_camns,
                                                    this_camn_idxs):
                            # find 2D point corresponding to object
                            cam_id = camn2cam_id[camn]

                            cond = ((frame2d['camn'] == camn) &
                                    (frame2d['frame_pt_idx'] == camn_pt_no))
                            idxs = np.nonzero(cond)[0]
                            if len(idxs) == 0:
                                continue
                            assert len(idxs) == 1
                            ## if len(idxs)!=1:
                            ##     raise ValueError('expected one (and only one) frame, got %d'%len(idxs))
                            idx = idxs[0]

                            orig_data2d_rownum = frame2d_idxs[idx]
                            frame_timestamp = frame2d[idx]['timestamp']

                            row = frame2d[idx]
                            assert framenumber == row['frame']
                            if ((row['eccentricity'] <
                                 reconst.minimum_eccentricity)
                                    or (row['area'] <
                                        area_threshold_for_orientation)):
                                slopes_by_camn_by_frame[camn][
                                    framenumber] = np.nan
                                x0d_by_camn_by_frame[camn][
                                    framenumber] = np.nan
                                y0d_by_camn_by_frame[camn][
                                    framenumber] = np.nan
                                pt_idx_by_camn_by_frame[camn][
                                    framenumber] = camn_pt_no
                            else:
                                slopes_by_camn_by_frame[camn][
                                    framenumber] = row['slope']
                                x0d_by_camn_by_frame[camn][framenumber] = row['x']
                                y0d_by_camn_by_frame[camn][framenumber] = row['y']
                                pt_idx_by_camn_by_frame[camn][
                                    framenumber] = camn_pt_no

                    if start_idx is None:
                        warnings.warn("skipping obj_id %d: "
                                      "could not find valid start frame" %
                                      obj_id)
                        continue

                    obj_3d_rows = obj_3d_rows[start_idx:]

                    # now collect in a numpy array for all cam

                    assert int(min_frame) == min_frame
                    assert int(max_frame + 1) == max_frame + 1
                    frame_range = np.arange(int(min_frame), int(max_frame + 1))
                    if debug_level >= 1:
                        print('frame range %d-%d' % (frame_range[0],
                                                     frame_range[-1]))
                    camn_list = sorted(slopes_by_camn_by_frame.keys())
                    cam_id_list = [camn2cam_id[camn] for camn in camn_list]
                    n_cams = len(camn_list)
                    n_frames = len(frame_range)

                    save_cols = {}
                    save_cols['frame'] = []
                    for camn in camn_list:
                        save_cols['dist%d' % camn] = []
                        save_cols['used%d' % camn] = []
                        save_cols['theta%d' % camn] = []

                    # NxM array with rows being frames and cols being cameras
                    slopes = np.ones((n_frames, n_cams), dtype=float)
                    x0ds = np.ones((n_frames, n_cams), dtype=float)
                    y0ds = np.ones((n_frames, n_cams), dtype=float)
                    for j, camn in enumerate(camn_list):

                        slopes_by_frame = slopes_by_camn_by_frame[camn]
                        x0d_by_frame = x0d_by_camn_by_frame[camn]
                        y0d_by_frame = y0d_by_camn_by_frame[camn]

                        for frame_idx, absolute_frame_number in enumerate(
                                frame_range):

                            slopes[frame_idx, j] = slopes_by_frame.get(
                                absolute_frame_number, np.nan)
                            x0ds[frame_idx, j] = x0d_by_frame.get(
                                absolute_frame_number, np.nan)
                            y0ds[frame_idx, j] = y0d_by_frame.get(
                                absolute_frame_number, np.nan)

                        if options.show:
                            frf = np.array(frame_range, dtype=float)
                            min_frame_range = min(np.min(frf), min_frame_range)
                            max_frame_range = max(np.max(frf), max_frame_range)

                            ax1.plot(frame_range,
                                     slope2modpi(slopes[:, j]),
                                     '.',
                                     label=camn2cam_id[camn])

                    if options.show:
                        ax1.legend()

                    if 1:
                        # estimate orientation of initial frame
                        row0 = obj_3d_rows[:1]  # take only first row but keep as 1d array
                        hzlines = np.array([
                            row0['hz_line0'], row0['hz_line1'],
                            row0['hz_line2'], row0['hz_line3'],
                            row0['hz_line4'], row0['hz_line5']
                        ]).T
                        directions = reconstruct.line_direction(hzlines)
                        q0 = PQmath.orientation_to_quat(directions[0])
                        assert not np.isnan(
                            q0.x), "cannot start with missing orientation"
                        w0 = 0, 0, 0  # no angular rate
                        init_x = np.array(
                            [w0[0], w0[1], w0[2], q0.x, q0.y, q0.z, q0.w])

                        Pminus = np.zeros((7, 7))

                        # angular rate part of state variance is .5
                        for i in range(0, 3):
                            Pminus[i, i] = .5

                        # quaternion part of state variance is 1
                        for i in range(3, 7):
                            Pminus[i, i] = 1

                    if 1:
                        # setup of noise estimates
                        Q = np.zeros((7, 7))

                        # angular rate part of state variance
                        for i in range(0, 3):
                            Q[i, i] = Q_scalar_rate

                        # quaternion part of state variance
                        for i in range(3, 7):
                            Q[i, i] = Q_scalar_quat

                    preA = np.eye(7)

                    ekf = kalman_ekf.EKF(init_x, Pminus)
                    previous_posterior_x = init_x
                    if options.show:
                        _save_plot_rows = []
                        _save_plot_rows_used = []
                    for frame_idx, absolute_frame_number in enumerate(
                            frame_range):
                        # Evaluate the Jacobian of the process update
                        # using previous frame's posterior estimate. (This
                        # is not quite the same as this frame's prior
                        # estimate. The difference this frame's prior
                        # estimate is _after_ the process update
                        # model. Which we need to get doing this.)

                        if options.show:
                            _save_plot_rows.append(np.nan * np.ones(
                                (n_cams, )))
                            _save_plot_rows_used.append(np.nan * np.ones(
                                (n_cams, )))

                        this_dx = eval_dAdt(previous_posterior_x)
                        A = preA + this_dx * dt
                        if debug_level >= 1:
                            print()
                            print('frame', absolute_frame_number, '-' * 40)
                            print('previous posterior', previous_posterior_x)
                            if debug_level > 6:
                                print('A')
                                print(A)

                        xhatminus, Pminus = ekf.step1__calculate_a_priori(A, Q)
                        if debug_level >= 1:
                            print('new prior', xhatminus)

                        # 1. Gate per-camera orientations.

                        this_frame_slopes = slopes[frame_idx, :]
                        this_frame_theta_measured = slope2modpi(
                            this_frame_slopes)
                        this_frame_x0d = x0ds[frame_idx, :]
                        this_frame_y0d = y0ds[frame_idx, :]
                        if debug_level >= 5:
                            print('this_frame_slopes', this_frame_slopes)

                        save_cols['frame'].append(absolute_frame_number)
                        for j, camn in enumerate(camn_list):
                            # default to no detection, change below
                            save_cols['dist%d' % camn].append(np.nan)
                            save_cols['used%d' % camn].append(0)
                            save_cols['theta%d' % camn].append(
                                this_frame_theta_measured[j])

                        all_data_this_frame_missing = False
                        gate_vector = None

                        y = []  # observation (per camera)
                        hx = []  # expected observation (per camera)
                        C = []  # linearized observation model (per camera)
                        N_obs_this_frame = 0
                        cams_without_data = np.isnan(this_frame_slopes)
                        if np.all(cams_without_data):
                            all_data_this_frame_missing = True

                        smoothed_pos_idxs = smoothed_frame_qfi.get_frame_idxs(
                            absolute_frame_number)
                        if len(smoothed_pos_idxs) == 0:
                            all_data_this_frame_missing = True
                            smoothed_pos_idx = None
                            smooth_row = None
                            center_position = None
                        else:
                            try:
                                assert len(smoothed_pos_idxs) == 1
                            except:
                                print('obj_id', obj_id)
                                print('absolute_frame_number',
                                      absolute_frame_number)
                                if len(frame_range):
                                    print('frame_range[0],frame_range[-1]',
                                          frame_range[0], frame_range[-1])
                                else:
                                    print('no frame range')
                                print('len(smoothed_pos_idxs)',
                                      len(smoothed_pos_idxs))
                                raise
                            smoothed_pos_idx = smoothed_pos_idxs[0]
                            smooth_row = smoothed_3d_rows[smoothed_pos_idx]
                            assert smooth_row['frame'] == absolute_frame_number
                            center_position = np.array(
                                (smooth_row['x'], smooth_row['y'],
                                 smooth_row['z']))
                            if debug_level >= 2:
                                print('center_position', center_position)

                        if not all_data_this_frame_missing:
                            if expected_orientation_method == 'trust_prior':
                                state_for_phi = xhatminus  # use a priori
                            elif expected_orientation_method == 'SVD_line_fits':
                                # construct matrix of planes
                                P = []
                                for camn_idx in range(n_cams):
                                    this_x0d = this_frame_x0d[camn_idx]
                                    this_y0d = this_frame_y0d[camn_idx]
                                    slope = this_frame_slopes[camn_idx]
                                    plane, ray = reconst.get_3D_plane_and_ray(
                                        cam_id, this_x0d, this_y0d, slope)
                                    if np.isnan(plane[0]):
                                        continue
                                    P.append(plane)
                                if len(P) < 2:
                                    # not enough data to do SVD... fallback to prior
                                    state_for_phi = xhatminus  # use a priori
                                else:
                                    Lco = reconstruct.intersect_planes_to_find_line(
                                        P)
                                    q = PQmath.pluecker_to_quat(Lco)
                                    state_for_phi = cgtypes_quat2statespace(q)

                            cams_with_data = ~cams_without_data
                            possible_cam_idxs = np.nonzero(cams_with_data)[0]
                            if debug_level >= 6:
                                print('possible_cam_idxs', possible_cam_idxs)
                            gate_vector = np.zeros((n_cams, ), dtype=bool)
                            ## flip_vector = np.zeros( (n_cams,), dtype=np.bool)
                            for camn_idx in possible_cam_idxs:
                                cam_id = cam_id_list[camn_idx]
                                camn = camn_list[camn_idx]

                                # This ignores distortion. To incorporate
                                # distortion, this would require
                                # appropriate scaling of orientation
                                # vector, which would require knowing
                                # target's size. In which case we should
                                # track head and tail separately and not
                                # use this whole quaternion mess.

                                ## theta_measured=slope2modpi(
                                ##     this_frame_slopes[camn_idx])
                                theta_measured = this_frame_theta_measured[camn_idx]
                                if debug_level >= 6:
                                    print 'cam_id %s, camn %d' % (cam_id, camn)
                                if debug_level >= 3:
                                    a = reconst.find2d(cam_id, center_position)
                                    other_position = get_point_on_line(
                                        xhatminus, center_position)
                                    b = reconst.find2d(cam_id, other_position)
                                    theta_expected = find_theta_mod_pi_between_points(
                                        a, b)
                                    print('  theta_expected,theta_measured',
                                          theta_expected * R2D,
                                          theta_measured * R2D)

                                P = reconst.get_pmat(cam_id)
                                if 0:
                                    args_x = (P[0, 0], P[0, 1], P[0, 2], P[0, 3],
                                              P[1, 0], P[1, 1], P[1, 2], P[1, 3],
                                              P[2, 0], P[2, 1], P[2, 2], P[2, 3],
                                              center_position[0],
                                              center_position[1],
                                              center_position[2], xhatminus)
                                    this_y = theta_measured
                                    this_hx = eval_G(*args_x)
                                    this_C = eval_linG(*args_x)
                                else:
                                    args_x_xm = (P[0, 0], P[0, 1], P[0, 2], P[0, 3],
                                                 P[1, 0], P[1, 1], P[1, 2], P[1, 3],
                                                 P[2, 0], P[2, 1], P[2, 2], P[2, 3],
                                                 center_position[0],
                                                 center_position[1],
                                                 center_position[2],
                                                 xhatminus, state_for_phi)
                                    this_phi = eval_phi(*args_x_xm)
                                    this_y = angle_diff(theta_measured,
                                                        this_phi,
                                                        mod_pi=True)
                                    this_hx = eval_H(*args_x_xm)
                                    this_C = eval_linH(*args_x_xm)
                                    if debug_level >= 3:
                                        print('  this_phi,this_y',
                                              this_phi * R2D, this_y * R2D)

                                save_cols['dist%d' % camn][-1] = this_y  # save

                                # Gate: accept this observation only if its
                                # angular innovation is below threshold.
                                if abs(this_y) < gate_angle_threshold_radians:
                                    save_cols['used%d' % camn][-1] = 1
                                    gate_vector[camn_idx] = 1
                                    if debug_level >= 3:
                                        print '    good'
                                    if options.show:
                                        _save_plot_rows_used[-1][
                                            camn_idx] = this_y
                                    y.append(this_y)
                                    hx.append(this_hx)
                                    C.append(this_C)
                                    N_obs_this_frame += 1

                                    # Save which camn and camn_pt_no was used.
                                    if absolute_frame_number not in used_camn_dict:
                                        used_camn_dict[absolute_frame_number] = []
                                    camn_pt_no = (pt_idx_by_camn_by_frame[camn]
                                                  [absolute_frame_number])
                                    used_camn_dict[absolute_frame_number].append(
                                        (camn, camn_pt_no))
                                else:
                                    if options.show:
                                        _save_plot_rows[-1][camn_idx] = this_y
                                    if debug_level >= 6:
                                        print '    bad'
                            if debug_level >= 1:
                                print 'gate_vector', gate_vector
                                #print 'flip_vector',flip_vector
                            all_data_this_frame_missing = not bool(
                                np.sum(gate_vector))

                        # 3. Construct observations model using all
                        # gated-in camera orientations.

                        if all_data_this_frame_missing:
                            C = None
                            R = None
                            hx = None
                        else:
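                            # Stack the gated innovations into a single
                            # observation model; R assumes independent
                            # angular measurements with common variance
                            # R_scalar.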
                            C = np.array(C)
                            R = R_scalar * np.eye(N_obs_this_frame)
                            hx = np.array(hx)
                            if 0:
                                # crazy observation error scaling
                                for i in range(N_obs_this_frame):
                                    beyond = abs(y[i]) - 10 * D2R
                                    beyond = max(0, beyond)  # clip at zero
                                    R[i, i] = R_scalar * (1 + 10 * beyond)
                            if debug_level >= 6:
                                print 'full values'
                                print 'C', C
                                print 'hx', hx
                                print 'y', y
                                print 'R', R

                        if debug_level >= 1:
                            print 'all_data_this_frame_missing', all_data_this_frame_missing
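                        # EKF measurement update: fuse the gated angular
                        # innovations with the a priori state (xhatminus,
                        # Pminus) to obtain the a posteriori estimate.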
                        xhat, P = ekf.step2__calculate_a_posteriori(
                            xhatminus,
                            Pminus,
                            y=y,
                            hx=hx,
                            C=C,
                            R=R,
                            missing_data=all_data_this_frame_missing)
                        if debug_level >= 1:
                            print 'xhat', xhat
                        previous_posterior_x = xhat
                        if center_position is not None:
                            # save
                            output_row_frame_cond = all_kobs_frames == absolute_frame_number
                            output_row_cond = output_row_frame_cond & output_row_obj_id_cond
                            output_idxs = np.nonzero(output_row_cond)[0]
                            if len(output_idxs) == 0:
                                pass
                            else:
                                assert len(output_idxs) == 1
                                idx = output_idxs[0]
                                hz = state_to_hzline(xhat, center_position)
                                for row in dest_table.iterrows(start=idx,
                                                               stop=(idx + 1)):
                                    for i in range(6):
                                        row['hz_line%d' % i] = hz[i]
                                    row.update()
                        ## xhat_results[ obj_id ][absolute_frame_number ] = (
                        ##     xhat,center_position)
                        if options.show:
                            all_xhats.append(xhat)
                            all_ori.append(state_to_ori(xhat))

                    # save to H5 file
                    names = sorted(save_cols)
                    arrays = []
                    for name in names:
                        if name == 'frame':
                            dtype = np.int64
                        elif name.startswith('dist'):
                            dtype = np.float32
                        elif name.startswith('used'):
                            dtype = np.bool
                        elif name.startswith('theta'):
                            dtype = np.float32
                        else:
                            raise NameError('unknown name %s' % name)
                        arr = np.array(save_cols[name], dtype=dtype)
                        arrays.append(arr)
                    save_recarray = np.rec.fromarrays(arrays, names=names)
                    h5group = core_analysis.get_group_for_obj(obj_id,
                                                              output_h5,
                                                              writeable=True)
                    output_h5.create_table(h5group,
                                           'obj%d' % obj_id,
                                           save_recarray,
                                           filters=tables.Filters(
                                               1, complib='lzo'))

                    if options.show:
                        all_xhats = np.array(all_xhats)
                        all_ori = np.array(all_ori)
                        _save_plot_rows = np.array(_save_plot_rows)
                        _save_plot_rows_used = np.array(_save_plot_rows_used)

                        ax2.plot(frame_range, all_xhats[:, 0], '.', label='p')
                        ax2.plot(frame_range, all_xhats[:, 1], '.', label='q')
                        ax2.plot(frame_range, all_xhats[:, 2], '.', label='r')
                        ax2.legend()

                        ax3.plot(frame_range, all_xhats[:, 3], '.', label='a')
                        ax3.plot(frame_range, all_xhats[:, 4], '.', label='b')
                        ax3.plot(frame_range, all_xhats[:, 5], '.', label='c')
                        ax3.plot(frame_range, all_xhats[:, 6], '.', label='d')
                        ax3.legend()

                        ax4.plot(frame_range, all_ori[:, 0], '.', label='x')
                        ax4.plot(frame_range, all_ori[:, 1], '.', label='y')
                        ax4.plot(frame_range, all_ori[:, 2], '.', label='z')
                        ax4.legend()

                        colors = []
                        for i in range(n_cams):
                            line, = ax5.plot(frame_range,
                                             _save_plot_rows_used[:, i] * R2D,
                                             'o',
                                             label=cam_id_list[i])
                            colors.append(line.get_color())
                        for i in range(n_cams):
                            # loop again to get normal MPL color cycling
                            ax5.plot(frame_range,
                                     _save_plot_rows[:, i] * R2D,
                                     'o',
                                     mec=colors[i],
                                     ms=1.0)
                        ax5.set_ylabel('observation (deg)')
                        ax5.legend()
Example #6
def doit(
    h5_filename=None,
    output_h5_filename=None,
    ufmf_filenames=None,
    kalman_filename=None,
    start=None,
    stop=None,
    view=None,
    erode=0,
    save_images=False,
    save_image_dir=None,
    intermediate_thresh_frac=None,
    final_thresh=None,
    stack_N_images=None,
    stack_N_images_min=None,
    old_sync_timestamp_source=False,
    do_rts_smoothing=True,
):
    """

    Copy all data in .h5 file (specified by h5_filename) to a new .h5
    file in which orientations are set based on image analysis of
    .ufmf files. Tracking data to associate 2D points from subsequent
    frames is read from the .h5 kalman file specified by
    kalman_filename.

    """
    if view is None:
        view = ["orig" for f in ufmf_filenames]
    else:
        assert len(view) == len(ufmf_filenames)

    if intermediate_thresh_frac is None or final_thresh is None:
        raise ValueError("intermediate_thresh_frac and final_thresh must be "
                         "set")

    filename2view = dict(zip(ufmf_filenames, view))

    ca = core_analysis.get_global_CachingAnalyzer()
    obj_ids, use_obj_ids, is_mat_file, data_file, extra = ca.initial_file_load(
        kalman_filename)
    try:
        ML_estimates_2d_idxs = data_file.root.ML_estimates_2d_idxs[:]
    except tables.exceptions.NoSuchNodeError as err1:
        # backwards compatibility
        try:
            ML_estimates_2d_idxs = data_file.root.kalman_observations_2d_idxs[:]
        except tables.exceptions.NoSuchNodeError as err2:
            raise err1

    if os.path.exists(output_h5_filename):
        raise RuntimeError("will not overwrite old file '%s'" %
                           output_h5_filename)
    with open_file_safe(output_h5_filename, delete_on_error=True,
                        mode="w") as output_h5:
        if save_image_dir is not None:
            if not os.path.exists(save_image_dir):
                os.mkdir(save_image_dir)

        with open_file_safe(h5_filename, mode="r") as h5:

            fps = result_utils.get_fps(h5, fail_on_error=True)

            for input_node in h5.root._f_iter_nodes():
                # copy everything from source to dest
                input_node._f_copy(output_h5.root, recursive=True)
            print("done copying")

            # Clear values in destination table that we may overwrite.
            dest_table = output_h5.root.data2d_distorted
            for colname in [
                    "x",
                    "y",
                    "area",
                    "slope",
                    "eccentricity",
                    "cur_val",
                    "mean_val",
                    "sumsqf_val",
            ]:
                if colname == "cur_val":
                    fill_value = 0
                else:
                    fill_value = np.nan
                clear_col(dest_table, colname, fill_value=fill_value)
            dest_table.flush()
            print("done clearing")

            camn2cam_id, cam_id2camns = result_utils.get_caminfo_dicts(h5)

            cam_id2fmfs = collections.defaultdict(list)
            cam_id2view = {}
            for ufmf_filename in ufmf_filenames:
                fmf = ufmf.FlyMovieEmulator(
                    ufmf_filename,
                    # darken=-50,
                    allow_no_such_frame_errors=True,
                )
                timestamps = fmf.get_all_timestamps()

                cam_id = get_cam_id_from_filename(fmf.filename,
                                                  cam_id2camns.keys())
                cam_id2fmfs[cam_id].append(
                    (fmf, result_utils.Quick1DIndexer(timestamps)))

                cam_id2view[cam_id] = filename2view[fmf.filename]

            # associate framenumbers with timestamps using 2d .h5 file
            data2d = h5.root.data2d_distorted[:]  # load to RAM
            data2d_idxs = np.arange(len(data2d))
            h5_framenumbers = data2d["frame"]
            h5_frame_qfi = result_utils.QuickFrameIndexer(h5_framenumbers)

            fpc = realtime_image_analysis.FitParamsClass()  # allocate FitParamsClass

            for obj_id_enum, obj_id in enumerate(use_obj_ids):
                print("object %d of %d" % (obj_id_enum, len(use_obj_ids)))

                # get all images for this camera and this obj_id

                obj_3d_rows = ca.load_dynamics_free_MLE_position(
                    obj_id, data_file)

                this_obj_framenumbers = collections.defaultdict(list)
                if save_images:
                    this_obj_raw_images = collections.defaultdict(list)
                    this_obj_mean_images = collections.defaultdict(list)
                this_obj_absdiff_images = collections.defaultdict(list)
                this_obj_morphed_images = collections.defaultdict(list)
                this_obj_morph_failures = collections.defaultdict(list)
                this_obj_im_coords = collections.defaultdict(list)
                this_obj_com_coords = collections.defaultdict(list)
                this_obj_camn_pt_no = collections.defaultdict(list)

                for this_3d_row in obj_3d_rows:
                    # iterate over each sample in the current camera
                    framenumber = this_3d_row["frame"]
                    if start is not None:
                        if not framenumber >= start:
                            continue
                    if stop is not None:
                        if not framenumber <= stop:
                            continue
                    h5_2d_row_idxs = h5_frame_qfi.get_frame_idxs(framenumber)

                    frame2d = data2d[h5_2d_row_idxs]
                    frame2d_idxs = data2d_idxs[h5_2d_row_idxs]

                    obs_2d_idx = this_3d_row["obs_2d_idx"]
                    kobs_2d_data = ML_estimates_2d_idxs[int(obs_2d_idx)]

                    # Parse VLArray: entries are interleaved
                    # (camn, point-index) pairs.
                    this_camns = kobs_2d_data[0::2]
                    this_camn_idxs = kobs_2d_data[1::2]

                    # Now, for each camera viewing this object at this
                    # frame, extract images.
                    for camn, camn_pt_no in zip(this_camns, this_camn_idxs):

                        # find 2D point corresponding to object
                        cam_id = camn2cam_id[camn]

                        movie_tups_for_this_camn = cam_id2fmfs[cam_id]
                        cond = (frame2d["camn"] == camn) & (
                            frame2d["frame_pt_idx"] == camn_pt_no)
                        idxs = np.nonzero(cond)[0]
                        assert len(idxs) == 1
                        idx = idxs[0]

                        orig_data2d_rownum = frame2d_idxs[idx]

                        if not old_sync_timestamp_source:
                            # Change the next line to 'timestamp' for old
                            # data (before May/June 2009 -- the switch to
                            # fview_ext_trig)
                            frame_timestamp = frame2d[idx][
                                "cam_received_timestamp"]
                        else:
                            # previous version
                            frame_timestamp = frame2d[idx]["timestamp"]
                        found = None
                        for fmf, fmf_timestamp_qi in movie_tups_for_this_camn:
                            fmf_fnos = fmf_timestamp_qi.get_idxs(
                                frame_timestamp)
                            if not len(fmf_fnos):
                                continue
                            assert len(fmf_fnos) == 1

                            # should only be one .ufmf with this frame and cam_id
                            assert found is None

                            fmf_fno = fmf_fnos[0]
                            found = (fmf, fmf_fno)
                        if found is None:
                            print(
                                "no image data for frame timestamp %s cam_id %s"
                                % (repr(frame_timestamp), cam_id))
                            continue
                        fmf, fmf_fno = found
                        image, fmf_timestamp = fmf.get_frame(fmf_fno)
                        mean_image = fmf.get_mean_for_timestamp(fmf_timestamp)
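                        # (the 'mean' image is the background estimate
                        # stored in the .ufmf for this point in time)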
                        coding = fmf.get_format()
                        if imops.is_coding_color(coding):
                            image = imops.to_rgb8(coding, image)
                            mean_image = imops.to_rgb8(coding, mean_image)
                        else:
                            image = imops.to_mono8(coding, image)
                            mean_image = imops.to_mono8(coding, mean_image)

                        xy = (
                            int(round(frame2d[idx]["x"])),
                            int(round(frame2d[idx]["y"])),
                        )
                        maxsize = (fmf.get_width(), fmf.get_height())

                        # Accumulate cropped images. Note that the region
                        # of the full image that the cropped image
                        # occupies changes over time as the tracked object
                        # moves. Thus, averaging these cropped-and-shifted
                        # images is not the same as simply averaging the
                        # full frame.

                        roiradius = 25
                        warnings.warn(
                            "roiradius hard-coded to %d: could be set "
                            "from 3D tracking" % roiradius)
                        tmp = clip_and_math(image, mean_image, xy, roiradius,
                                            maxsize)
                        im_coords, raw_im, mean_im, absdiff_im = tmp

                        max_absdiff_im = absdiff_im.max()
                        intermediate_thresh = intermediate_thresh_frac * max_absdiff_im
                        absdiff_im[absdiff_im <= intermediate_thresh] = 0

                        if erode > 0:
                            morphed_im = scipy.ndimage.grey_erosion(absdiff_im,
                                                                    size=erode)
                            ## morphed_im = scipy.ndimage.binary_erosion(absdiff_im>1).astype(np.float32)*255.0
                        else:
                            morphed_im = absdiff_im

                        y0_roi, x0_roi = scipy.ndimage.center_of_mass(
                            morphed_im)
                        x0 = im_coords[0] + x0_roi
                        y0 = im_coords[1] + y0_roi

                        if 1:
                            morphed_im_binary = morphed_im > 0
                            labels, n_labels = scipy.ndimage.label(
                                morphed_im_binary)
                            morph_fail_because_multiple_blobs = False

                            if n_labels > 1:
                                x0, y0 = np.nan, np.nan
                                # More than one blob -- don't allow image.
                                if 1:
                                    # for min flattening
                                    morphed_im = np.empty(morphed_im.shape,
                                                          dtype=np.uint8)
                                    morphed_im.fill(255)
                                    morph_fail_because_multiple_blobs = True
                                else:
                                    # for mean flattening
                                    morphed_im = np.zeros_like(morphed_im)
                                    morph_fail_because_multiple_blobs = True

                        this_obj_framenumbers[camn].append(framenumber)
                        if save_images:
                            this_obj_raw_images[camn].append(
                                (raw_im, im_coords))
                            this_obj_mean_images[camn].append(mean_im)
                        this_obj_absdiff_images[camn].append(absdiff_im)
                        this_obj_morphed_images[camn].append(morphed_im)
                        this_obj_morph_failures[camn].append(
                            morph_fail_because_multiple_blobs)
                        this_obj_im_coords[camn].append(im_coords)
                        this_obj_com_coords[camn].append((x0, y0))
                        this_obj_camn_pt_no[camn].append(orig_data2d_rownum)
                        if 0:
                            fname = "obj%05d_%s_frame%07d_pt%02d.png" % (
                                obj_id,
                                cam_id,
                                framenumber,
                                camn_pt_no,
                            )
                            plot_image_subregion(
                                raw_im,
                                mean_im,
                                absdiff_im,
                                roiradius,
                                fname,
                                im_coords,
                                view=filename2view[fmf.filename],
                            )

                # Now, all the frames from all cameras for this obj_id
                # have been gathered. Do a camera-by-camera analysis.
                for camn in this_obj_absdiff_images:
                    cam_id = camn2cam_id[camn]
                    image_framenumbers = np.array(this_obj_framenumbers[camn])
                    if save_images:
                        raw_images = this_obj_raw_images[camn]
                        mean_images = this_obj_mean_images[camn]
                    absdiff_images = this_obj_absdiff_images[camn]
                    morphed_images = this_obj_morphed_images[camn]
                    morph_failures = np.array(this_obj_morph_failures[camn])
                    im_coords = this_obj_im_coords[camn]
                    com_coords = this_obj_com_coords[camn]
                    camn_pt_no_array = this_obj_camn_pt_no[camn]

                    all_framenumbers = np.arange(image_framenumbers[0],
                                                 image_framenumbers[-1] + 1)

                    com_coords = np.array(com_coords)
                    if do_rts_smoothing:
                        # Perform RTS smoothing on center-of-mass coordinates.

                        # Find first good datum.
                        fgnz = np.nonzero(~np.isnan(com_coords[:, 0]))
                        com_coords_smooth = np.empty(com_coords.shape,
                                                     dtype=np.float)
                        com_coords_smooth.fill(np.nan)

                        if len(fgnz[0]):
                            first_good = fgnz[0][0]

                            RTS_com_coords = com_coords[first_good:, :]

                            # Setup parameters for Kalman filter.
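                            # Constant-velocity model: the state is
                            # [x, y, vx, vy]; only positions are observed.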
                            dt = 1.0 / fps
                            A = np.array(
                                [
                                    [1, 0, dt, 0],  # process update
                                    [0, 1, 0, dt],
                                    [0, 0, 1, 0],
                                    [0, 0, 0, 1],
                                ],
                                dtype=np.float,
                            )
                            C = np.array(
                                [[1, 0, 0, 0],
                                 [0, 1, 0, 0]],  # observation matrix
                                dtype=np.float,
                            )
                            Q = 0.1 * np.eye(4)  # process noise
                            R = 1.0 * np.eye(2)  # observation noise
                            initx = np.array(
                                [
                                    RTS_com_coords[0, 0], RTS_com_coords[0, 1],
                                    0, 0
                                ],
                                dtype=np.float,
                            )
                            initV = 2 * np.eye(4)
                            initV[0, 0] = 0.1
                            initV[1, 1] = 0.1
                            y = RTS_com_coords
                            xsmooth, Vsmooth = adskalman.adskalman.kalman_smoother(
                                y, A, C, Q, R, initx, initV)
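                            # Keep only the smoothed positions (the first
                            # two components of the state).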
                            com_coords_smooth[first_good:] = xsmooth[:, :2]

                        # Now shift images

                        image_shift = com_coords_smooth - com_coords
                        bad_cond = np.isnan(image_shift[:, 0])
                        # broadcast zeros to places where no good tracking
                        image_shift[bad_cond, 0] = 0
                        image_shift[bad_cond, 1] = 0

                        shifted_morphed_images = [
                            shift_image(im, xy)
                            for im, xy in zip(morphed_images, image_shift)
                        ]

                        results = flatten_image_stack(
                            image_framenumbers,
                            shifted_morphed_images,
                            im_coords,
                            camn_pt_no_array,
                            N=stack_N_images,
                        )
                    else:
                        results = flatten_image_stack(
                            image_framenumbers,
                            morphed_images,
                            im_coords,
                            camn_pt_no_array,
                            N=stack_N_images,
                        )

                    # The variable fno (the first element of the results
                    # tuple) is guaranteed to be contiguous and to span
                    # the range from the first to last frames available.

                    for (
                            fno,
                            av_im,
                            lowerleft,
                            orig_data2d_rownum,
                            orig_idx,
                            orig_idxs_in_average,
                    ) in results:

                        # Clip image to reduce moment arms.
                        av_im[av_im <= final_thresh] = 0

                        fail_fit = False
                        fast_av_im = FastImage.asfastimage(
                            av_im.astype(np.uint8))
                        try:
                            (x0_roi, y0_roi, area, slope,
                             eccentricity) = fpc.fit(fast_av_im)
                        except realtime_image_analysis.FitParamsError as err:
                            fail_fit = True

                        this_morph_failures = morph_failures[
                            orig_idxs_in_average]
                        n_failed_images = np.sum(this_morph_failures)
                        n_good_images = stack_N_images - n_failed_images
                        n_images_is_acceptable = (n_good_images >=
                                                  stack_N_images_min)

                        if fail_fit or not n_images_is_acceptable:
                            x0_roi = np.nan
                            y0_roi = np.nan
                            area, slope, eccentricity = np.nan, np.nan, np.nan

                        x0 = x0_roi + lowerleft[0]
                        y0 = y0_roi + lowerleft[1]

                        if 1:
                            for row in dest_table.iterrows(
                                    start=orig_data2d_rownum,
                                    stop=orig_data2d_rownum + 1):

                                row["x"] = x0
                                row["y"] = y0
                                row["area"] = area
                                row["slope"] = slope
                                row["eccentricity"] = eccentricity
                                row.update()  # save data

                        if save_images:
                            # Display debugging images
                            fname = "av_obj%05d_%s_frame%07d.png" % (
                                obj_id,
                                cam_id,
                                fno,
                            )
                            if save_image_dir is not None:
                                fname = os.path.join(save_image_dir, fname)

                            raw_im, raw_coords = raw_images[orig_idx]
                            mean_im = mean_images[orig_idx]
                            absdiff_im = absdiff_images[orig_idx]
                            morphed_im = morphed_images[orig_idx]
                            raw_l, raw_b = raw_coords[:2]

                            imh, imw = raw_im.shape[:2]
                            n_ims = 5

                            if 1:
                                # increase contrast
                                contrast_scale = 2.0
                                av_im_show = np.clip(av_im * contrast_scale, 0,
                                                     255)

                            margin = 10
                            scale = 3

                            # calculate the orientation line
                            yintercept = y0 - slope * x0
                            xplt = np.array([
                                lowerleft[0] - 5,
                                lowerleft[0] + av_im_show.shape[1] + 5,
                            ])
                            yplt = slope * xplt + yintercept
                            if 1:
                                # only send non-nan values to plot
                                plt_good = ~np.isnan(xplt) & ~np.isnan(yplt)
                                xplt = xplt[plt_good]
                                yplt = yplt[plt_good]

                            top_row_width = scale * imw * n_ims + (
                                1 + n_ims) * margin
                            SHOW_STACK = True
                            if SHOW_STACK:
                                n_stack_rows = 4
                                rw = scale * imw * stack_N_images + (
                                    1 + n_ims) * margin
                                row_width = max(top_row_width, rw)
                                col_height = (n_stack_rows * scale * imh +
                                              (n_stack_rows + 1) * margin)
                                stack_margin = 20
                            else:
                                row_width = top_row_width
                                col_height = scale * imh + 2 * margin
                                stack_margin = 0

                            canv = benu.Canvas(
                                fname,
                                row_width,
                                col_height + stack_margin,
                                color_rgba=(1, 1, 1, 1),
                            )

                            if SHOW_STACK:
                                for (stacki, s_orig_idx
                                     ) in enumerate(orig_idxs_in_average):

                                    (s_raw_im,
                                     s_raw_coords) = raw_images[s_orig_idx]
                                    s_raw_l, s_raw_b = s_raw_coords[:2]
                                    s_imh, s_imw = s_raw_im.shape[:2]
                                    user_rect = (s_raw_l, s_raw_b, s_imw,
                                                 s_imh)

                                    x_display = (stacki + 1) * margin + (
                                        scale * imw) * stacki
                                    for show in ["raw", "absdiff", "morphed"]:
                                        if show == "raw":
                                            y_display = scale * imh + 2 * margin
                                        elif show == "absdiff":
                                            y_display = 2 * scale * imh + 3 * margin
                                        elif show == "morphed":
                                            y_display = 3 * scale * imh + 4 * margin
                                        display_rect = (
                                            x_display,
                                            y_display + stack_margin,
                                            scale * raw_im.shape[1],
                                            scale * raw_im.shape[0],
                                        )

                                        with canv.set_user_coords(
                                                display_rect,
                                                user_rect,
                                                transform=cam_id2view[cam_id],
                                        ):

                                            if show == "raw":
                                                s_im = s_raw_im.astype(
                                                    np.uint8)
                                            elif show == "absdiff":
                                                tmp = absdiff_images[
                                                    s_orig_idx]
                                                s_im = tmp.astype(np.uint8)
                                            elif show == "morphed":
                                                tmp = morphed_images[
                                                    s_orig_idx]
                                                s_im = tmp.astype(np.uint8)

                                            canv.imshow(s_im, s_raw_l, s_raw_b)
                                            sx0, sy0 = com_coords[s_orig_idx]
                                            X = [sx0]
                                            Y = [sy0]
                                            # the raw coords in red
                                            canv.scatter(X,
                                                         Y,
                                                         color_rgba=(1, 0.5,
                                                                     0.5, 1))

                                            if do_rts_smoothing:
                                                sx0, sy0 = com_coords_smooth[
                                                    s_orig_idx]
                                                X = [sx0]
                                                Y = [sy0]
                                                # the RTS smoothed coords in green
                                                canv.scatter(
                                                    X,
                                                    Y,
                                                    color_rgba=(0.5, 1, 0.5,
                                                                1))

                                            if s_orig_idx == orig_idx:
                                                boxx = np.array([
                                                    s_raw_l,
                                                    s_raw_l,
                                                    s_raw_l + s_imw,
                                                    s_raw_l + s_imw,
                                                    s_raw_l,
                                                ])
                                                boxy = np.array([
                                                    s_raw_b,
                                                    s_raw_b + s_imh,
                                                    s_raw_b + s_imh,
                                                    s_raw_b,
                                                    s_raw_b,
                                                ])
                                                canv.plot(
                                                    boxx,
                                                    boxy,
                                                    color_rgba=(0.5, 1, 0.5,
                                                                1),
                                                )
                                        if show == "morphed":
                                            canv.text(
                                                "morphed %d" %
                                                (s_orig_idx - orig_idx, ),
                                                display_rect[0],
                                                (display_rect[1] +
                                                 display_rect[3] +
                                                 stack_margin - 20),
                                                font_size=font_size,
                                                color_rgba=(1, 0, 0, 1),
                                            )

                            # Display raw_im
                            display_rect = (
                                margin,
                                margin,
                                scale * raw_im.shape[1],
                                scale * raw_im.shape[0],
                            )
                            user_rect = (raw_l, raw_b, imw, imh)
                            with canv.set_user_coords(
                                    display_rect,
                                    user_rect,
                                    transform=cam_id2view[cam_id],
                            ):
                                canv.imshow(raw_im.astype(np.uint8), raw_l,
                                            raw_b)
                                canv.plot(
                                    xplt, yplt,
                                    color_rgba=(0, 1, 0,
                                                0.5))  # the orientation line
                            canv.text(
                                "raw",
                                display_rect[0],
                                display_rect[1] + display_rect[3],
                                font_size=font_size,
                                color_rgba=(0.5, 0.5, 0.9, 1),
                                shadow_offset=1,
                            )

                            # Display mean_im
                            display_rect = (
                                2 * margin + (scale * imw),
                                margin,
                                scale * mean_im.shape[1],
                                scale * mean_im.shape[0],
                            )
                            user_rect = (raw_l, raw_b, imw, imh)
                            with canv.set_user_coords(
                                    display_rect,
                                    user_rect,
                                    transform=cam_id2view[cam_id],
                            ):
                                canv.imshow(mean_im.astype(np.uint8), raw_l,
                                            raw_b)
                            canv.text(
                                "mean",
                                display_rect[0],
                                display_rect[1] + display_rect[3],
                                font_size=font_size,
                                color_rgba=(0.5, 0.5, 0.9, 1),
                                shadow_offset=1,
                            )

                            # Display absdiff_im
                            display_rect = (
                                3 * margin + (scale * imw) * 2,
                                margin,
                                scale * absdiff_im.shape[1],
                                scale * absdiff_im.shape[0],
                            )
                            user_rect = (raw_l, raw_b, imw, imh)
                            absdiff_clip = np.clip(absdiff_im * contrast_scale,
                                                   0, 255)
                            with canv.set_user_coords(
                                    display_rect,
                                    user_rect,
                                    transform=cam_id2view[cam_id],
                            ):
                                canv.imshow(absdiff_clip.astype(np.uint8),
                                            raw_l, raw_b)
                            canv.text(
                                "absdiff",
                                display_rect[0],
                                display_rect[1] + display_rect[3],
                                font_size=font_size,
                                color_rgba=(0.5, 0.5, 0.9, 1),
                                shadow_offset=1,
                            )

                            # Display morphed_im
                            display_rect = (
                                4 * margin + (scale * imw) * 3,
                                margin,
                                scale * morphed_im.shape[1],
                                scale * morphed_im.shape[0],
                            )
                            user_rect = (raw_l, raw_b, imw, imh)
                            morphed_clip = np.clip(morphed_im * contrast_scale,
                                                   0, 255)
                            with canv.set_user_coords(
                                    display_rect,
                                    user_rect,
                                    transform=cam_id2view[cam_id],
                            ):
                                canv.imshow(morphed_clip.astype(np.uint8),
                                            raw_l, raw_b)
                            if 0:
                                canv.text(
                                    "morphed",
                                    display_rect[0],
                                    display_rect[1] + display_rect[3],
                                    font_size=font_size,
                                    color_rgba=(0.5, 0.5, 0.9, 1),
                                    shadow_offset=1,
                                )

                            # Display time-averaged absdiff_im
                            display_rect = (
                                5 * margin + (scale * imw) * 4,
                                margin,
                                scale * av_im_show.shape[1],
                                scale * av_im_show.shape[0],
                            )
                            user_rect = (
                                lowerleft[0],
                                lowerleft[1],
                                av_im_show.shape[1],
                                av_im_show.shape[0],
                            )
                            with canv.set_user_coords(
                                    display_rect,
                                    user_rect,
                                    transform=cam_id2view[cam_id],
                            ):
                                canv.imshow(
                                    av_im_show.astype(np.uint8),
                                    lowerleft[0],
                                    lowerleft[1],
                                )
                                canv.plot(
                                    xplt, yplt,
                                    color_rgba=(0, 1, 0,
                                                0.5))  # the orientation line
                            canv.text(
                                "stacked/flattened",
                                display_rect[0],
                                display_rect[1] + display_rect[3],
                                font_size=font_size,
                                color_rgba=(0.5, 0.5, 0.9, 1),
                                shadow_offset=1,
                            )

                            canv.text(
                                "%s frame % 7d: eccentricity % 5.1f, min N images %d, actual N images %d"
                                % (
                                    cam_id,
                                    fno,
                                    eccentricity,
                                    stack_N_images_min,
                                    n_good_images,
                                ),
                                0,
                                15,
                                font_size=font_size,
                                color_rgba=(0.6, 0.7, 0.9, 1),
                                shadow_offset=1,
                            )
                            canv.save()

                # Save results to new table
                if 0:
                    recarray = np.rec.array(list_of_rows_of_data2d,
                                            dtype=Info2DCol_description)
                    dest_table.append(recarray)
                    dest_table.flush()
            dest_table.attrs.has_ibo_data = True
        data_file.close()
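
# A minimal invocation sketch, assuming this module is run as a script.
# The filenames and numeric values below are hypothetical and purely
# illustrative, not taken from the original source.
if __name__ == '__main__':
    doit(
        h5_filename='data2d_distorted.h5',            # hypothetical input
        output_h5_filename='data2d_with_orientations.h5',
        ufmf_filenames=['cam1_movie.ufmf', 'cam2_movie.ufmf'],
        kalman_filename='kalmanized.h5',
        intermediate_thresh_frac=0.5,                 # illustrative values
        final_thresh=7,
        stack_N_images=5,
        stack_N_images_min=5,
    )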
Example #7
                fmf = ufmf.FlyMovieEmulator(ufmf_filename,
                                            #darken=-50,
                                            allow_no_such_frame_errors=True)
                timestamps = fmf.get_all_timestamps()

                cam_id = get_cam_id_from_filename(fmf.filename, cam_id2camns.keys())
                cam_id2fmfs[cam_id].append(
                    (fmf,result_utils.Quick1DIndexer(timestamps)))

                cam_id2view[cam_id] = filename2view[fmf.filename]

            # associate framenumbers with timestamps using 2d .h5 file
            data2d = h5.root.data2d_distorted[:] # load to RAM
            data2d_idxs = np.arange(len(data2d))
            h5_framenumbers = data2d['frame']
            h5_frame_qfi = result_utils.QuickFrameIndexer(h5_framenumbers)

            fpc = realtime_image_analysis.FitParamsClass() # allocate FitParamsClass

            for obj_id_enum,obj_id in enumerate(use_obj_ids):
                print 'object %d of %d'%(obj_id_enum,len(use_obj_ids))

                # get all images for this camera and this obj_id

                obj_3d_rows = ca.load_dynamics_free_MLE_position( obj_id, data_file)

                this_obj_framenumbers = collections.defaultdict(list)
                if save_images:
                    this_obj_raw_images = collections.defaultdict(list)
                    this_obj_mean_images = collections.defaultdict(list)
                this_obj_absdiff_images = collections.defaultdict(list)