Example #1
def h5_shorten(input_filename, output_filename, options):
    with open_file_safe(input_filename, mode="r") as h5:
        with open_file_safe(output_filename, mode="w",
                            delete_on_error=True) as output_h5:
            if not options.data2d_only:
                do_data_association_tables(h5, output_h5, options)
            for node in h5.root._f_iter_nodes():
                if hasattr(node, "name") and node.name in [
                        "data2d_distorted",
                        "kalman_estimates",
                ]:
                    if options.data2d_only:
                        if node.name != "data2d_distorted":
                            continue
                    print("selectively copying", node)
                    copy_selective(h5, node, output_h5.root, options)
                elif hasattr(node, "name") and node.name in [
                        "ML_estimates",
                        "ML_estimates_2d_idxs",
                ]:
                    continue
                else:
                    # copy everything from source to dest
                    print("copying entire", node)
                    node._f_copy(output_h5.root, recursive=True)
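Every example on this page relies on the same open_file_safe context-manager pattern. The sketch below is a hypothetical reimplementation for illustration only, not the library's actual code: it opens a PyTables file and, with delete_on_error=True, removes a partially written output file whenever the managed block raises.

import contextlib
import os

import tables


@contextlib.contextmanager
def open_file_safe(filename, delete_on_error=False, **kwargs):
    # open like tables.open_file; remaining kwargs (mode, title, ...) pass through
    f = tables.open_file(filename, **kwargs)
    try:
        yield f
    except Exception:
        f.close()
        if delete_on_error:
            os.unlink(filename)  # discard the incomplete output file
        raise
    f.close()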
Example #2
def kalmanize(
    src_filename,
    do_full_kalmanization=True,
    dest_filename=None,
    reconstructor=None,
    reconstructor_filename=None,
    start_frame=None,
    stop_frame=None,
    exclude_cam_ids=None,
    exclude_camns=None,
    dynamic_model_name=None,
    debug=False,
    frames_per_second=None,
    area_threshold=0,
    min_observations_to_save=0,
    options=None,
):
    if options is None:
        # get default options
        parser = get_parser()
        (options, args) = parser.parse_args([])

    if debug:
        numpy.set_printoptions(precision=3, linewidth=120, suppress=False)

    if exclude_cam_ids is None:
        exclude_cam_ids = []

    if exclude_camns is None:
        exclude_camns = []

    use_existing_filename = True

    if reconstructor is not None:
        assert isinstance(reconstructor, flydra_core.reconstruct.Reconstructor)
        assert reconstructor_filename is None

    with open_file_safe(src_filename, mode='r') as results:
        camn2cam_id, cam_id2camns = get_caminfo_dicts(results)

        if do_full_kalmanization:
            if dynamic_model_name is None:
                if hasattr(results.root, 'kalman_estimates'):
                    if hasattr(results.root.kalman_estimates.attrs,
                               'dynamic_model_name'):
                        dynamic_model_name = (results.root.kalman_estimates.
                                              attrs.dynamic_model_name)
                        warnings.warn('dynamic model not specified. '
                                      'using "%s"' % dynamic_model_name)
            if dynamic_model_name is None:
                dynamic_model_name = 'EKF mamarama, units: mm'
                warnings.warn('dynamic model not specified. '
                              'using "%s"' % dynamic_model_name)
            else:
                print('using dynamic model "%s"' % dynamic_model_name)

            if reconstructor_filename is not None:
                if reconstructor_filename.endswith('h5'):
                    with PT.open_file(reconstructor_filename, mode='r') as fd:
                        reconstructor = flydra_core.reconstruct.Reconstructor(
                            fd,
                            minimum_eccentricity=options.
                            force_minimum_eccentricity)
                else:
                    reconstructor = flydra_core.reconstruct.Reconstructor(
                        reconstructor_filename,
                        minimum_eccentricity=options.force_minimum_eccentricity
                    )
            else:
                # reconstructor_filename is None
                if reconstructor is None:
                    reconstructor = flydra_core.reconstruct.Reconstructor(
                        results,
                        minimum_eccentricity=options.force_minimum_eccentricity
                    )

            if options.force_minimum_eccentricity is not None:
                if (reconstructor.minimum_eccentricity !=
                        options.force_minimum_eccentricity):
                    raise ValueError('could not force minimum_eccentricity')

            if dest_filename is None:
                dest_filename = os.path.splitext(
                    results.filename)[0] + '.kalmanized.h5'
        else:
            use_existing_filename = False
            dest_filename = tempfile.mktemp(suffix='.h5')

        if reconstructor is not None and reconstructor.cal_source_type == 'pytables':
            save_reconstructor_filename = reconstructor.cal_source.filename
        else:
            # reconstructor may be None here (when not doing full
            # kalmanization), so do not touch its attributes
            warnings.warn('unable to determine reconstructor source filename')
            save_reconstructor_filename = None

        if frames_per_second is None:
            frames_per_second = get_fps(results)
            if do_full_kalmanization:
                print('read frames_per_second from file', frames_per_second)

        dt = 1.0 / frames_per_second

        if options.sync_error_threshold_msec is None:
            # default is IFI/2
            sync_error_threshold = (0.5 * dt)
        else:
            sync_error_threshold = options.sync_error_threshold_msec / 1000.0

        if os.path.exists(dest_filename):
            if use_existing_filename:
                raise ValueError('%s already exists. Will not '
                                 'overwrite.' % dest_filename)
            else:
                os.unlink(dest_filename)

        with open_file_safe(dest_filename,
                            mode="w",
                            title="tracked Flydra data file",
                            delete_on_error=True) as h5file:

            if 'experiment_info' in results.root:
                results.root.experiment_info._f_copy(h5file.root,
                                                     recursive=True)

            if do_full_kalmanization:
                parsed = read_textlog_header(results)
                if 'trigger_CS3' not in parsed:
                    parsed['trigger_CS3'] = 'unknown'
                textlog_save_lines = [
                    'kalmanize running at %s fps, (top %s, trigger_CS3 %s, flydra_version %s)'
                    % (str(frames_per_second), str(parsed.get(
                        'top', 'unknown')), str(parsed['trigger_CS3']),
                       flydra_core.version.__version__),
                    'original file: %s' % (src_filename, ),
                    'dynamic model: %s' % (dynamic_model_name, ),
                    'reconstructor file: %s' % (save_reconstructor_filename, ),
                ]

                kalman_model = dynamic_models.get_kalman_model(
                    name=dynamic_model_name, dt=dt)

                h5saver = KalmanSaver(
                    h5file,
                    reconstructor,
                    cam_id2camns=cam_id2camns,
                    min_observations_to_save=min_observations_to_save,
                    textlog_save_lines=textlog_save_lines,
                    dynamic_model_name=dynamic_model_name,
                    dynamic_model=kalman_model,
                    debug=debug,
                    fake_timestamp=options.fake_timestamp,
                )

                tracker = Tracker(
                    reconstructor,
                    kalman_model=kalman_model,
                    save_all_data=True,
                    area_threshold=area_threshold,
                    area_threshold_for_orientation=options.
                    area_threshold_for_orientation,
                    disable_image_stat_gating=options.
                    disable_image_stat_gating,
                    orientation_consensus=options.orientation_consensus,
                    fake_timestamp=options.fake_timestamp,
                )

                tracker.set_killed_tracker_callback(h5saver.save_tro)

                # copy timestamp data into newly created kalmanized file
                if hasattr(results.root, 'trigger_clock_info'):
                    results.root.trigger_clock_info._f_copy(h5file.root)

            data2d = results.root.data2d_distorted

            frame_count = 0
            last_frame = None
            frame_data = collections.defaultdict(list)
            time_frame_all_cam_timestamps = []
            time_frame_all_camns = []

            if 1:
                time1 = time.time()
                if do_full_kalmanization:
                    print('loading all frame numbers...')
                frames_array = numpy.asarray(data2d.read(field='frame'))
                time2 = time.time()
                if do_full_kalmanization:
                    print('done in %.1f sec' % (time2 - time1))
                    if (not options.disable_image_stat_gating
                            and 'cur_val' in data2d.colnames):
                        warnings.warn(
                            'No pre-filtering of data based on zero '
                            'probability -- more data association work is '
                            'being done than necessary')

            if len(frames_array) == 0:
                # no data
                print('No 2D data. Nothing to do.')
                return

            if do_full_kalmanization:
                print('2D data range: approximately %d<frame<%d' % (
                    frames_array[0], frames_array[-1]))

            if do_full_kalmanization:
                accum_frame_spread = None
            else:
                accum_frame_spread = []
                accum_frame_spread_fno = []
                accum_frame_all_timestamps = []
                accum_frame_all_camns = []

            max_all_check_times = -np.inf

            for row_start, row_stop in utils.iter_non_overlapping_chunk_start_stops(
                    frames_array,
                    min_chunk_size=500000,
                    size_increment=1000,
                    status_fd=sys.stdout):

                print('Doing initial scan of approx frame range %d-%d.' % (
                    frames_array[row_start], frames_array[row_stop - 1]))

                this_frames_array = frames_array[row_start:row_stop]
                if start_frame is not None:
                    if this_frames_array.max() < start_frame:
                        continue
                if stop_frame is not None:
                    if this_frames_array.min() > stop_frame:
                        continue

                data2d_recarray = data2d.read(start=row_start, stop=row_stop)
                this_frames = data2d_recarray['frame']
                print('Examining frames %d-%d in detail.' % (this_frames[0],
                                                             this_frames[-1]))
                this_row_idxs = np.argsort(this_frames)
                for ii in range(len(this_row_idxs) + 1):
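                    # Note: the loop runs one extra iteration (ii ==
                    # len(this_row_idxs)) so that finish_frame is forced True
                    # and the data accumulated for the chunk's last frame is
                    # flushed.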

                    if ii >= len(this_row_idxs):
                        finish_frame = True
                    else:
                        finish_frame = False

                        this_row_idx = this_row_idxs[ii]

                        row = data2d_recarray[this_row_idx]

                        new_frame = row['frame']

                        if start_frame is not None:
                            if new_frame < start_frame:
                                continue
                        if stop_frame is not None:
                            if new_frame > stop_frame:
                                continue

                        if last_frame != new_frame:
                            if new_frame < last_frame:
                                print('new_frame', new_frame)
                                print('last_frame', last_frame)
                                raise RuntimeError(
                                    "expected continuously increasing "
                                    "frame numbers")
                            finish_frame = True

                    if finish_frame:
                        # new frame
                        ########################################
                        # Data for this frame is complete
                        if last_frame is not None:

                            this_frame_spread = 0.0
                            if len(time_frame_all_cam_timestamps) > 1:
                                check_times = np.array(
                                    time_frame_all_cam_timestamps)
                                check_times -= check_times.min()
                                this_frame_spread = check_times.max()
                                if accum_frame_spread is not None:
                                    accum_frame_spread.append(
                                        this_frame_spread)
                                    accum_frame_spread_fno.append(last_frame)

                                    accum_frame_all_timestamps.append(
                                        time_frame_all_cam_timestamps)
                                    accum_frame_all_camns.append(
                                        time_frame_all_camns)

                                max_all_check_times = max(
                                    this_frame_spread, max_all_check_times)
                                if this_frame_spread > sync_error_threshold:
                                    if this_frame_spread == max_all_check_times:
                                        print('%s frame %d: sync diff: %.1f msec' % (
                                            os.path.split(
                                                results.filename)[-1],
                                            last_frame,
                                            this_frame_spread * 1000.0))

                            if debug > 5:
                                print()
                                print('frame_data for frame %d' % (
                                    last_frame, ))
                                pprint.pprint(dict(frame_data))
                                print()
                            if do_full_kalmanization:
                                if this_frame_spread > sync_error_threshold:
                                    if debug > 5:
                                        print(
                                            'frame sync error (spread %.1f msec), '
                                            'skipping' %
                                            (this_frame_spread * 1e3, ))
                                        print()
                                    warnings.warn(
                                        'Synchronization error detected, '
                                        'but continuing analysis without '
                                        'potentially bad data.')
                                else:
                                    process_frame(reconstructor,
                                                  tracker,
                                                  last_frame,
                                                  frame_data,
                                                  camn2cam_id,
                                                  debug=debug)
                            frame_count += 1
                            if do_full_kalmanization and frame_count % 1000 == 0:
                                time2 = time.time()
                                dur = time2 - time1
                                fps = frame_count / dur
                                print('frame % 10d, kalmanization/data association speed: % 8.1f fps' % (
                                    last_frame, fps))
                                time1 = time2
                                frame_count = 0

                        ########################################
                        frame_data = collections.defaultdict(list)
                        time_frame_all_cam_timestamps = []  # clear values
                        time_frame_all_camns = []  # clear values
                        last_frame = new_frame

                    camn = row['camn']
                    try:
                        cam_id = camn2cam_id[camn]
                    except KeyError:
                        # This will happen if cameras were re-synchronized (and
                        # thus gain new cam_ids) immediately before saving was
                        # turned on in MainBrain. The reason is that the network
                        # buffers are still full of old data coming in from the
                        # cameras.
                        warnings.warn('WARNING: no cam_id for camn '
                                      '%d, skipping this row of data' % camn)
                        continue

                    if cam_id in exclude_cam_ids:
                        # exclude this camera
                        continue

                    if camn in exclude_camns:
                        # exclude this camera
                        continue

                    time_frame_all_cam_timestamps.append(row['timestamp'])
                    time_frame_all_camns.append(row['camn'])

                    if do_full_kalmanization:

                        x_distorted = row['x']
                        if numpy.isnan(x_distorted):
                            # drop point -- not found
                            continue
                        y_distorted = row['y']

                        (x_undistorted,
                         y_undistorted) = reconstructor.undistort(
                             cam_id, (x_distorted, y_distorted))

                        (area, slope, eccentricity,
                         frame_pt_idx) = (row['area'], row['slope'],
                                          row['eccentricity'],
                                          row['frame_pt_idx'])

                        if 'cur_val' in row.dtype.fields:
                            cur_val = row['cur_val']
                        else:
                            cur_val = None
                        if 'mean_val' in row.dtype.fields:
                            mean_val = row['mean_val']
                        else:
                            mean_val = None
                        if 'sumsqf_val' in row.dtype.fields:
                            sumsqf_val = row['sumsqf_val']
                        else:
                            sumsqf_val = None

                        # FIXME: cache this stuff?
                        pmat_inv = reconstructor.get_pmat_inv(cam_id)
                        camera_center = reconstructor.get_camera_center(cam_id)
                        camera_center = numpy.hstack((camera_center[:,
                                                                    0], [1]))
                        camera_center_meters = reconstructor.get_camera_center(
                            cam_id)
                        camera_center_meters = numpy.hstack(
                            (camera_center_meters[:, 0], [1]))
                        helper = reconstructor.get_reconstruct_helper_dict(
                        )[cam_id]
                        rise = slope
                        run = 1.0
                        if np.isinf(rise):
                            if rise > 0:
                                rise = 1.0
                                run = 0.0
                            else:
                                rise = -1.0
                                run = 0.0

                        (p1, p2, p3, p4, ray0, ray1, ray2, ray3, ray4,
                         ray5) = do_3d_operations_on_2d_point(
                             helper, x_undistorted, y_undistorted, pmat_inv,
                             camera_center, x_distorted, y_distorted, rise,
                             run)
                        line_found = not numpy.isnan(p1)
                        pluecker_hz_meters = (ray0, ray1, ray2, ray3, ray4,
                                              ray5)

                        # Keep in sync with kalmanize.py and data_descriptions.py
                        pt_undistorted = (x_undistorted, y_undistorted, area,
                                          slope, eccentricity, p1, p2, p3, p4,
                                          line_found, frame_pt_idx, cur_val,
                                          mean_val, sumsqf_val)

                        projected_line_meters = geom.line_from_HZline(
                            pluecker_hz_meters)

                        frame_data[camn].append(
                            (pt_undistorted, projected_line_meters))

            if do_full_kalmanization:
                tracker.kill_all_trackers()  # done tracking

        if not do_full_kalmanization:
            os.unlink(dest_filename)

    if accum_frame_spread is not None:
        # save spread data to file for analysis
        accum_frame_spread = np.array(accum_frame_spread)
        accum_frame_spread_fno = np.array(accum_frame_spread_fno)
        if options.dest_file is not None:
            accum_frame_spread_filename = options.dest_file
        else:
            accum_frame_spread_filename = src_filename + '.spreadh5'

        cam_ids = sorted(cam_id2camns.keys())
        camn_order = []
        for cam_id in cam_ids:
            camn_order.extend(cam_id2camns[cam_id])

        camn_order = np.array(camn_order)
        cam_id_array = np.array(cam_ids)

        N_cams = len(camn_order)
        N_frames = len(accum_frame_spread_fno)

        all_timestamps = np.empty((N_frames, N_cams), dtype=np.float64)
        all_timestamps.fill(np.nan)
        for i, (timestamps, camns) in enumerate(
                zip(accum_frame_all_timestamps, accum_frame_all_camns)):

            for j, camn in enumerate(camn_order):
                try:
                    idx = camns.index(camn)
                except ValueError:
                    continue  # not found, skip
                timestamp = timestamps[idx]
                all_timestamps[i, j] = timestamp

        h5 = tables.open_file(accum_frame_spread_filename, mode='w')
        h5.create_array(h5.root, 'spread', accum_frame_spread,
                        'frame timestamp spreads (sec)')
        h5.create_array(h5.root, 'framenumber', accum_frame_spread_fno,
                        'frame number')
        h5.create_array(h5.root, 'all_timestamps', all_timestamps,
                        'all timestamps')
        h5.create_array(h5.root, 'camn_order', camn_order, 'camn_order')
        h5.create_array(h5.root, 'cam_id_array', cam_id_array, 'cam_id_array')
        h5.close()
        print('saved %s' % accum_frame_spread_filename)

    if max_all_check_times > sync_error_threshold:
        if not options.keep_sync_errors:
            if do_full_kalmanization:
                print('max_all_check_times %.2f msec' % (max_all_check_times *
                                                         1000.0))
                handle, target = tempfile.mkstemp(
                    os.path.split(dest_filename)[1])
                os.close(handle)  # only the path is needed
                os.unlink(target)  # remove original file there
                shutil.move(dest_filename, target)

                raise ValueError(
                    'Synchronization errors exist in the data. Moved result file'
                    ' to ensure it is not confused with valid data. The new '
                    'location is: %s' % (target, ))

            else:
                sys.exit(1)  # sync error
    else:
        if not do_full_kalmanization:
            print('%s no sync differences greater than %.1f msec' % (
                os.path.split(src_filename)[-1],
                sync_error_threshold * 1000.0,
            ))
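For reference, a minimal call sketch; the filename is a placeholder, and the keywords restate defaults already visible above (dest_filename defaults to the source name with a '.kalmanized.h5' suffix, and the dynamic model falls back to 'EKF mamarama, units: mm'):

kalmanize(
    'data.h5',  # placeholder source filename
    dest_filename='data.kalmanized.h5',
    dynamic_model_name='EKF mamarama, units: mm',
)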
Example #3
def retrack_reuse_data_association(
    h5_filename=None,
    output_h5_filename=None,
    kalman_filename=None,
    start=None,
    stop=None,
    less_ram=False,
    show_progress=False,
    show_progress_json=False,
):
    if os.path.exists(output_h5_filename):
        raise RuntimeError("will not overwrite old file '%s'" %
                           output_h5_filename)

    ca = core_analysis.get_global_CachingAnalyzer()
    with ca.kalman_analysis_context(kalman_filename,
                                    data2d_fname=h5_filename) as h5_context:
        R = h5_context.get_reconstructor()
        if less_ram:
            ML_estimates_2d_idxs = h5_context.get_pytable_node(
                'ML_estimates_2d_idxs')
        else:
            ML_estimates_2d_idxs = h5_context.load_entire_table(
                'ML_estimates_2d_idxs')
        use_obj_ids = h5_context.get_unique_obj_ids()
        extra = h5_context.get_extra_info()
        dt = 1.0 / extra['frames_per_second']
        dynamic_model_name = extra['dynamic_model_name']
        kalman_model = dynamic_models.get_kalman_model(name=dynamic_model_name,
                                                       dt=dt)
        kalman_model['max_frames_skipped'] = 2**62  # close to max i64

        fps = extra['frames_per_second']
        camn2cam_id, cam_id2camns = h5_context.get_caminfo_dicts()

        parsed = h5_context.read_textlog_header()
        if 'trigger_CS3' not in parsed:
            parsed['trigger_CS3'] = 'unknown'

        textlog_save_lines = [
            'retrack_reuse_data_association running at %s fps, (top %s, trigger_CS3 %s, flydra_version %s)'
            %
            (str(fps), str(parsed.get('top', 'unknown')),
             str(parsed['trigger_CS3']), flydra_analysis.version.__version__),
            'original file: %s' % (kalman_filename, ),
            'dynamic model: %s' % (dynamic_model_name, ),
            'reconstructor file: %s' % (kalman_filename, ),
        ]

        with open_file_safe(output_h5_filename,
                            mode="w",
                            title="tracked Flydra data file",
                            delete_on_error=True) as output_h5:

            h5saver = KalmanSaver(
                output_h5,
                R,
                cam_id2camns=cam_id2camns,
                min_observations_to_save=0,
                textlog_save_lines=textlog_save_lines,
                dynamic_model_name=dynamic_model_name,
                dynamic_model=kalman_model,
            )

            # associate framenumbers with timestamps using 2d .h5 file
            if less_ram:
                data2d = h5_context.get_pytable_node('data2d_distorted',
                                                     from_2d_file=True)
                h5_framenumbers = data2d.cols.frame[:]
            else:
                data2d = h5_context.load_entire_table('data2d_distorted',
                                                      from_2d_file=True)
                h5_framenumbers = data2d['frame']
            h5_frame_qfi = result_utils.QuickFrameIndexer(h5_framenumbers)

            if show_progress:
                string_widget = StringWidget()
                objs_per_sec_widget = progressbar.FileTransferSpeed(
                    unit='obj_ids ')
                widgets = [
                    string_widget, objs_per_sec_widget,
                    progressbar.Percentage(),
                    progressbar.Bar(),
                    progressbar.ETA()
                ]
                pbar = progressbar.ProgressBar(
                    widgets=widgets, maxval=len(use_obj_ids)).start()

            for obj_id_enum, obj_id in enumerate(use_obj_ids):
                if show_progress:
                    string_widget.set_string('[obj_id: % 5d]' % obj_id)
                    pbar.update(obj_id_enum)
                if show_progress_json and obj_id_enum % 100 == 0:
                    rough_percent_done = float(obj_id_enum) / len(
                        use_obj_ids) * 100.0
                    result_utils.do_json_progress(rough_percent_done)

                tro = None
                first_frame_per_obj = True
                obj_3d_rows = h5_context.load_dynamics_free_MLE_position(
                    obj_id)
                for this_3d_row in obj_3d_rows:
                    # iterate over each sample in the current camera
                    framenumber = this_3d_row['frame']
                    if start is not None and framenumber < start:
                        continue
                    if stop is not None and framenumber > stop:
                        continue
                    h5_2d_row_idxs = h5_frame_qfi.get_frame_idxs(framenumber)
                    if len(h5_2d_row_idxs) == 0:
                        # At the start, there may be 3d data without 2d data.
                        continue

                    # If there was a 3D ML estimate, there must be 2D data.

                    frame2d = data2d[h5_2d_row_idxs]

                    obs_2d_idx = this_3d_row['obs_2d_idx']
                    kobs_2d_data = ML_estimates_2d_idxs[int(obs_2d_idx)]

                    # Parse VLArray.
                    this_camns = kobs_2d_data[0::2]
                    this_camn_idxs = kobs_2d_data[1::2]

                    # Now, for each camera viewing this object at this
                    # frame, extract images.
                    observation_camns = []
                    observation_idxs = []
                    data_dict = {}
                    used_camns_and_idxs = []
                    cam_ids_and_points2d = []

                    for camn, frame_pt_idx in zip(this_camns, this_camn_idxs):
                        try:
                            cam_id = camn2cam_id[camn]
                        except KeyError:
                            warnings.warn('camn %d not found' % (camn, ))
                            continue

                        # find 2D point corresponding to object
                        cond = ((frame2d['camn'] == camn) &
                                (frame2d['frame_pt_idx'] == frame_pt_idx))
                        idxs = np.nonzero(cond)[0]
                        if len(idxs) == 0:
                            # no frame for that camera (start or stop of file)
                            continue
                        elif len(idxs) > 1:
                            print("MEGA WARNING MULTIPLE 2D POINTS\n",
                                  camn, frame_pt_idx, "\n\n")
                            continue

                        idx = idxs[0]

                        frame2d_row = frame2d[idx]
                        x2d_real = frame2d_row['x'], frame2d_row['y']
                        pt_undistorted = R.undistort(cam_id, x2d_real)
                        x2d_area = frame2d_row['area']

                        observation_camns.append(camn)
                        observation_idxs.append(idx)
                        candidate_point_list = []
                        data_dict[camn] = candidate_point_list
                        used_camns_and_idxs.append((camn, frame_pt_idx, None))

                        # with no orientation
                        observed_2d = (pt_undistorted[0], pt_undistorted[1],
                                       x2d_area)

                        cam_ids_and_points2d.append((cam_id, observed_2d))

                    if first_frame_per_obj:
                        if len(cam_ids_and_points2d) < 2:
                            warnings.warn(
                                'some 2D data seems to be missing, cannot completely reconstruct'
                            )
                        else:
                            X3d = R.find3d(
                                cam_ids_and_points2d,
                                return_line_coords=False,
                                simulate_via_tracking_dynamic_model=kalman_model
                            )

                            # first frame
                            tro = TrackedObject(
                                R,
                                obj_id,
                                framenumber,
                                X3d,  # obs0_position
                                None,  # obs0_Lcoords
                                observation_camns,  # first_observation_camns
                                observation_idxs,  # first_observation_idxs
                                kalman_model=kalman_model,
                            )
                            del X3d
                            first_frame_per_obj = False
                    else:
                        tro.calculate_a_posteriori_estimate(
                            framenumber,
                            data_dict,
                            camn2cam_id,
                            skip_data_association=True,
                            original_camns_and_idxs=used_camns_and_idxs,
                            original_cam_ids_and_points2d=cam_ids_and_points2d,
                        )

                # done with all data for this obj_id
                if tro is not None:
                    tro.kill()
                    h5saver.save_tro(tro, force_obj_id=obj_id)
    if show_progress_json:
        result_utils.do_json_progress(100)
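A hypothetical invocation sketch; every filename below is a placeholder:

retrack_reuse_data_association(
    h5_filename='data2d.h5',               # file providing data2d_distorted
    output_h5_filename='retracked.h5',     # must not exist yet
    kalman_filename='data.kalmanized.h5',  # supplies MLE positions and the reconstructor
    less_ram=True,                         # stream tables instead of loading them whole
)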
Example #4
if not os.path.isdir(data_src):
    braidz = ".braidz"
    assert data_src.endswith(braidz)
    zipname = data_src
    data_dir = tempfile.mkdtemp(suffix=".braid")
    archive = zipfile.ZipFile(zipname, mode="r")
    archive.extractall(data_dir)
    unconverted_output_dir = data_src[:-len(braidz)] + '.unconverted'
    also_delete_braidz = data_src
else:
    data_dir = data_src
    unconverted_output_dir = data_src + '.unconverted'

delete_original = not args.no_delete
with open_file_safe(
    dest_filename, mode="w", title="tracked Flydra data file", delete_on_error=True
) as h5file:

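    # vars(args) is used because "2d_only" starts with a digit and therefore
    # cannot be read as an attribute (args.2d_only is a syntax error)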
    do_2d_only = vars(args)["2d_only"]

    kest_csv_fname = os.path.join(data_dir, "kalman_estimates.csv")
    kest_csv_gz_fname = os.path.join(data_dir, "kalman_estimates.csv.gz")
    if not (os.path.exists(kest_csv_fname) or os.path.exists(kest_csv_gz_fname)):
        do_2d_only = True

    if do_2d_only:
        print("converting 2D data only")
    else:
        convert_program = "compute-flydra1-compat"
        if not os.path.exists(computed_dir(data_dir)):
            cmd = [convert_program, "--help"]