# Assumed imports for these snippets (module paths per the PenguTrack package
# layout; adjust if your layout differs):
import numpy as np
from PenguTrack.DataFileExtended import DataFileExtended


class Evaluator(object):
    def __init__(self):
        self.System_Markers = {}
        self.GT_Markers = {}
        self.gt_db = None
        self.system_db = None

    def load_GT_marker_from_clickpoints(self, path, type):
        # Load ground-truth markers of the given type and group them by frame.
        self.gt_db = DataFileExtended(path)
        markers = np.asarray([[m.image.sort_index, m.x, m.y] for m in self.gt_db.getMarkers(type=type)])
        # Map each frame index t to a 2xN array of x/y positions.
        self.GT_Markers.update({t: markers[markers.T[0] == t].T[1:] for t in set(markers.T[0])})

    def load_System_marker_from_clickpoints(self, path, type):
        # Load system markers, restricted to images that also exist in the GT
        # database (requires load_GT_marker_from_clickpoints to be called first).
        self.system_db = DataFileExtended(path)
        image_filenames = set([img.filename for img in self.gt_db.getImages()]).intersection([img.filename for img in self.system_db.getImages()])
        markers = np.asarray([[self.gt_db.getImage(filename=m.image.filename).sort_index, m.x, m.y] for m in self.system_db.getMarkers(type=type) if m.image.filename in image_filenames])
        # Map each frame index t to a 2xN array of x/y positions.
        self.System_Markers.update({t: markers[markers.T[0] == t].T[1:] for t in set(markers.T[0])})

    def save_marker_to_GT_db(self, markers, path, type):
        self.gt_db = DataFileExtended(path)

        with self.gt_db.db.atomic() as transaction:
            # Replace all markers of this type in a single transaction.
            self.gt_db.deleteMarkers(type=type)
            for t in markers:
                self.gt_db.setMarkers(image=self.gt_db.getImage(frame=t), x=markers[t].T[0], y=markers[t].T[1], type=type)

    def save_marker_to_System_db(self, markers, path, type):
        self.system_db = DataFileExtended(path)

        with self.system_db.db.atomic() as transaction:
            # Replace all markers of this type in a single transaction.
            self.system_db.deleteMarkers(type=type)
            for t in markers:
                self.system_db.setMarkers(image=self.system_db.getImage(frame=t), x=markers[t].T[0], y=markers[t].T[1], type=type)
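
# Minimal usage sketch for Evaluator (file names and marker types here are
# hypothetical; adjust to your clickpoints databases):
#
#   ev = Evaluator()
#   ev.load_GT_marker_from_clickpoints("gt.cdb", type="GT_Marker")
#   ev.load_System_marker_from_clickpoints("system.cdb", type="PT_Track_Marker")
#   # ev.GT_Markers and ev.System_Markers now map frame index -> 2xN positions
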
def AnalyzeDB(db_str):
    db = DataFileExtended(db_str)
    time_step = 110  # frame interval (presumably seconds)
    v_fac = 0.645 / (time_step / 60.)  # velocity factor: px/frame -> um/min, assuming 0.645 um/px
    perc = 30  # percentile threshold passed to deletable_tracks below
    step = 20  # step size passed to TCell_Analysis.measure below
    type = 'PT_Track_Marker'
    Frame_list = [f.sort_index for f in db.getImages()]
    Frames = np.amax(Frame_list)  # highest frame index in the database

    timer()
    Tracks = db.PT_tracks_from_db(type)
    timer("Normal Tracks")
    Tracks_woMeasurements = db.PT_tracks_from_db(type, get_measurements=False)
    timer("Tracks WO")

    Z_posis = Z_from_PT_Track(Tracks, v_fac)
    timer("Z_posis")

    tracks_to_delete = deletable_tracks(Z_posis, perc)
    timer("Trackstodelete")

    list2 = getXY(Tracks_woMeasurements)  # Create List for true dist
    timer("GETXY")

    drift, drift_list, missing_frame = TCell_Analysis.Drift(
        Frames, list2, 5)  # Create List with offsets
    timer("Drift")
    list2 = TCell_Analysis.list2_with_drift(
        db, drift, tracks_to_delete,
        del_track=True)  # Create list for true dist with drift_cor

    # Note: this immediately overwrites the list2_with_drift result above,
    # replacing it with drift-corrected XY positions (a zero offset is
    # prepended so frame 0 stays uncorrected).
    list2 = getXY_drift_corrected(Tracks_woMeasurements,
                                  np.vstack([[0, 0], drift]))
    timer("CorrectXY")

    list1 = analysis_list_from_tracks(
        Frames,
        Tracks_woMeasurements,
        drift=drift,
        Drift=True,
        Missing=missing_frame)  # Create List for analysis

    ### For Deleting Tracks above and below
    # list1[:] is a shallow copy: the dicts in list_copy are the same objects
    # as in list1, so the deletions below affect both lists.
    list_copy = list1[:]
    for l, m in enumerate(list1):
        for j in list(m.keys()):  # materialize keys to allow deletion while iterating
            if j in tracks_to_delete:
                del list_copy[l][j]

    directions, velocities, dirt, alternative_vel, vel_mean, dir_mean, alt_vel_mean = TCell_Analysis.measure(
        step, time_step, list1, Frames)  # Calculate directions and velocities
    timer("Measure")

    motile_percentage, mean_v, mean_dire, number, len_count, mo_p_al, me_v_al, me_d_al = TCell_Analysis.values(
        directions,
        velocities,
        db,
        dirt,
        alternative_vel,
        tracks_to_delete,
        del_Tracks=True)
    timer("Values")
    motile_per_true_dist, real_dirt = TCell_Analysis.motiletruedist(list2)
    timer("Motile True Dist")
class SegmentationEvaluator(object):
    def __init__(self):
        self.System_Masks = []
        self.GT_Masks = []
        self.gt_db = None
        self.system_db = None
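        # Per-timestamp metric dictionaries; several attribute names alias the
        # same dict object (e.g. true_negative_rate is specificity).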
        self.specificity = {}
        self.true_negative_rate = self.specificity
        self.sensitivity = {}
        self.true_positive_rate = self.sensitivity
        self.precision = {}
        self.positive_predictive_value = self.precision
        self.negative_predictive_value = {}
        self.false_positive_rate = {}
        self.fall_out = self.false_positive_rate
        self.false_negative_rate = {}
        self.false_discovery_rate = {}
        self.accuracy = {}
        self.F1_score = {}
        self.MCC = {}
        self.informedness = {}
        self.markedness = {}
        self.positive_rate = {}
        self.LeeLiu_rate = {}

    def load_GT_masks_from_clickpoints(self, path):
        self.gt_db = DataFileExtended(path)
        # Collect timestamps of all GT images that actually carry mask data.
        timestamps = self.gt_db.db.execute_sql(
            "select image.timestamp from image inner join mask on mask.image_id=image.id where mask.data is not null"
        ).fetchall()
        self.GT_Masks.extend([str(t[0]) for t in timestamps])

    def load_System_masks_from_clickpoints(self, path):
        self.system_db = DataFileExtended(path)
        # Collect timestamps of all system images that actually carry mask data.
        timestamps = self.system_db.db.execute_sql(
            "select image.timestamp from image inner join mask on mask.image_id=image.id where mask.data is not null"
        ).fetchall()
        self.System_Masks.extend([str(t[0]) for t in timestamps])

    def match(self, gt_inverse=True, system_inverse=True):
        # Compare system masks against GT masks for all shared timestamps.
        # The *_inverse flags invert a mask whose encoding stores foreground as 0.
        stamps = set(self.GT_Masks).intersection(self.System_Masks)
        for stamp in stamps:
            if system_inverse:
                sm = ~self.system_db.getMask(image=self.system_db.getImages(
                    timestamp=stamp)[0]).data.astype(bool)
            else:
                sm = self.system_db.getMask(image=self.system_db.getImages(
                    timestamp=stamp)[0]).data.astype(bool)
            if gt_inverse:
                gt = ~self.gt_db.getMask(image=self.gt_db.getImages(
                    timestamp=stamp)[0]).data.astype(bool)
            else:
                gt = self.gt_db.getMask(image=self.gt_db.getImages(
                    timestamp=stamp)[0]).data.astype(bool)
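            # Confusion-matrix counts, treating the GT mask as the positive
            # class and the system mask as the prediction.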
            P = np.sum(gt).astype(float)
            TP = np.sum(sm & gt).astype(float)
            FP = np.sum(sm & (~gt)).astype(float)
            N = np.sum(~gt).astype(float)
            TN = np.sum((~sm) & (~gt)).astype(float)
            FN = np.sum((~sm) & gt).astype(float)
            self.specificity.update({stamp: TN / N})
            self.sensitivity.update({stamp: TP / P})
            self.precision.update({stamp: TP / (TP + FP)})
            self.negative_predictive_value.update({stamp: TN / (TN + FN)})
            self.false_positive_rate.update({stamp: FP / N})
            self.false_negative_rate.update({stamp: FN / (TP + FN)})
            self.false_discovery_rate.update({stamp: FP / (TP + FP)})
            self.accuracy.update({stamp: (TP + TN) / (TP + FN + TN + FP)})
            self.F1_score.update({stamp: 2 * TP / (2 * TP + FP + FN)})
            self.MCC.update({
                stamp: (TP * TN - FP * FN) /
                np.sqrt((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN))
            })
            self.informedness.update({stamp: TP / P + TN / N - 1})
            self.markedness.update(
                {stamp: TP / (TP + FP) + TN / (TN + FN) - 1})
            self.positive_rate.update(
                {stamp: (TP + FP) / (sm.shape[0] * sm.shape[1])})
            self.LeeLiu_rate.update({
                stamp: (TP / P)**2 / ((TP + FP) / (sm.shape[0] * sm.shape[1]))
            })
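
# Minimal sketch of a segmentation evaluation run (file names hypothetical):
#
#   se = SegmentationEvaluator()
#   se.load_GT_masks_from_clickpoints("gt_masks.cdb")
#   se.load_System_masks_from_clickpoints("system_masks.cdb")
#   se.match()
#   # per-timestamp scores, e.g. se.F1_score, se.MCC, se.accuracy
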
Example #4
    ImgType = db.getMarkerType(name="PT_Track_Marker")
    # Keep only markers that belong to tracks with more than 3 points.
    Positions_Img = np.asarray([[m.x, m.y, m.image.sort_index]
                                for m in db.getMarkers(type=ImgType)
                                if m.track.markers.count() > 3])

    # Do Correction for Position Data
    from CameraTransform import CameraTransform
    CT = CameraTransform(14, [17, 9], [4608, 2592],
                         observer_height=31.,
                         angel_to_horizon=(np.pi / 2. - 0.24) * 180 / np.pi)
    orth_x, orth_y, orth_z = CT.transCamToWorld(Positions_Img.T[:2], Z=0.525)
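    # The CameraTransform arguments are presumably focal length [mm], sensor
    # size [mm], and image size [px]; transCamToWorld projects the pixel
    # positions onto the world plane Z = 0.525 (an assumed object height).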

    # Calculate Histogramms
    cx = cy = 2
    times = np.asarray(sorted([i.timestamp for i in db.getImages()]))
    scale = 1. / (cx * cy) / ((times[-1] - times[0]).seconds / 3600.)
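    # scale converts raw bin counts to counts per bin area (cx*cy) per hour.
    # Caveat: timedelta.seconds ignores whole days, so this assumes a
    # recording shorter than 24 h.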
    hist, binx, biny = np.histogram2d(orth_x,
                                      orth_y,
                                      bins=[
                                          int((max(orth_x) - min(orth_x)) / cx),
                                          int((max(orth_y) - min(orth_y)) / cy)
                                      ],
                                      range=[[min(orth_x), max(orth_x)],
                                             [min(orth_y), max(orth_y)]])
    hist *= scale
    hist[hist == 0.] = np.nan

    cx = cy = 6
Example #5
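# (This fragment starts mid-script: ss is presumably scipy.stats, and
# MultiFilter, KalmanFilter, and ViBeSegmentation come from PenguTrack's
# Filters/Detectors modules; r, object_size, Q, model, and db are defined
# earlier in the original script.)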
R = np.diag([r*object_size, r*object_size])  # Measurement uncertainty

State_Dist = ss.multivariate_normal(cov=Q)  # Initialize Distributions for Filter
Meas_Dist = ss.multivariate_normal(cov=R)  # Initialize Distributions for Filter
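# The two normal distributions are used by the filter to evaluate
# state-transition (covariance Q) and measurement (covariance R) likelihoods.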

# Initialize Filter
MultiKal = MultiFilter(KalmanFilter, model, np.diag(Q),
                       np.diag(R), meas_dist=Meas_Dist, state_dist=State_Dist)
# MultiKal.LogProbabilityThreshold = -300.
MultiKal.MeasurementProbabilityThreshold = 0.
# MultiKal = MultiFilter(Filter, model)
print("Initialized Tracker")

# Init_Background from Image_Median
# Initialize segmentation with init_image and start updating the first 10 frames.
N = db.getImages().count()
init = np.array(np.median([np.asarray(db.getImage(frame=j).data, dtype=int)
                           for j in np.arange(0, 10)], axis=0), dtype=int)  # np.int was removed in NumPy >= 1.24
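# ViBe parameters (per the ViBe algorithm): n samples kept per pixel model,
# n_min matches required to classify a pixel as background, r matching
# radius, phi update subsampling factor.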
# VB = ViBeSegmentation(n=2, init_image=init, n_min=2, r=25, phi=1)
VB = ViBeSegmentation(n=3, init_image=init, n_min=3, r=40, phi=1)
print("Debug")
for i in range(10,20):
    mask = VB.detect(db.getImage(frame=i).data, do_neighbours=False)
print("Detecting!")


import matplotlib.pyplot as plt
# for i in range(10306,10311):
#     mask = VB.detect(db.getImage(frame=i).data, do_neighbours=False)
#     fig, ax = plt.subplots(1)
#     # ax.imshow(np.vstack((mask*2**8, db.getImage(frame=i).data)))