def fetch_meta(self, track: Track) -> Track:
    """Populate ``track.info`` with metadata for its query.

    The cache is consulted first; on a miss, YouTube is searched and the
    best thumbnail / parsed duration are extracted from the top result.

    :param track: track whose ``query`` identifies the media to look up.
    :return: the same track, with ``info`` filled in.
    :raises Exception: when the search yields nothing usable.
    """
    query = track.query

    # Fast path: a previous lookup for this query is already cached.
    cached_info = self.check_cache(query)
    if cached_info is not None:
        track.info = cached_info
        return track

    search = VideosSearch(query, limit=1)
    hits = search.result()["result"]
    if not hits:
        raise Exception("No results")

    top_hit = hits[0]
    if isinstance(top_hit, dict):
        # Pick the thumbnail with the largest pixel area.
        best_thumb = max(
            top_hit["thumbnails"],
            key=lambda thumb: thumb["width"] * thumb["height"],
        )
        # Fold "H:MM:SS" / "M:SS" into a total number of seconds.
        total_seconds = 0
        for piece in top_hit["duration"].split(":"):
            total_seconds = 60 * total_seconds + int(piece)

        track.info = TrackInfo(
            source_id="youtube_" + top_hit["id"],
            title=top_hit["title"],
            artist=top_hit["channel"]["name"],
            thumbnail=best_thumb["url"],
            duration=total_seconds,
        )
        self.add_to_cache(query, track)
        return track

    raise Exception("Failed to search")
def load_tracks_meta(self, metadata, tag_precedence):
    """Build Track objects from the ``"tracks"`` entries of clip metadata.

    :param metadata: clip metadata dict; missing ``"tracks"`` is treated as empty.
    :param tag_precedence: tag-priority ordering forwarded to each track loader.
    :return: list of tracks whose metadata loaded successfully.
    """
    loaded = []
    for raw_meta in metadata.get("tracks", []):
        candidate = Track(self.get_id())
        accepted = candidate.load_track_meta(
            raw_meta,
            self.frames_per_second,
            tag_precedence,
            self.config.min_tag_confidence,
        )
        # Tracks that fail to load (e.g. below confidence) are dropped.
        if accepted:
            loaded.append(candidate)
    return loaded
def load_tracks_meta(self, metadata, include_filtered_channel, tag_precedence):
    """Build Track objects from the ``"Tracks"`` entries of clip metadata.

    :param metadata: clip metadata dict; must contain a ``"Tracks"`` key.
    :param include_filtered_channel: forwarded to each track loader.
    :param tag_precedence: tag-priority ordering forwarded to each track loader.
    :return: list of tracks whose metadata loaded successfully.
    """
    parsed = []
    for raw_meta in metadata["Tracks"]:
        candidate = Track(self.get_id())
        accepted = candidate.load_track_meta(
            raw_meta,
            self.frames_per_second,
            include_filtered_channel,
            tag_precedence,
            self.config.min_tag_confidence,
        )
        # Tracks that fail to load (e.g. below confidence) are dropped.
        if accepted:
            parsed.append(candidate)
    return parsed
def _create_new_tracks(self, clip, unmatched_regions):
    """Create and register a new track for each unmatched region.

    Regions that substantially overlap an existing active track are
    skipped — a tail can otherwise get tracked as a separate object.

    :param clip: clip whose active track list receives the new tracks.
    :param unmatched_regions: regions no existing track claimed this frame.
    :return: set of newly created tracks.
    """
    created = set()
    for region in unmatched_regions:
        # make sure we don't overlap with existing tracks. This can happen if a tail gets tracked as a new object
        overlap_areas = [
            existing.last_bound.overlap_area(region)
            for existing in clip.active_tracks
        ]
        if overlap_areas and max(overlap_areas) > region.area * 0.25:
            continue

        fresh = Track.from_region(clip, region, ClipTrackExtractor.VERSION)
        created.add(fresh)
        clip._add_active_track(fresh)
        self.print_if_verbose(
            "Creating a new track {} with region {} mass{} area {} frame {}".format(
                fresh.get_id(),
                region,
                fresh.last_bound.mass,
                fresh.last_bound.area,
                region.frame_number,
            )
        )
    return created
def _track_meta_is_valid(self, track_meta):
    """
    Tracks are valid if their confidence meets the threshold and they are
    not in the excluded_tags list, defined in the config.

    :param track_meta: metadata dict for one track (may carry "TrackTags").
    :return: True when the track has data, an acceptable human tag, and is
        not manually tagged with an excluded label.
    """
    min_confidence = self.track_config.min_tag_confidence
    track_data = track_meta.get("data")
    if not track_data:
        return False

    track_tags = track_meta.get("TrackTags", [])
    excluded = self.config.load.excluded_tags
    # BUG FIX: the original tested `tag in excluded` with `tag` being the
    # whole tag dict, so the exclusion list could never match; compare the
    # tag's label ("what") instead.
    manually_excluded = [
        tag
        for tag in track_tags
        if not tag.get("automatic", False) and tag.get("what") in excluded
    ]
    if manually_excluded:
        return False

    track_tag = Track.get_best_human_tag(
        track_tags, self.config.load.tag_precedence, min_confidence
    )
    if track_tag is None:
        return False
    label = track_tag.get("what")
    confidence = track_tag.get("confidence", 0)
    # Also check the best tag's label against the configured exclusions
    # (the original compared it against a list of tag dicts — always True).
    return bool(label) and label not in excluded and confidence >= min_confidence
def _track_meta_is_valid(self, track_meta):
    """
    Tracks are valid if their confidence meets the threshold and they are
    not in the excluded_tags list, defined in the config.

    :param track_meta: metadata dict for one track.
    :return: truthy when the track has data and a sufficiently confident,
        non-excluded human tag.
    """
    min_confidence = self.track_config.min_tag_confidence
    excluded_tags = self.config.excluded_tags

    # A track without data can never be valid.
    if not track_meta.get("data"):
        return False

    best_tag = Track.get_best_human_tag(
        track_meta, self.config.load.tag_precedence, min_confidence
    )
    if best_tag is None:
        return False

    label = best_tag.get("what")
    score = best_tag.get("confidence", 0)
    return label and label not in excluded_tags and score >= min_confidence
def build_track(self, query: str) -> Track:
    """Wrap a raw search query in a fresh Track instance."""
    new_track = Track(query)
    return new_track
def identify_track(self, clip: Clip, track: Track): """ Runs through track identifying segments, and then returns it's prediction of what kind of animal this is. One prediction will be made for every frame. :param track: the track to identify. :return: TrackPrediction object """ # uniform prior stats start with uniform distribution. This is the safest bet, but means that # it takes a while to make predictions. When off the first prediction is used instead causing # faster, but potentially more unstable predictions. UNIFORM_PRIOR = False num_labels = len(self.classifier.labels) prediction_smooth = 0.1 smooth_prediction = None smooth_novelty = None prediction = 0.0 novelty = 0.0 try: fp_index = self.classifier.labels.index("false-positive") except ValueError: fp_index = None # go through making classifications at each frame # note: we should probably be doing this every 9 frames or so. state = None track_prediction = self.predictions.get_or_create_prediction(track) for i, region in enumerate(track.bounds_history): frame = clip.frame_buffer.get_frame(region.frame_number) track_data = track.crop_by_region(frame, region) # note: would be much better for the tracker to store the thermal references as it goes. 
# frame = clip.frame_buffer.get_frame(frame_number) thermal_reference = np.median(frame.thermal) # track_data = track.crop_by_region_at_trackframe(frame, i) if i % self.FRAME_SKIP == 0: # we use a tighter cropping here so we disable the default 2 pixel inset frames = Preprocessor.apply([track_data], [thermal_reference], default_inset=0) if frames is None: logging.info( "Frame {} of track could not be classified.".format( region.frame_number)) return frame = frames[0] ( prediction, novelty, state, ) = self.classifier.classify_frame_with_novelty(frame, state) # make false-positive prediction less strong so if track has dead footage it won't dominate a strong # score if fp_index is not None: prediction[fp_index] *= 0.8 # a little weight decay helps the model not lock into an initial impression. # 0.98 represents a half life of around 3 seconds. state *= 0.98 # precondition on weight, segments with small mass are weighted less as we can assume the error is # higher here. mass = region.mass # we use the square-root here as the mass is in units squared. # this effectively means we are giving weight based on the diameter # of the object rather than the mass. mass_weight = np.clip(mass / 20, 0.02, 1.0)**0.5 # cropped frames don't do so well so restrict their score cropped_weight = 0.7 if region.was_cropped else 1.0 prediction *= mass_weight * cropped_weight if smooth_prediction is None: if UNIFORM_PRIOR: smooth_prediction = np.ones([num_labels ]) * (1 / num_labels) else: smooth_prediction = prediction smooth_novelty = 0.5 else: smooth_prediction = ( 1 - prediction_smooth ) * smooth_prediction + prediction_smooth * prediction smooth_novelty = ( 1 - prediction_smooth ) * smooth_novelty + prediction_smooth * novelty track_prediction.classified_frame(region.frame_number, smooth_prediction, smooth_novelty) return track_prediction
def apply_matchings(self, regions):
    """
    Work out the best matchings between tracks and regions of interest for the
    current frame. Create any new tracks required.

    :param regions: candidate regions detected in the current frame.

    Side effects: matched tracks get the region appended; unmatched regions may
    spawn new tracks (added to ``self.tracks`` / ``self.active_tracks``);
    tracks unseen for too long are dropped from ``self.active_tracks``.
    """
    scores = []
    for track in self.active_tracks:
        for region in regions:
            distance, size_change = track.get_track_region_score(region)
            # we give larger tracks more freedom to find a match as they might move quite a bit.
            max_distance = np.clip(7 * (track.mass ** 0.5), 30, 95)
            # BUG FIX: the original assigned this threshold to `size_change`
            # itself and then tested `size_change > size_change` (always
            # False), so the size-change gate never rejected anything.
            max_size_change = np.clip(track.mass, 50, 500)
            if distance > max_distance:
                continue
            if size_change > max_size_change:
                continue
            scores.append((distance, track, region))

    # apply matchings greedily. Low score is best.
    matched_tracks = set()
    used_regions = set()
    new_tracks = set()
    scores.sort(key=lambda record: record[0])
    results = []
    for (score, track, region) in scores:
        # don't match a track twice
        if track in matched_tracks or region in used_regions:
            continue
        track.add_frame(region)
        used_regions.add(region)
        matched_tracks.add(track)
        results.append((track, score))

    # create new tracks for any unmatched regions
    for region in regions:
        if region in used_regions:
            continue
        # make sure we don't overlap with existing tracks. This can happen if a tail gets tracked as a new object
        overlaps = [
            track.bounds.overlap_area(region) for track in self.active_tracks
        ]
        if len(overlaps) > 0 and max(overlaps) > (region.area * 0.25):
            continue
        track = Track()
        track.add_frame(region)
        track.start_frame = self.frame_on
        new_tracks.add(track)
        self.active_tracks.append(track)
        self.tracks.append(track)

    # check if any tracks did not find a matched region
    for track in [
        track
        for track in self.active_tracks
        if track not in matched_tracks and track not in new_tracks
    ]:
        # we lost this track.  start a count down, and if we don't get it back soon remove it
        track.frames_since_target_seen += 1
        track.add_blank_frame()

    # remove any tracks that have not seen their target in a while
    self.active_tracks = [
        track
        for track in self.active_tracks
        if track.frames_since_target_seen < self.config.remove_track_after_frames
    ]
def identify_track(self, clip: Clip, track: Track): """ Runs through track identifying segments, and then returns it's prediction of what kind of animal this is. One prediction will be made for every frame. :param track: the track to identify. :return: TrackPrediction object """ # go through making classifications at each frame # note: we should probably be doing this every 9 frames or so. state = None if self.kerasmodel: track_prediction = self.classifier.classify_track(clip, track) self.predictions.prediction_per_track[ track.get_id()] = track_prediction else: track_prediction = self.predictions.get_or_create_prediction(track) for i, region in enumerate(track.bounds_history): frame = clip.frame_buffer.get_frame(region.frame_number) track_data = track.crop_by_region(frame, region) # note: would be much better for the tracker to store the thermal references as it goes. # frame = clip.frame_buffer.get_frame(frame_number) thermal_reference = np.median(frame.thermal) # track_data = track.crop_by_region_at_trackframe(frame, i) if i % self.FRAME_SKIP == 0: # we use a tighter cropping here so we disable the default 2 pixel inset frames = preprocess_segment([track_data], [thermal_reference], default_inset=0) if frames is None: logging.info( "Frame {} of track could not be classified.". format(region.frame_number)) continue frame = frames[0] ( prediction, novelty, state, ) = self.classifier.classify_frame_with_novelty( frame, state) # make false-positive prediction less strong so if track has dead footage it won't dominate a strong # score # a little weight decay helps the model not lock into an initial impression. # 0.98 represents a half life of around 3 seconds. state *= 0.98 # precondition on weight, segments with small mass are weighted less as we can assume the error is # higher here. mass = region.mass # we use the square-root here as the mass is in units squared. 
# this effectively means we are giving weight based on the diameter # of the object rather than the mass. mass_weight = np.clip(mass / 20, 0.02, 1.0)**0.5 # cropped frames don't do so well so restrict their score cropped_weight = 0.7 if region.was_cropped else 1.0 track_prediction.classified_frame( region.frame_number, prediction, mass_scale=mass_weight * cropped_weight, novelty=novelty, ) return track_prediction
import sys

from track.track import Track
from utils import kml
import format

# Default input file, used when no path is given on the command line.
DEFAULT_KML = "../parcours_439080.kml"


def main(fname=DEFAULT_KML):
    """Load a KML track, split it into segments sized for an A4 render.

    :param fname: path to the KML file to load.
    """
    path, name = kml.read_track(fname, max_points=2000)
    print(name, "loaded,", len(path), "points found")

    f = format.A4(150)
    print("Target size", f.px)

    tss = Track.from_path(path, verbose=True, format=f)
    print("\nTrack divided in", len(tss), "segments.")
    tss.load()


if __name__ == "__main__":
    # Allow an optional KML path as the first CLI argument; fall back to the
    # original hard-coded file so existing usage is unchanged.
    main(sys.argv[1] if len(sys.argv) > 1 else DEFAULT_KML)