def create_annotation_collection(model_name, user_id, video_id, concept_ids):
    """Create an annotation collection row for a model run and return its id.

    The collection is named "(model)_(video)_(timestamp)" and records the
    user, video, and the display names of the requested concepts.
    """
    timestamp = datetime.datetime.now().strftime(r"%y-%m-%d_%H:%M:%S")
    collection_name = '_'.join([model_name, str(video_id), timestamp])
    description = f"By {model_name} on video {video_id} at {timestamp}"

    # Resolve concept ids to their display names for the collection row.
    concept_names = pd_query(
        """
        SELECT name
        FROM concepts
        WHERE id IN %s
        """,
        (tuple(concept_ids),),
    )['name'].tolist()

    new_row = (collection_name, description, [user_id], [video_id],
               concept_names, False, concept_ids)
    cursor.execute(
        """
        INSERT INTO annotation_collection
        (name, description, users, videos, concepts, tracking, conceptid)
        VALUES (%s, %s, %s, %s, %s, %s, %s)
        RETURNING id
        """,
        new_row,
    )
    con.commit()
    return int(cursor.fetchone()[0])
def upload_predict_progress(count, videoid, total_count, status):
    '''
    For updating the predict_progress psql database, which tracks prediction
    and video generation status.

    Arguments:
    count - frame of video (or index of annotation) being processed
    videoid - video being processed
    total_count - total number of frames in the video (or number of
        predictions + annotations)
    status - Indicates whether processing video or drawing annotation boxes
    '''
    print(
        f'count: {count} total_count: {total_count} vid: {videoid} status: {status}'
    )
    if count == 0:
        # First frame: initialise every tracked column in a single statement.
        cursor.execute(
            '''
            UPDATE predict_progress
            SET framenum=%s, status=%s, totalframe=%s''',
            (count, status, total_count),
        )
        con.commit()
        return
    # A framenum of -1 signals that this stage has finished.
    frame_value = -1 if count == total_count else count
    cursor.execute(
        '''
        UPDATE predict_progress
        SET framenum=%s''',
        (frame_value,),
    )
    con.commit()
def end_predictions():
    """Mark the prediction run as completed in predict_progress."""
    # Status level 4 on a video means that predictions have completed.
    cursor.execute("""UPDATE predict_progress SET status=4 """)
    con.commit()
def reset_model_params():
    """Reset the 'train' row of the model_params table to its defaults."""
    print("resetting model_params")
    sql = """
        Update model_params
        SET epochs = 0, min_images=0, model='',
            annotation_collections=ARRAY[]:: integer[],
            verified_only=null, include_tracking=null, version=0
        WHERE option='train'
    """
    cursor.execute(sql)
    con.commit()
def reset_predict_params():
    """
    Reset the predict_params table to its default (empty) values.
    """
    # BUG FIX: this previously printed "resetting model_params" —
    # copy-pasted from reset_model_params().
    print("resetting predict_params")
    cursor.execute(
        """
        UPDATE predict_params
        SET model='', userid=-1, concepts=ARRAY[]::integer[],
            upload_annotations=false, videos=ARRAY[]::integer[],
            version='0', create_collection=false
        """
    )
    con.commit()
def evaluate(video_id, model_username, concepts, upload_annotations=False,
             userid=None, create_collection=False):
    """Predict on one video, record the AI video, and upload scoring metrics.

    Arguments:
    video_id - id of the video to predict on
    model_username - "(model_name)-(version)" username for this model run
    concepts - concept ids the model predicts
    upload_annotations - whether predictions are written back as annotations
    userid - user the uploaded annotations are attributed to
    create_collection - whether to group uploaded annotations in a new
        annotation collection

    Raises:
    ValueError - if create_collection is requested without
        upload_annotations, or without a userid.
    """
    # file format: (video_id)_(model_name)-(version).mp4
    if create_collection:
        if not upload_annotations:
            raise ValueError("cannot create new annotation collection if "
                             "annotations aren't uploaded")
        if userid is None:
            raise ValueError("userid is None, cannot create new collection")
        collection_id = create_annotation_collection(model_username, userid,
                                                     video_id, concepts)
    else:
        collection_id = None

    filename = str(video_id) + "_" + model_username + ".mp4"
    print("ai video filename: {0}".format(filename))
    results, annotations = predict.predict_on_video(
        video_id, config.WEIGHTS_PATH, concepts, filename,
        upload_annotations, userid, collection_id)
    if results.empty:
        # Nothing was predicted; there is nothing to record or score.
        return

    # model_username is "(model_name)-(version)"; version is the last segment.
    username_split = model_username.split('-')
    version = username_split[-1]
    model_name = '-'.join(username_split[:-1])

    # add the entry to ai_videos
    cursor.execute('''
        INSERT INTO ai_videos (name, videoid, version, model_name)
        VALUES (%s, %s, %s, %s)''',
                   (filename, video_id, version, model_name))
    con.commit()
    print("done predicting")

    metrics = score_predictions(
        annotations, results, config.EVALUATION_IOU_THRESH, concepts
    )
    concept_counts = get_counts(results, annotations)
    metrics = metrics.set_index("conceptid").join(concept_counts)
    metrics_file = "metrics" + str(video_id) + ".csv"
    metrics.to_csv(metrics_file)

    # upload the data to s3 bucket
    print("uploading to s3 folder")
    # BUG FIX: filename.replace("mp4", "csv") replaced the FIRST "mp4"
    # anywhere in the name (wrong for a model name containing "mp4");
    # swap only the trailing extension instead.
    s3.upload_file(
        metrics_file,
        config.S3_BUCKET,
        config.S3_METRICS_FOLDER + filename[:-len("mp4")] + "csv",
        ExtraArgs={"ContentType": "application/vnd.ms-excel"},
    )
    print(metrics)
    con.commit()
def setup_predict_progress(verify_videos):
    """Reset the predict progress table for new predictions"""
    # Just to be sure in case of web app not deleting the progress
    # we clear the prediction progress table, then seed a single fresh
    # row covering every video to be verified.
    cursor.execute("""DELETE FROM predict_progress""")
    con.commit()
    seed_row = (0, 0, len(verify_videos))
    cursor.execute(
        """
        INSERT INTO predict_progress (videoid, current_video, total_videos)
        VALUES (%s, %s, %s)""",
        seed_row,
    )
    con.commit()
def evaluate_videos(concepts, verify_videos, user_model,
                    upload_annotations=False, userid=None,
                    create_collection=False):
    """
    Run evaluate on all the evaluation videos.

    Arguments:
    concepts - concept ids the model predicts
    verify_videos - iterable of video ids to evaluate
    user_model - "(model_name)-(version)" username for this model run
    upload_annotations / userid / create_collection - forwarded to evaluate()
    """
    # We go one by one as multiprocessing ran into memory issues
    for video_id in verify_videos:
        # BUG FIX: the video id was interpolated via an f-string; use a
        # parameterized query like the rest of the module (avoids SQL
        # injection and quoting issues).
        cursor.execute(
            """UPDATE predict_progress
               SET videoid = %s, current_video = current_video + 1""",
            (video_id,),
        )
        con.commit()
        evaluate(video_id, user_model, concepts, upload_annotations,
                 userid, create_collection)
    end_predictions()
def create_model_user(new_version, model_params, user_model):
    """Insert a new user for this model version, then update the
    model_versions table with the new model version
    """
    print("creating new user, updating model_versions table")
    cursor.execute(
        """
        INSERT INTO users (username, password, admin)
        VALUES (%s, 0, null)
        RETURNING *""",
        (user_model,),
    )
    con.commit()
    # RETURNING * — the first column of the new row is the user id.
    model_user_id = int(cursor.fetchone()[0])

    # Update the model_versions table with the new user
    version_row = (
        int(model_params["epochs"]),
        int(model_params["min_images"]),
        model_params["model"],
        model_params["annotation_collections"],
        bool(model_params["verified_only"]),
        bool(model_params["include_tracking"]),
        model_user_id,
        new_version,
        # NOTE(review): this calls datetime.now() while
        # create_annotation_collection calls datetime.datetime.now();
        # only one form can match the module's import — confirm which.
        datetime.now(),
    )
    cursor.execute(
        """
        INSERT INTO model_versions
        (epochs, min_images, model, annotation_collections, verified_only,
         include_tracking, userid, version, timestamp)
        VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)
        """,
        version_row,
    )
    con.commit()
    return model_user_id
""" SELECT * FROM models LEFT JOIN users u ON u.id=userid WHERE name=%s """, ("testv3",), ) model = cursor.fetchone() video_id = 86 concepts = model[2] userid = "270" model_username = "******" cursor.execute("""DELETE FROM predict_progress""") con.commit() cursor.execute( """ INSERT INTO predict_progress (videoid, current_video, total_videos) VALUES (%s, %s, %s)""", (0, 0, 1), ) con.commit() cursor.execute( """UPDATE predict_progress SET videoid = 86, current_video = current_video + 1""" ) con.commit() evaluate(video_id, model_username, concepts) cursor.execute(''' DELETE FROM predict_progress
def delete_model_user(model_user_id):
    """Delete a model's user row and its model_versions entry, then commit."""
    key = (model_user_id,)
    cursor.execute("""DELETE FROM model_versions WHERE userid=%s""", key)
    cursor.execute("""DELETE FROM users WHERE id=%s""", key)
    con.commit()
def _sql_in_clause(ids):
    """Render a SQL membership test for a non-empty id sequence.

    Returns "= x" for a single id and "in (a, b, ...)" otherwise.
    BUG FIX: str(tuple(ids)) renders a one-element sequence as "(5,)",
    which is invalid SQL; this helper handles that case for both the
    concept filter and the GOOD_USERS filter.
    """
    ids = list(ids)
    if len(ids) == 1:
        return f'= {str(ids[0])}'
    return 'in (' + ', '.join(str(i) for i in ids) + ')'


def predict_on_video(videoid, model_weights, concepts, filename,
                     upload_annotations=False, userid=None,
                     collection_id=None):
    """Run the model on one video and optionally upload the predictions.

    Arguments:
    videoid - id of the video to process
    model_weights - path to the weights file loaded by init_model
    concepts - concept ids to predict
    filename - output filename for the generated AI video
    upload_annotations - whether to write mid-frame predictions back as
        annotations
    userid - user the uploaded annotations are attributed to
    collection_id - annotation collection for the uploads (may be None)

    Returns (results, annotations) dataframes; results may be empty.
    """
    vid_filename = pd_query(f'''
        SELECT *
        FROM videos
        WHERE id ={videoid}''').iloc[0].filename
    print("Loading Video.")
    frames, fps = get_video_frames(vid_filename, videoid)

    # Get biologist annotations for video
    printing_with_time("Before database query")
    print(concepts)
    annotations = pd_query(f'''
        SELECT
          x1, y1, x2, y2,
          conceptid as label,
          null as confidence,
          null as objectid,
          videowidth, videoheight,
          ROUND(timeinvideo*{fps}) as frame_num
        FROM annotations
        WHERE videoid={videoid}
          AND userid {_sql_in_clause(config.GOOD_USERS)}
          AND conceptid {_sql_in_clause(concepts)}''')
    print(annotations)
    printing_with_time("After database query")

    printing_with_time("Resizing annotations.")
    annotations = annotations.apply(resize, axis=1)
    annotations = annotations.drop(['videowidth', 'videoheight'], axis=1)
    printing_with_time("Done resizing annotations.")

    print("Initializing Model")
    model = init_model(model_weights)

    printing_with_time("Predicting")
    results, frames = predict_frames(frames, fps, model, videoid)
    if results.empty:
        print("no predictions")
        return results, annotations
    results = propagate_conceptids(results, concepts)
    results = length_limit_objects(results, config.MIN_FRAMES_THRESH)

    # interweb human annotations and predictions
    if upload_annotations:
        printing_with_time("Uploading annotations")
        # filter results down to middle frames
        mid_frame_results = get_final_predictions(results)
        # upload these annotations
        mid_frame_results.apply(
            lambda prediction: handle_annotation(
                prediction, frames, videoid, config.RESIZED_HEIGHT,
                config.RESIZED_WIDTH, userid, fps, collection_id),
            axis=1)
        con.commit()

    printing_with_time("Generating Video")
    generate_video(filename, frames, fps, results, concepts, videoid,
                   annotations)
    printing_with_time("Done generating")
    return results, annotations