def evaluate_runtime(start_time, detector_class, frames_iter, job_name):
    """
    Please DO NOT modify this part of code or the eval_kit
    Modification of the evaluation toolkit could result in cancellation of your award.

    In this function we create the detector instance. And evaluate the wall time for performing DeeperForensicsDetector.
    """
    # Instantiate the competitor-supplied detector class; any constructor
    # failure is re-raised unmodified so the eval frontend sees it.
    logging.info("Initializing face detector.")
    try:
        detector = detector_class()
    except:
        # send errors to the eval frontend
        raise
    logging.info("Detector initialized.")

    # Per-video results: predicted probability, wall-clock prediction time,
    # and number of frames handed to the detector.
    video_probs = {}
    video_times = {}
    frame_counts = {}
    videos_done = 0

    logging.info("Starting runtime evaluation")
    for video_id, frames in frames_iter:
        started_at = time.time()
        try:
            prob = detector.predict(frames)
            # The kit's contract: predict() must yield a plain Python float.
            assert isinstance(prob, float)
            frame_counts[video_id] = len(frames)
            video_probs[video_id] = prob
        except:
            # send errors to the eval frontend
            logging.error("Video id failed: {}".format(video_id))
            raise
        spent = time.time() - started_at
        video_times[video_id] = spent
        logging.info("video {} run time: {}".format(video_id, spent))

        videos_done += 1
        if videos_done % 100 == 0:
            logging.info("Finished {} videos".format(videos_done))

    logging.info(
        "all videos finished, uploading evaluation outputs for evaluation.")
    # Total wall time is measured from the caller-provided start timestamp,
    # so it includes detector construction and data iteration overhead.
    total_time = time.time() - start_time
    # send evaluation output to the server
    upload_eval_output(video_probs, video_times, frame_counts, job_name,
                       total_time)
def evaluate_runtime(detector_class, image_iter, job_id):
    """
    Please DO NOT modify this part of code or the eval_kit
    Modification of the evaluation toolkit could result in cancellation of your award.

    In this function we create the detector instance. And evaluate the wall time for performing face detection.

    Args:
        detector_class: class (not instance) implementing process_image(image)
            and returning an np.ndarray of detected boxes.
        image_iter: iterable of (image_id, image) pairs.
        job_id: opaque identifier forwarded to upload_eval_output.
    """
    # initialize the detector; constructor errors are re-raised so the
    # eval frontend can report them.
    logging.info("Initializing face detector.")
    try:
        detector = detector_class()
    except:
        # send errors to the eval frontend
        raise
    logging.info("Detector initialized.")

    # run the images one-by-one and get runtime.
    # FIX(review): dropped the dead `overall_time` accumulator — it was
    # summed every iteration but never read or uploaded anywhere.
    output_boxes = {}
    output_time = {}
    eval_cnt = 0

    logging.info("Starting runtime evaluation")
    for image_id, image in image_iter:
        time_before = time.time()
        try:
            boxes = detector.process_image(image)
            # The kit's contract: process_image() must yield an ndarray.
            assert isinstance(boxes, np.ndarray)
            output_boxes[image_id] = boxes
        except:
            # send errors to the eval frontend
            logging.error("Image id failed: {}".format(image_id))
            raise
        # Only the per-image wall time is recorded and uploaded.
        elapsed = time.time() - time_before
        output_time[image_id] = elapsed
        logging.info("image {} run time: {}".format(image_id, elapsed))

        eval_cnt += 1
        if eval_cnt % 100 == 0:
            logging.info("Finished {} images".format(eval_cnt))

    logging.info(
        "all image finished, uploading evaluation outputs for evaluation.")
    # send evaluation output to the server
    upload_eval_output(output_boxes, output_time, job_id)
def evaluate_runtime(detector_class, image_iter, job_name):
    """
    Please DO NOT modify this part of code or the eval_kit
    Modification of the evaluation toolkit could result in cancellation of your award.

    In this function we create the detector instance. And evaluate the wall time for performing CelebASpoofDetector.

    Args:
        detector_class: class (not instance) implementing predict(images).
        image_iter: iterable of (image_ids, images) batches; the two
            sequences in each pair are expected to be index-aligned.
        job_name: opaque identifier forwarded to upload_eval_output.
    """
    # initialize the detector; constructor errors are re-raised so the
    # eval frontend can report them.
    logging.info("Initializing detector.")
    try:
        detector = detector_class()
    except:
        # send errors to the eval frontend
        raise
    logging.info("Detector initialized.")

    # run the images batch-by-batch.
    # FIX(review): removed the dead locals `output_times = {}` and
    # `time_before = time.time()` — neither was ever read, and this
    # variant uploads no timing data (upload takes only probs + job_name).
    output_probs = {}
    eval_cnt = 0

    logging.info("Starting runtime evaluation")
    for image_id, image in image_iter:
        try:
            prob = detector.predict(image)
            # assert isinstance(prob, float)
            # prob[idx][1] — presumably predict() returns one score pair per
            # image and column 1 is the positive-class probability; verify
            # against the detector contract. TODO(review): confirm.
            for idx, i in enumerate(image_id):
                output_probs[i] = float(prob[idx][1])
        except:
            # send errors to the eval frontend
            logging.error("Image id failed: {}".format(image_id))
            raise
        eval_cnt += len(image)
        # NOTE(review): with a batch size that doesn't divide evenly into
        # multiples of 5, eval_cnt can skip over every multiple of 5 for a
        # while and this progress log fires rarely; left as-is to preserve
        # the kit's logging behavior.
        if eval_cnt % 5 == 0:
            logging.info("Finished {} images".format(eval_cnt))

    logging.info(
        "All images finished, uploading evaluation outputs for evaluation.")
    # send evaluation output to the server
    upload_eval_output(output_probs, job_name)