import os
import sys

__exec_dir = sys.path[0]
while os.path.basename(__exec_dir) != "src":
    __exec_dir = os.path.dirname(__exec_dir)
sys.path.insert(0, __exec_dir)

from datasets.common.utils import get_dataset
from utils import ArgumentList, Run
from utils.common.files import get_full_path
from utils.common.logging import logging_info
from utils.common.terminal import query_yes_no
from utils.tfu import tfu_load_graph, tfu_set_logging
from utils.visualization import draw_single_box, get_distinct_colors

if __name__ == "__main__":
    # parse arguments
    argument_list = ArgumentList(
        description="Apply an exported model on a single image.")
    argument_list.add_image_filename_argument("The input image filename.",
                                              required=True)
    argument_list.add_model_argument("The model used for training.",
                                     default=None,
                                     required=True)
    argument_list.add_model_name_argument("The exported model name.",
                                          required=True)
    argument_list.add_dataset_argument("The dataset used for training.",
                                       default=None)
    argument_list.add_tf_verbosity_argument("Tensorflow verbosity.",
                                            default="info")
    argument_list.add_tf_min_log_level_argument(
        "Tensorflow minimum log level.", default=3)
    arguments = argument_list.parse()
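
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): how an exported
# frozen graph is typically applied to a single image with plain
# TensorFlow 1.x. The tensor names "image_input:0" and "detections:0" are
# assumptions made for illustration only; the project itself wraps this
# logic in tfu_load_graph and its visualization helpers.
# ---------------------------------------------------------------------------
import numpy as np
import tensorflow as tf
from PIL import Image


def run_frozen_graph_on_image(graph_filename, image_filename):
    # read the serialized GraphDef of the exported model
    with tf.gfile.GFile(graph_filename, "rb") as file:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(file.read())

    # import the graph definition into a fresh graph
    graph = tf.Graph()
    with graph.as_default():
        tf.import_graph_def(graph_def, name="")

    # load the image as a batch of size one
    image = np.expand_dims(np.array(Image.open(image_filename)), axis=0)

    # run inference; the tensor names are placeholders for the real ones
    with tf.Session(graph=graph) as session:
        detections = session.run("detections:0",
                                 feed_dict={"image_input:0": image})
    return detections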
    # update image
    image.set_data(frame)


def handle_key_press_event(event):
    """Handle any key press events in the Matplotlib window.
    """
    if event.key == "q":
        plt.close(event.canvas.figure)


if __name__ == "__main__":
    # parse arguments
    argument_list = ArgumentList(
        description=
        "Apply an exported model on live data like a video or a webcam.")
    argument_list.add_video_filename_argument("The input video filename.",
                                              required=False)
    argument_list.add_model_argument("The model used for training.",
                                     default=None,
                                     required=True)
    argument_list.add_model_name_argument("The exported model name.",
                                          required=True)
    argument_list.add_dataset_argument("The dataset used for training.",
                                       default=None)
    argument_list.add_tf_verbosity_argument("Tensorflow verbosity.",
                                            default="info")
    argument_list.add_tf_min_log_level_argument(
        "Tensorflow minimum log level.", default=3)
    arguments = argument_list.parse()
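
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): a minimal Matplotlib
# loop matching the pattern above -- an image artist updated per frame via
# set_data() and a key handler that closes the window on "q". The random
# frames are stand-ins for decoded video frames with detections drawn on them.
# ---------------------------------------------------------------------------
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.animation import FuncAnimation


def main():
    figure, axes = plt.subplots()
    image = axes.imshow(np.zeros((480, 640, 3), dtype=np.uint8))

    def update(frame_index):
        # update image (placeholder frame instead of a real video frame)
        frame = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
        image.set_data(frame)
        return [image]

    def handle_key_press_event(event):
        # close the window when "q" is pressed
        if event.key == "q":
            plt.close(event.canvas.figure)

    figure.canvas.mpl_connect("key_press_event", handle_key_press_event)
    animation = FuncAnimation(figure, update, interval=40)
    plt.show()


if __name__ == "__main__":
    main()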
    # skip if run does not exist
    if not os.path.exists(run.base_path):
        return

    # ask user to keep the run
    should_keep_run = query_yes_no("Should the run '{}' be kept?".format(
        os.path.basename(run.base_path)),
                                   default="yes")
    if not should_keep_run:
        shutil.rmtree(run.base_path)


if __name__ == "__main__":
    # parse arguments
    argument_list = ArgumentList(
        description="Train a model for object detection.")
    argument_list.add_model_argument("The model to use for training.",
                                     default="ssd_vgg_300")
    argument_list.add_dataset_argument("The dataset to use for training.",
                                       default="voc2007")
    argument_list.add_dataset_split_argument(
        "The dataset split to use for training.",
        default="train",
        required=False)
    argument_list.add_random_seed_argument(
        "The global random seed used for determinism.", default=1807241)
    argument_list.add_op_random_seed_argument(
        "The operation random seed used for determinism.", default=1807242)
    argument_list.add_num_parallel_calls_argument(
        "Number of parallel calls for preprocessing the data.", default=6)
    argument_list.add_prefetch_buffer_size_argument(
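
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the "keep or delete
# the run directory" pattern above, restated with only the standard library.
# The real script uses the project's query_yes_no helper and Run objects; the
# plain input() prompt here is a stand-in.
# ---------------------------------------------------------------------------
import os
import shutil


def clean_up_run(run_base_path):
    # skip if the run directory does not exist
    if not os.path.exists(run_base_path):
        return

    # ask the user whether the run should be kept
    answer = input("Should the run '{}' be kept? [Y/n] ".format(
        os.path.basename(run_base_path)))
    should_keep_run = answer.strip().lower() not in ("n", "no")

    # delete the whole run directory otherwise
    if not should_keep_run:
        shutil.rmtree(run_base_path)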
import os
import sys

__exec_dir = sys.path[0]
while os.path.basename(__exec_dir) != "src":
    __exec_dir = os.path.dirname(__exec_dir)
sys.path.insert(0, __exec_dir)

from data import DataProvider
from data.preprocessors import BBoxPreprocessor, DefaultPreprocessor, ImagePreprocessor
from datasets.common.utils import get_dataset
from models.ssd.common.utils import get_model
from utils import ArgumentList, AveragePrecision, Run
from utils.common.logging import logging_error, logging_info, logging_eval
from utils.common.terminal import query_yes_no
from utils.tfu import tfu_get_uninitialized_variables, tfu_set_logging

if __name__ == "__main__":
    # parse arguments
    argument_list = ArgumentList(
        description="Evaluate model from a run and compute mAP.")
    argument_list.add_run_argument("The run from which to evaluate the model.",
                                   required=True)
    argument_list.add_dataset_argument("The dataset to use for evaluation.",
                                       default="voc2007")
    argument_list.add_dataset_split_argument(
        "The dataset split to use for evaluation.", default="test")
    argument_list.add_num_parallel_calls_argument(
        "Number of parallel calls for preprocessing the data.", default=6)
    argument_list.add_prefetch_buffer_size_argument(
        "Buffer size for prefetching the data.", default=2)
    argument_list.add_batch_size_argument("Batch size for evaluation.",
                                          default=32)
    argument_list.add_input_device_argument("Device for processing inputs.",
                                            default="/cpu:0")
    argument_list.add_inference_device_argument("Device for inference.",
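
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): a generic per-class
# average-precision computation of the kind used when reporting mAP. It
# assumes detections have already been matched to ground truth (true-positive
# flags sorted by descending confidence); the project's AveragePrecision
# helper encapsulates the matching and accumulation itself.
# ---------------------------------------------------------------------------
import numpy as np


def average_precision(true_positive_flags, num_ground_truth_boxes):
    # detections are assumed sorted by descending confidence;
    # true_positive_flags[i] is 1 if detection i matched a ground-truth box
    flags = np.asarray(true_positive_flags, dtype=np.float64)
    true_positives = np.cumsum(flags)
    false_positives = np.cumsum(1.0 - flags)
    recall = true_positives / float(num_ground_truth_boxes)
    precision = true_positives / np.maximum(true_positives + false_positives,
                                            np.finfo(np.float64).eps)

    # VOC-style 11-point interpolated average precision
    ap = 0.0
    for threshold in np.linspace(0.0, 1.0, 11):
        mask = recall >= threshold
        ap += (np.max(precision[mask]) if np.any(mask) else 0.0) / 11.0
    return ap


# example: average_precision([1, 1, 0, 1, 0], num_ground_truth_boxes=4)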
import os
import sys

__exec_dir = sys.path[0]
while os.path.basename(__exec_dir) != "src":
    __exec_dir = os.path.dirname(__exec_dir)
sys.path.insert(0, __exec_dir)

from utils import ArgumentList, Run
from utils.common.files import get_full_path
from utils.common.logging import logging_error, logging_info
from utils.common.terminal import query_yes_no
from utils.tfu import tfu_set_logging

if __name__ == "__main__":
    # parse arguments
    argument_list = ArgumentList(
        description="Export the model from a run for inference.")
    argument_list.add_run_argument("The run from which to export the model.",
                                   required=True)
    argument_list.add_model_name_argument("The output model name.",
                                          required=True)
    argument_list.add_tf_verbosity_argument("Tensorflow verbosity.",
                                            default="info")
    argument_list.add_tf_min_log_level_argument(
        "Tensorflow minimum log level.", default=3)
    arguments = argument_list.parse()

    # load run
    run = Run(run_id=arguments.run)
    if not run.open():
        logging_error("There is no run '{}'.".format(arguments.run))
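
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): how a trained
# TensorFlow 1.x checkpoint is typically frozen into a single inference
# graph. The checkpoint path, output node names, and output filename are
# assumptions; the real script derives them from the Run object and the
# exported model name argument.
# ---------------------------------------------------------------------------
import tensorflow as tf


def export_frozen_graph(checkpoint_path, output_node_names, output_filename):
    # output_node_names is a list of graph node names, e.g. ["detections"]
    with tf.Session() as session:
        # restore the graph structure and the trained weights
        saver = tf.train.import_meta_graph(checkpoint_path + ".meta")
        saver.restore(session, checkpoint_path)

        # convert all variables to constants so the graph is self-contained
        frozen_graph_def = tf.graph_util.convert_variables_to_constants(
            session, session.graph_def, output_node_names)

    # serialize the frozen GraphDef to disk
    with tf.gfile.GFile(output_filename, "wb") as file:
        file.write(frozen_graph_def.SerializeToString())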