import argparse
import pathlib

from ssd.config.defaults import cfg
# get_detections and dump_detections are project-local helpers defined
# elsewhere in this repository.


def main():
    parser = argparse.ArgumentParser(description="SSD Demo.")
    parser.add_argument(
        "config_file",
        default="",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument("--ckpt", type=str, default=None, help="Trained weights.")
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()

    # Build the frozen config from the config file and any command-line overrides.
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    # Run the detector and dump the resulting boxes to a JSON file in OUTPUT_DIR.
    detections = get_detections(cfg=cfg, ckpt=args.ckpt)
    json_path = pathlib.Path(cfg.OUTPUT_DIR, "test_detected_boxes.json")
    dump_detections(cfg, detections, json_path)
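# Assumed entry point and example invocation (script name and paths are
# placeholders):
#   python dump_detections.py path/to/config.yaml --ckpt path/to/model.pth
if __name__ == "__main__":
    main()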
import argparse

from ssd.config.defaults import cfg
# infer_video is a project-local helper defined elsewhere in this repository.


def main():
    parser = argparse.ArgumentParser(description="SSD Demo.")
    parser.add_argument(
        "config_file",
        default="",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument(
        "video_path", type=str, metavar="FILE", help="Path to source video")
    parser.add_argument(
        "output_path", type=str, help="Output path to save video with detections")
    parser.add_argument("--ckpt", type=str, default=None, help="Trained weights.")
    parser.add_argument("--score_threshold", type=float, default=0.7)
    parser.add_argument(
        "--dataset_type", default="tdt4265", type=str,
        help="Specify dataset type. Currently supports voc and coco.")
    args = parser.parse_args()

    cfg.merge_from_file(args.config_file)
    cfg.freeze()

    print("Loaded configuration file {}".format(args.config_file))
    with open(args.config_file, "r") as cf:
        config_str = "\n" + cf.read()
        print(config_str)
    print("Running with config:\n{}".format(cfg))

    # Run detection on every frame and write an annotated copy of the video.
    infer_video(
        cfg=cfg,
        ckpt=args.ckpt,
        score_threshold=args.score_threshold,
        video_path=args.video_path,
        output_path=args.output_path,
        dataset_type=args.dataset_type)
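# Assumed entry point and example invocation (script name and paths are
# placeholders):
#   python demo_video.py path/to/config.yaml input.mp4 output.mp4 \
#       --ckpt path/to/model.pth --score_threshold 0.7
if __name__ == "__main__":
    main()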
import argparse
import pathlib

from ssd.config.defaults import cfg
# run_demo is a project-local helper defined elsewhere in this repository.


def main():
    parser = argparse.ArgumentParser(description="SSD Demo.")
    parser.add_argument(
        "config_file",
        default="",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument("--ckpt", type=str, default=None, help="Trained weights.")
    parser.add_argument("--score_threshold", type=float, default=0.7)
    parser.add_argument(
        "--images_dir", default="demo/voc", type=str,
        help="Specify an image dir to do prediction on.")
    parser.add_argument(
        "--dataset_type", default="voc", type=str,
        help="Specify dataset type. Currently supports voc and coco.")
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    print("Loaded configuration file {}".format(args.config_file))
    with open(args.config_file, "r") as cf:
        config_str = "\n" + cf.read()
        print(config_str)
    print("Running with config:\n{}".format(cfg))

    # Detections are written to a "result" subdirectory inside images_dir.
    run_demo(
        cfg=cfg,
        ckpt=args.ckpt,
        score_threshold=args.score_threshold,
        images_dir=pathlib.Path(args.images_dir),
        output_dir=pathlib.Path(args.images_dir, "result"),
        dataset_type=args.dataset_type)
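# Assumed entry point and example invocation (script name and paths are
# placeholders; demo/voc is the default images_dir):
#   python demo.py path/to/config.yaml --images_dir demo/voc --ckpt path/to/model.pth
if __name__ == "__main__":
    main()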
import argparse

from ssd.config.defaults import cfg
# setup_logger and evaluation are project-local helpers defined elsewhere in
# this repository.


def main():
    parser = argparse.ArgumentParser(
        description="SSD Evaluation on VOC and COCO dataset.")
    parser.add_argument(
        "config_file",
        default="",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument(
        "--ckpt",
        help="The path to the checkpoint for test, default is the latest checkpoint.",
        default=None,
        type=str,
    )
    parser.add_argument(
        "--N_images", default=100, type=int,
        help="The number of images to check runtime with.")
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # Runtime is measured with a batch size of 1 (one image at a time).
    cfg.TEST.BATCH_SIZE = 1
    cfg.freeze()

    logger = setup_logger("SSD", cfg.OUTPUT_DIR)
    logger.info(args)
    logger.info("Loaded configuration file {}".format(args.config_file))
    with open(args.config_file, "r") as cf:
        config_str = "\n" + cf.read()
        logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    evaluation(cfg, ckpt=args.ckpt, N_images=args.N_images)
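# Assumed entry point and example invocation (script name and paths are
# placeholders):
#   python runtime_analysis.py path/to/config.yaml --N_images 100
if __name__ == "__main__":
    main()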
import pathlib

import torch

from ssd.config.defaults import cfg
# get_parser, setup_logger, start_train and do_evaluation are project-local
# helpers defined elsewhere in this repository.


def main():
    args = get_parser().parse_args()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = pathlib.Path(cfg.OUTPUT_DIR)
    output_dir.mkdir(exist_ok=True, parents=True)

    logger = setup_logger("SSD", output_dir)
    logger.info(args)
    logger.info("Loaded configuration file {}".format(args.config_file))
    with open(args.config_file, "r") as cf:
        config_str = "\n" + cf.read()
        logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    model = start_train(cfg)

    logger.info('Start evaluating...')
    torch.cuda.empty_cache()  # speed up evaluating after training finished
    do_evaluation(cfg, model)
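# Assumed entry point and example invocation (script name is a placeholder;
# the config file is one of the configs in this repository):
#   python train.py configs/train_rdd2020.yaml
if __name__ == "__main__":
    main()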
import argparse
import pathlib

import torch

from ssd.config.defaults import cfg
# setup_logger, start_train and do_evaluation are project-local helpers defined
# elsewhere in this repository.


def main():
    parser = argparse.ArgumentParser(
        description='Single Shot MultiBox Detector Training With PyTorch')
    parser.add_argument(
        "config_file",
        default="",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = pathlib.Path(cfg.OUTPUT_DIR)
    output_dir.mkdir(exist_ok=True, parents=True)

    logger = setup_logger("SSD", output_dir)
    logger.info(args)
    logger.info("Loaded configuration file {}".format(args.config_file))
    with open(args.config_file, "r") as cf:
        config_str = "\n" + cf.read()
        logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    model = start_train(cfg)

    logger.info('Start evaluating...')
    torch.cuda.empty_cache()  # speed up evaluating after training finished
    do_evaluation(cfg, model)
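# Assumed entry point and example invocation (paths are placeholders; any
# config option can be overridden via the trailing opts):
#   python train.py configs/train_rdd2020.yaml OUTPUT_DIR outputs/my_run
if __name__ == "__main__":
    main()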
import pathlib
import torch
import os

from ssd.config.defaults import cfg
from train import get_parser

if __name__ == "__main__":
    parser = get_parser()
    args = parser.parse_args()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)

    # Change this to the checkpoint you want to transfer weights from
    MODEL_FILE = "model_021000.pth"
    checkpoint = pathlib.Path(cfg.OUTPUT_DIR, MODEL_FILE)
    assert checkpoint.is_file()

    # Create a new directory for the new training run
    new_dir = checkpoint.parent.parent
    new_dir = pathlib.Path(
        checkpoint.parent.parent,
        checkpoint.parent.stem.replace("_waymo", "") + "_tdt4265")

    # Destination path for the copied checkpoint
    new_dir.mkdir()
    new_checkpoint_path = new_dir.joinpath("waymo_model.pth")

    # Point last_checkpoint.txt at the copied checkpoint, so the trainer treats
    # it as the last checkpoint written and resumes from it
    with open(new_dir.joinpath("last_checkpoint.txt"), "w") as fp:
        fp.write(f"{new_checkpoint_path}")

    # Load the old checkpoint and only transfer-learn parameters from the
    # model (not optimizer etc.)
    new_checkpoint = {}
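    # A minimal sketch of the remaining transfer step (assumed; the original
    # script continues from here). It assumes the checkpoint dict stores the
    # network weights under a "model" key; optimizer/scheduler state is left
    # out on purpose so the new run starts with a fresh optimizer:
    old_checkpoint = torch.load(checkpoint, map_location="cpu")
    new_checkpoint["model"] = old_checkpoint["model"]
    torch.save(new_checkpoint, new_checkpoint_path)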
            cfg.INPUT.IMAGE_SIZE[1],
            fill=False,
            edgecolor="black"))
    do_plot = False
    # Only plot prior boxes at the middle location, for visibility
    indice_to_plot = len(indices_to_visualize) / 2
    for i, idx in enumerate(indices_to_visualize):
        prior = priors_as_location[idx]
        color = colors[i]
        # Plot only the aspect-ratio variants centered on the middle index
        if i >= (indice_to_plot - len(aspect_ratio_indices) // 2) and i < (
                indice_to_plot + len(aspect_ratio_indices) // 2):
            do_plot = True
        else:
            do_plot = False
        plot_bbox(ax, prior, color, do_plot, PLOT_CIRCLE)
    plt.show()


if __name__ == "__main__":
    config_path = "configs/train_rdd2020.yaml"
    cfg.merge_from_file(config_path)
    cfg.freeze()
    visualize_validation_set(cfg)
    # visualize_training_set(cfg)
    # visualize_prior_boxes(cfg, layer=3)