def parse_command_line():
    """Parse command-line options and kick off training of the Caffe model."""
    cli = argparse.ArgumentParser(description="Trains Caffe model against prepared data")
    cli.add_argument("--log_path", help="The path to where to place log files", type=str, default="logs")
    cli.add_argument("--log_num", help="""Number that will be appended to log files; this will be automatically padded and added with zeros, such as output00001.log""", type=int, default=1)
    cli.add_argument("--note", help="Adds extra note into training logs.", type=str, default=None)
    cli.add_argument("--solver", help="The path to our Caffe solver prototxt file", type=str, default="src/caffe_model/bvlc_alexnet/solver.prototxt")
    cli.add_argument("--input_weight_file", help="""A pre-trained Caffe model that we will use to start training with in order to fine-tune from""", type=str, default="src/caffe_model/bvlc_alexnet/bvlc_alexnet.caffemodel")
    cli.add_argument("--output_weight_file", help="""Where to place the final, trained Caffe model""", type=str, default="logs/latest_bvlc_alexnet_finetuned.caffemodel")
    opts = vars(cli.parse_args())

    caffe_home = utils.assert_caffe_setup()

    # Ensure the random number generator always starts from the same place for consistent tests.
    random.seed(0)

    log_dir = os.path.abspath(opts["log_path"])
    (output_ending, output_log_prefix, output_log_file) = utils.get_log_path_details(
        log_dir, opts["log_num"])

    train(caffe_home, log_dir, output_log_file,
          os.path.abspath(opts["solver"]),
          os.path.abspath(opts["input_weight_file"]),
          os.path.abspath(opts["output_weight_file"]),
          opts["note"])
def parse_command_line():
    """Parse command-line options, plot training/validation curves, and
    evaluate the trained Caffe model against the validation leveldb.
    """
    parser = argparse.ArgumentParser(description="""Tests a trained Caffe model to see how well it does, generating quality graphs and statistics""")
    parser.add_argument("--log_path", help="The path to where to place log files and graphs", type=str, default="logs")
    parser.add_argument("--log_num", help="""Number that will be appended to log files; this will be automatically padded and added with zeros, such as output00001.log""", type=int, default=1)
    parser.add_argument("--input_weight_file", help="""The trained and fine-tuned Caffe model that we will be testing; defaults to the last trained model from train.py""", type=str, default="logs/latest_bvlc_alexnet_finetuned.caffemodel")
    parser.add_argument("--note", help="Adds extra note onto generated quality graphs.", type=str, default="")
    parser.add_argument("--solver", help="The path to our Caffe solver prototxt file", type=str, default="src/caffe_model/bvlc_alexnet/solver.prototxt")
    parser.add_argument("--deploy", help="""Path to our Caffe deploy/inference time prototxt file""", type=str, default="src/caffe_model/bvlc_alexnet/deploy.prototxt")
    parser.add_argument("--threshold", help="""The percentage threshold over which we assume something is a cloud. 
Note that this value is from 0.0 to 100.0""", type=float, default=0.1)
    parser.add_argument("--validation_leveldb", help="""Path to where the validation leveldb file is""", type=str, default="data/leveldb/validation_leveldb")
    parser.add_argument("--width", help="Width of image during training", type=int, default=256)
    parser.add_argument("--height", help="Height of image during training", type=int, default=256)
    # Fixed help text: these two control the inference-time crop, not training size.
    parser.add_argument("--inference_width", help="Width of image during inference", type=int, default=227)
    parser.add_argument("--inference_height", help="Height of image during inference", type=int, default=227)
    parser.add_argument("--training_mean_pickle", help="Path to pickled mean values", type=str, default="data/imagenet/imagenet_mean.npy")
    args = vars(parser.parse_args())

    # print() with a single argument is valid under both Python 2 and 3
    # (original used a Python-2-only print statement).
    print("Testing trained model...")

    caffe_home = utils.assert_caffe_setup()

    # Ensure the random number generator always starts from the same place for consistent tests.
    random.seed(0)

    log_path = os.path.abspath(args["log_path"])
    log_num = args["log_num"]
    (output_ending, output_log_prefix, output_log_file) = utils.get_log_path_details(log_path, log_num)
    output_graph_path = output_log_prefix

    # Parse the Caffe training logs and render quality graphs from them.
    (training_details, validation_details) = utils.parse_logs(log_path, output_log_file)
    plot_results(training_details, validation_details, args["note"], output_graph_path,
                 args["solver"])

    validation_leveldb = os.path.abspath(args["validation_leveldb"])
    deploy = os.path.abspath(args["deploy"])
    input_weight_file = os.path.abspath(args["input_weight_file"])
    training_mean_pickle = os.path.abspath(args["training_mean_pickle"])
    predict.test_validation(args["threshold"], output_log_prefix, validation_leveldb, deploy,
                            args["width"], args["height"],
                            args["inference_width"], args["inference_height"],
                            input_weight_file, training_mean_pickle)
def parse_command_line():
    """Parse command-line options and run data preparation for Caffe training."""
    cli = argparse.ArgumentParser(description="""Prepares data for training via Caffe""")
    cli.add_argument("--input_metadata", help="Path to where our bounding box metadata is", type=str, default="data/planetlab/metadata/annotated.json")
    cli.add_argument("--input_images", help="Path to where our unbounded images are", type=str, default="data/planetlab/metadata")
    cli.add_argument("--output_images", help="Path to place our cropped, bounded images", type=str, default="data/planetlab/images/bounded")
    cli.add_argument("--output_leveldb", help="Path to place our prepared leveldb directories", type=str, default="data/leveldb")
    cli.add_argument("--width", help="Width of image at training time (it will be scaled to this)", type=int, default=256)
    cli.add_argument("--height", help="Height of image at training time (it will be scaled to this)", type=int, default=256)
    cli.add_argument("--log_path", help="The path to where to place log files", type=str, default="logs")
    cli.add_argument("--log_num", help="""Number that will be appended to log files; this will be automatically padded and added with zeros, such as output00001.log""", type=int, default=1)
    cli.add_argument("--do_augmentation", help="Whether to do data augmentation", dest="do_augmentation", action="store_true")
    cli.set_defaults(do_augmentation=False)
    opts = vars(cli.parse_args())

    utils.assert_caffe_setup()

    # Ensure the random number generator always starts from the same place for consistent tests.
    random.seed(0)

    (output_ending, output_log_prefix, output_log_file) = utils.get_log_path_details(
        os.path.abspath(opts["log_path"]), opts["log_num"])

    prepare_data(os.path.abspath(opts["input_metadata"]),
                 os.path.abspath(opts["input_images"]),
                 os.path.abspath(opts["output_images"]),
                 os.path.abspath(opts["output_leveldb"]),
                 opts["width"], opts["height"],
                 opts["do_augmentation"], output_log_prefix)
def parse_command_line():
    """Read training options from the command line, then train the Caffe model."""
    parser = argparse.ArgumentParser(
        description="Trains Caffe model against prepared data")
    parser.add_argument("--log_path",
                        help="The path to where to place log files",
                        type=str, default="logs")
    parser.add_argument("--log_num",
                        help="""Number that will be appended to log files; this will be automatically padded and added with zeros, such as output00001.log""",
                        type=int, default=1)
    parser.add_argument("--note",
                        help="Adds extra note into training logs.",
                        type=str, default=None)
    parser.add_argument("--solver",
                        help="The path to our Caffe solver prototxt file",
                        type=str, default="src/caffe_model/bvlc_alexnet/solver.prototxt")
    parser.add_argument("--input_weight_file",
                        help="""A pre-trained Caffe model that we will use to start training with in order to fine-tune from""",
                        type=str, default="src/caffe_model/bvlc_alexnet/bvlc_alexnet.caffemodel")
    parser.add_argument("--output_weight_file",
                        help="""Where to place the final, trained Caffe model""",
                        type=str, default="logs/latest_bvlc_alexnet_finetuned.caffemodel")
    args = vars(parser.parse_args())

    caffe_home = utils.assert_caffe_setup()

    # Ensure the random number generator always starts from the same place for consistent tests.
    random.seed(0)

    log_path = os.path.abspath(args["log_path"])
    (output_ending, output_log_prefix, output_log_file) = utils.get_log_path_details(
        log_path, args["log_num"])

    solver = os.path.abspath(args["solver"])
    input_weight_file = os.path.abspath(args["input_weight_file"])
    output_weight_file = os.path.abspath(args["output_weight_file"])
    train(caffe_home, log_path, output_log_file, solver,
          input_weight_file, output_weight_file, args["note"])
def parse_command_line():
    """Read preparation options from the command line, then build the leveldb data."""
    parser = argparse.ArgumentParser(
        description="""Prepares data for training via Caffe""")
    parser.add_argument("--input_metadata",
                        help="Path to where our bounding box metadata is",
                        type=str, default="data/planetlab/metadata/annotated.json")
    parser.add_argument("--input_images",
                        help="Path to where our unbounded images are",
                        type=str, default="data/planetlab/metadata")
    parser.add_argument("--output_images",
                        help="Path to place our cropped, bounded images",
                        type=str, default="data/planetlab/images/bounded")
    parser.add_argument("--output_leveldb",
                        help="Path to place our prepared leveldb directories",
                        type=str, default="data/leveldb")
    parser.add_argument("--width",
                        help="Width of image at training time (it will be scaled to this)",
                        type=int, default=256)
    parser.add_argument("--height",
                        help="Height of image at training time (it will be scaled to this)",
                        type=int, default=256)
    parser.add_argument("--log_path",
                        help="The path to where to place log files",
                        type=str, default="logs")
    parser.add_argument("--log_num",
                        help="""Number that will be appended to log files; this will be automatically padded and added with zeros, such as output00001.log""",
                        type=int, default=1)
    parser.add_argument("--do_augmentation",
                        help="Whether to do data augmentation",
                        dest="do_augmentation", action="store_true")
    parser.set_defaults(do_augmentation=False)
    args = vars(parser.parse_args())

    utils.assert_caffe_setup()

    # Ensure the random number generator always starts from the same place for consistent tests.
    random.seed(0)

    log_path = os.path.abspath(args["log_path"])
    log_num = args["log_num"]
    (output_ending, output_log_prefix, output_log_file) = utils.get_log_path_details(
        log_path, log_num)

    input_metadata = os.path.abspath(args["input_metadata"])
    input_images = os.path.abspath(args["input_images"])
    output_images = os.path.abspath(args["output_images"])
    output_leveldb = os.path.abspath(args["output_leveldb"])
    prepare_data(input_metadata, input_images, output_images, output_leveldb,
                 args["width"], args["height"], args["do_augmentation"],
                 output_log_prefix)
def parse_command_line():
    """Parse command-line options, plot training/validation curves, and
    evaluate the trained Caffe model against the validation leveldb.
    """
    parser = argparse.ArgumentParser(
        description="""Tests a trained Caffe model to see how well it does, generating quality graphs and statistics""")
    parser.add_argument("--log_path",
                        help="The path to where to place log files and graphs",
                        type=str, default="logs")
    parser.add_argument("--log_num",
                        help="""Number that will be appended to log files; this will be automatically padded and added with zeros, such as output00001.log""",
                        type=int, default=1)
    parser.add_argument("--input_weight_file",
                        help="""The trained and fine-tuned Caffe model that we will be testing; defaults to the last trained model from train.py""",
                        type=str, default="logs/latest_bvlc_alexnet_finetuned.caffemodel")
    parser.add_argument("--note",
                        help="Adds extra note onto generated quality graphs.",
                        type=str, default="")
    parser.add_argument("--solver",
                        help="The path to our Caffe solver prototxt file",
                        type=str, default="src/caffe_model/bvlc_alexnet/solver.prototxt")
    parser.add_argument("--deploy",
                        help="""Path to our Caffe deploy/inference time prototxt file""",
                        type=str, default="src/caffe_model/bvlc_alexnet/deploy.prototxt")
    parser.add_argument("--threshold",
                        help="""The percentage threshold over which we assume something is a cloud. 
Note that this value is from 0.0 to 100.0""",
                        type=float, default=0.1)
    parser.add_argument("--validation_leveldb",
                        help="""Path to where the validation leveldb file is""",
                        type=str, default="data/leveldb/validation_leveldb")
    parser.add_argument("--width", help="Width of image during training",
                        type=int, default=256)
    parser.add_argument("--height", help="Height of image during training",
                        type=int, default=256)
    # Fixed help text: these two control the inference-time crop, not training size.
    parser.add_argument("--inference_width", help="Width of image during inference",
                        type=int, default=227)
    parser.add_argument("--inference_height", help="Height of image during inference",
                        type=int, default=227)
    parser.add_argument("--training_mean_pickle", help="Path to pickled mean values",
                        type=str, default="data/imagenet/imagenet_mean.npy")
    args = vars(parser.parse_args())

    # print() with a single argument is valid under both Python 2 and 3
    # (original used a Python-2-only print statement).
    print("Testing trained model...")

    caffe_home = utils.assert_caffe_setup()

    # Ensure the random number generator always starts from the same place for consistent tests.
    random.seed(0)

    log_path = os.path.abspath(args["log_path"])
    log_num = args["log_num"]
    (output_ending, output_log_prefix, output_log_file) = utils.get_log_path_details(
        log_path, log_num)
    output_graph_path = output_log_prefix

    # Parse the Caffe training logs and render quality graphs from them.
    (training_details, validation_details) = utils.parse_logs(log_path, output_log_file)
    plot_results(training_details, validation_details, args["note"], output_graph_path,
                 args["solver"])

    validation_leveldb = os.path.abspath(args["validation_leveldb"])
    deploy = os.path.abspath(args["deploy"])
    input_weight_file = os.path.abspath(args["input_weight_file"])
    training_mean_pickle = os.path.abspath(args["training_mean_pickle"])
    predict.test_validation(args["threshold"], output_log_prefix, validation_leveldb, deploy,
                            args["width"], args["height"],
                            args["inference_width"], args["inference_height"],
                            input_weight_file, training_mean_pickle)