def test_training():
    """Test only that no error is raised."""
    config = EasyDict()
    config.NETWORK_CLASS = YoloV1
    config.DATASET_CLASS = LmThingsOnATable

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [70, 70]
    config.BATCH_SIZE = 4
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.TASK = Tasks.OBJECT_DETECTION

    # network model config
    config.NETWORK = EasyDict()
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE

    # dataset config
    config.DATASET = EasyDict()
    config.DATASET.PRE_PROCESSOR = ResizeWithGtBoxes(config.IMAGE_SIZE)
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE

    environment.init("test_yolov_1")
    prepare_dirs(recreate=True)
    start_training(config)
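
# Hedged illustration (not part of the original test module). EasyDict, from
# the third-party `easydict` package, is the dict subclass behind every config
# object above: keys double as attributes, and nesting works the same way.
def _easydict_demo():
    from easydict import EasyDict
    config = EasyDict()
    config.BATCH_SIZE = 4                  # attribute write...
    assert config["BATCH_SIZE"] == 4       # ...is also visible as a dict key
    config.NETWORK = EasyDict()            # nested configs are just nested EasyDicts
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE
    return config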
def test_training():
    """Test only that no error is raised."""
    config = EasyDict()
    config.NETWORK_CLASS = YoloV2
    config.DATASET_CLASS = Pascalvoc2007

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [128, 160]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.TASK = Tasks.OBJECT_DETECTION

    # network model config
    config.NETWORK = EasyDict()
    config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE
    config.NETWORK.DATA_FORMAT = "NHWC"

    # dataset config
    config.DATASET = EasyDict()
    config.DATASET.PRE_PROCESSOR = ResizeWithGtBoxes(config.IMAGE_SIZE)
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE
    config.DATASET.DATA_FORMAT = "NHWC"

    environment.init("test_yolo_v2")
    prepare_dirs(recreate=True)
    start_training(config)
def run(experiment_id,
        restore_path=None,
        image_size=(None, None),
        image=DEFAULT_INFERENCE_TEST_DATA_IMAGE,
        config_file=None):
    environment.init(experiment_id)

    config = config_util.load_from_experiment()
    if config_file:
        config = config_util.merge(config, config_util.load(config_file))

    config.BATCH_SIZE = 1
    config.NETWORK.BATCH_SIZE = 1
    config.DATASET.BATCH_SIZE = 1

    if list(image_size) != [None, None]:
        config.IMAGE_SIZE = list(image_size)
        config.NETWORK.IMAGE_SIZE = list(image_size)

        # override pre-processor image size.
        if config.PRE_PROCESSOR:
            config.PRE_PROCESSOR.set_image_size(image_size)

        # override post-processor image size.
        if config.POST_PROCESSOR:
            config.POST_PROCESSOR.set_image_size(image_size)

        print("Override IMAGE_SIZE", config.IMAGE_SIZE)

    executor.init_logging(config)
    config_util.display(config)

    return _export(config, restore_path, image)
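
# Hedged sketch (an assumption, not the repo's actual implementation). A
# processor chain like PRE_PROCESSOR above can propagate an image-size
# override by forwarding it to every sub-processor that accepts one:
class _SequenceSketch:
    def __init__(self, processors):
        self.processors = processors

    def set_image_size(self, image_size):
        # Forward the new size; processors without the hook are left alone.
        for processor in self.processors:
            if hasattr(processor, "set_image_size"):
                processor.set_image_size(image_size)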
def test_training():
    """Test only that no error is raised."""
    config = EasyDict()
    config.NETWORK_CLASS = YoloV2
    config.DATASET_CLASS = LmThingsOnATable

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [128, 160]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_STEPS = 1
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.IS_DISTRIBUTION = False

    # network model config
    config.NETWORK = EasyDict()
    config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE
    config.NETWORK.DATA_FORMAT = "NCHW"

    # dataset config
    config.DATASET = EasyDict()
    config.DATASET.PRE_PROCESSOR = ResizeWithGtBoxes(config.IMAGE_SIZE)
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE
    config.DATASET.DATA_FORMAT = "NCHW"

    environment.init("test_yolo_v2")
    prepare_dirs(recreate=True)
    start_training(config)
def run_server(server_info, experiment_id, config_file, restore_path):
    environment.init(experiment_id)

    # Load the experiment config first, then merge any override file on top.
    # (The original merged into an undefined `config` when config_file was given.)
    config = config_util.load_from_experiment()
    if config_file is not None:
        config = config_util.merge(config, config_util.load(config_file))

    if restore_path is None:
        restore_file = search_restore_filename(environment.CHECKPOINTS_DIR)
        restore_path = os.path.join(environment.CHECKPOINTS_DIR, restore_file)

    inference_model = Inference(config, restore_path)

    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind(server_info)
        s.listen(32)
        print("boot: {}:{}".format(*server_info))
        while True:
            client_conn, client_addr = s.accept()
            print("\033[Kfrom: {}:{}".format(*client_addr), end="\r")
            try:
                # Handle each client on its own daemon thread.
                th = threading.Thread(target=receive_and_send,
                                      args=(client_conn, inference_model),
                                      daemon=True)
                th.start()
            except BrokenPipeError:
                print("Send data aborted!")
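
# Hedged sketch (the wire format is defined by receive_and_send(), which is
# not shown here, so the payload handling below is an assumption; host and
# port are hypothetical). A minimal client for the server above:
def _client_sketch(host="localhost", port=8080, payload=b""):
    import socket
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect((host, port))
        s.sendall(payload)         # e.g. an encoded image
        return s.recv(4096)        # whatever receive_and_send() replies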
def run(experiment_id, restore_path, config_file, bit, unquant_layers):
    if config_file is None and experiment_id is None:
        raise Exception("config_file or experiment_id is required")

    if experiment_id:
        environment.init(experiment_id)
        config = config_util.load_from_experiment()
        if config_file:
            config = config_util.merge(config, config_util.load(config_file))

        if restore_path is None:
            restore_file = executor.search_restore_filename(environment.CHECKPOINTS_DIR)
            restore_path = os.path.join(environment.CHECKPOINTS_DIR, restore_file)

        if not os.path.exists("{}.index".format(restore_path)):
            raise Exception("restore file {} does not exist.".format(restore_path))
    else:
        experiment_id = "profile"
        environment.init(experiment_id)
        config = config_util.load(config_file)

    executor.init_logging(config)
    config_util.display(config)

    _profile(config, restore_path, bit, unquant_layers)
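
# Hedged sketch (search_restore_filename() is defined elsewhere in the repo;
# this shows an alternative using TensorFlow's own bookkeeping, which reads
# the `checkpoint` state file that tf.train.Saver maintains):
def _latest_checkpoint_sketch(checkpoints_dir):
    import tensorflow as tf
    # Returns e.g. "<checkpoints_dir>/save.ckpt-1234", or None if no
    # checkpoint has been written yet.
    return tf.train.latest_checkpoint(checkpoints_dir)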
def test_training():
    """Verify only that no error is raised."""
    config = EasyDict()
    config.NETWORK_CLASS = LMBiSeNet
    config.DATASET_CLASS = DummyCamvid

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [128, 160]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_STEPS = 1
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.IS_DISTRIBUTION = False

    # network model config
    config.NETWORK = EasyDict()
    config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE
    config.NETWORK.DATA_FORMAT = "NHWC"

    # dataset config
    config.DATASET = EasyDict()
    config.DATASET.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE
    config.DATASET.DATA_FORMAT = "NHWC"

    environment.init("test_lm_bisenet")
    prepare_dirs(recreate=True)
    start_training(config)
def test_training():
    """Test only that no error is raised."""
    config = EasyDict()
    config.NETWORK_CLASS = Darknet
    config.DATASET_CLASS = Dummy

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [28, 14]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.TASK = Tasks.CLASSIFICATION

    # network model config
    config.NETWORK = EasyDict()
    config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE

    # dataset config
    config.DATASET = EasyDict()
    config.DATASET.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE

    environment.init("test_darknet")
    prepare_dirs(recreate=True)
    start_training(config)
def test_example_classification_config_yaml():
    """Test exporting config and meta YAML from the example classification Python config."""
    config_file = os.path.join("configs", "example", "classification.py")
    config = _load_py(config_file)

    config_yaml = os.path.join("configs", "example", "classification.yaml")
    config_meta = os.path.join("configs", "example", "classification_meta.yaml")

    environment.init("test_example_classification_config_yaml")
    saved_config, saved_meta = save_yaml(environment.EXPERIMENT_DIR, config)

    with open(config_yaml) as f:
        expected = f.read()
    with open(saved_config) as f:
        data = f.read()
    assert expected == data

    with open(config_meta) as f:
        expected = f.read()
    with open(saved_meta) as f:
        data = f.read()
    assert expected == data
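
# Hedged sketch (an assumption, not the repo's actual _load_py). Loading a
# Python config file as a module is commonly done with importlib:
def _load_py_sketch(config_file):
    import importlib.util
    spec = importlib.util.spec_from_file_location("config", config_file)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)    # executes the config file
    return module                      # module attributes hold the config values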
def test_quantize_training():
    """Test only that no error is raised."""
    config = EasyDict()
    config.NETWORK_CLASS = FlowNetSV1Quantized
    config.DATASET_CLASS = FlyingChairs

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [384, 512]
    config.BATCH_SIZE = 8
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.IS_DISTRIBUTION = False

    # network model config
    config.NETWORK = EasyDict()
    config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE
    config.NETWORK.DATA_FORMAT = "NHWC"
    config.NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
    config.NETWORK.ACTIVATION_QUANTIZER_KWARGS = {'bit': 2, 'max_value': 2.0}
    config.NETWORK.WEIGHT_QUANTIZER = binary_channel_wise_mean_scaling_quantizer
    config.NETWORK.WEIGHT_QUANTIZER_KWARGS = {}

    # dataset config
    config.DATASET = EasyDict()
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE
    config.DATASET.DATA_FORMAT = "NHWC"
    config.DATASET.VALIDATION_RATE = 0.2
    config.DATASET.VALIDATION_SEED = 2019
    config.DATASET.AUGMENTOR = Sequence([
        # Geometric transformation
        FlipLeftRight(0.5),
        FlipTopBottom(0.5),
        Translate(-0.2, 0.2),
        Rotate(-17, +17),
        Scale(1.0, 2.0),
        # Pixel-wise augmentation
        Brightness(0.8, 1.2),
        Contrast(0.2, 1.4),
        Color(0.5, 2.0),
        Gamma(0.7, 1.5),
        # Hue(-128.0, 128.0),
        GaussianNoise(0.0, 10.0),
    ])
    config.DATASET.PRE_PROCESSOR = Sequence([
        DivideBy255(),
    ])

    environment.init("test_flownet_s_v1_quantize")
    prepare_dirs(recreate=True)
    start_training(config)
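
# Hedged sketch (a plain-numpy rendering of the usual linear mid-tread half
# quantizer formulation, not the repo's TF implementation). With bit=2 and
# max_value=2.0 as configured above, activations are clipped to [0, 2.0] and
# snapped to 2**2 - 1 = 3 uniform steps:
def _linear_mid_tread_half_sketch(x, bit=2, max_value=2.0):
    import numpy as np
    n = 2 ** bit - 1                     # number of quantization steps
    x = np.clip(x, 0.0, max_value)       # "half": the negative side clamps to 0
    return np.round(x / max_value * n) / n * max_value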
def main(output_file_base, metrics_keys, steps, experiment_id):
    environment.init(experiment_id)

    output(
        environment.TENSORBOARD_DIR,
        environment.EXPERIMENT_DIR,
        metrics_keys,
        steps,
        output_file_base=output_file_base,
    )
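
# Hedged sketch (it is an assumption that output() reads TensorBoard event
# files; this shows one way to pull scalar metrics out of them with the
# official EventAccumulator API):
def _read_scalars_sketch(tensorboard_dir, tag):
    from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
    accumulator = EventAccumulator(tensorboard_dir)
    accumulator.Reload()                  # parse the event files on disk
    return [(e.step, e.value) for e in accumulator.Scalars(tag)]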
def test_training():
    """Test only that no error is raised."""
    config = EasyDict()
    config.NETWORK_CLASS = LmSinglePoseV1Quantize
    config.DATASET_CLASS = MscocoSinglePersonKeypoints

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [160, 160]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.IS_DISTRIBUTION = False
    config.TASK = Tasks.KEYPOINT_DETECTION

    # network model config
    config.NETWORK = EasyDict()
    config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE
    config.NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
    config.NETWORK.ACTIVATION_QUANTIZER_KWARGS = {'bit': 2, 'max_value': 2.0}
    config.NETWORK.WEIGHT_QUANTIZER = binary_channel_wise_mean_scaling_quantizer
    config.NETWORK.WEIGHT_QUANTIZER_KWARGS = {}

    # dataset config
    config.DATASET = EasyDict()
    config.DATASET.PRE_PROCESSOR = Sequence([
        ResizeWithJoints(image_size=config.IMAGE_SIZE),
        JointsToGaussianHeatmap(image_size=config.IMAGE_SIZE, stride=2),
        DivideBy255(),
    ])
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE

    environment.init("test_lm_single_pose_v1")
    prepare_dirs(recreate=True)
    start_training(config)
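
# Hedged sketch (an illustrative numpy version of what a
# JointsToGaussianHeatmap-style processor computes; the repo's sigma,
# masking, and normalization may differ). Each joint becomes a 2-D Gaussian
# bump on a stride-downsampled grid:
def _joints_to_heatmap_sketch(joints, image_size=(160, 160), stride=2, sigma=2.0):
    import numpy as np
    height, width = image_size[0] // stride, image_size[1] // stride
    ys, xs = np.mgrid[0:height, 0:width]
    heatmaps = np.zeros((len(joints), height, width), dtype=np.float32)
    for i, (x, y) in enumerate(joints):    # joint coordinates in input pixels
        cx, cy = x / stride, y / stride
        heatmaps[i] = np.exp(-((xs - cx) ** 2 + (ys - cy) ** 2) / (2 * sigma ** 2))
    return heatmaps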
def main(network, dataset, config_file, experiment_id, restore_path):
    environment.init(experiment_id)

    config = config_util.load_from_experiment()
    if config_file:
        config = config_util.merge(config, config_util.load(config_file))

    if network:
        network_class = module_loader.load_network_class(network)
        config.NETWORK_CLASS = network_class
    if dataset:
        dataset_class = module_loader.load_dataset_class(dataset)
        config.DATASET_CLASS = dataset_class

    executor.init_logging(config)
    config_util.display(config)

    evaluate(config, restore_path)
def run(network, dataset, config_file, experiment_id, recreate):
    environment.init(experiment_id)

    config = config_util.load(config_file)
    if network:
        network_class = module_loader.load_network_class(network)
        config.NETWORK_CLASS = network_class
    if dataset:
        dataset_class = module_loader.load_dataset_class(dataset)
        config.DATASET_CLASS = dataset_class

    config_util.display(config)
    executor.init_logging(config)
    executor.prepare_dirs(recreate)

    config_util.copy_to_experiment_dir(config_file)
    config_util.save_yaml(environment.EXPERIMENT_DIR, config)

    start_training(config)
def test_training():
    """Test only that no error is raised."""
    config = EasyDict()
    config.NETWORK_CLASS = YoloV2Quantize
    config.DATASET_CLASS = Pascalvoc2007

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [128, 160]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.TASK = Tasks.OBJECT_DETECTION

    # network model config
    config.NETWORK = EasyDict()
    config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE
    config.NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
    config.NETWORK.ACTIVATION_QUANTIZER_KWARGS = {'bit': 2, 'max_value': 2.0}
    config.NETWORK.WEIGHT_QUANTIZER = binary_channel_wise_mean_scaling_quantizer
    config.NETWORK.WEIGHT_QUANTIZER_KWARGS = {}

    # dataset config
    config.DATASET = EasyDict()
    config.DATASET.PRE_PROCESSOR = ResizeWithGtBoxes(config.IMAGE_SIZE)
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE

    environment.init("test_yolov_2_quantize")
    prepare_dirs(recreate=True)
    start_training(config)
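
# Hedged sketch (a plain-numpy rendering of per-channel binary mean scaling;
# the repo's TF op may differ in detail). Unlike a single global scale, each
# output channel (last axis of an HWIO kernel) gets its own scale:
def _binary_channel_wise_mean_scaling_sketch(w):
    import numpy as np
    # w: convolution kernel of shape (kh, kw, in_ch, out_ch)
    scale = np.mean(np.abs(w), axis=(0, 1, 2), keepdims=True)
    return scale * np.sign(w)        # per-channel +scale / -scale weights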
def main(model):
    if model == "yolov2":
        weight_file = 'inputs/yolo-voc.weights'
        experiment_id = "convert_weight_from_darknet/yolo_v2"
        config_file = "configs/convert_weight_from_darknet/yolo_v2.py"
    elif model == "darknet19":
        weight_file = 'inputs/darknet19_448.weights'
        experiment_id = "convert_weight_from_darknet/darknet19"
        config_file = "configs/convert_weight_from_darknet/darknet19.py"
    else:
        raise ValueError("unsupported model: {}".format(model))

    recreate = True
    environment.init(experiment_id)
    executor.prepare_dirs(recreate)

    config = config_util.load(config_file)
    config_util.display(config)
    config_util.copy_to_experiment_dir(config_file)

    convert(config, weight_file)
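
# Hedged sketch (assuming the conventional darknet .weights layout; convert()
# in this repo may parse it differently). A darknet weight file starts with an
# integer header (major, minor, revision, images-seen) followed by raw float32
# parameters in layer order:
def _read_darknet_weights_sketch(weight_file):
    import numpy as np
    with open(weight_file, "rb") as f:
        major, minor, revision = np.fromfile(f, dtype=np.int32, count=3)
        # Newer darknet versions store "seen" as int64, older ones as int32.
        seen_dtype = np.int64 if major * 10 + minor >= 2 else np.int32
        seen = np.fromfile(f, dtype=seen_dtype, count=1)
        weights = np.fromfile(f, dtype=np.float32)   # everything that remains
    return (major, minor, revision, seen), weights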
def test_training():
    """Test only that no error is raised."""
    config = EasyDict()
    config.NETWORK_CLASS = LmResnetQuantize
    config.DATASET_CLASS = Dummy

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [32, 32]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.TASK = Tasks.CLASSIFICATION

    # network model config
    config.NETWORK = EasyDict()
    config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE
    config.NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
    config.NETWORK.ACTIVATION_QUANTIZER_KWARGS = {'bit': 2, 'max_value': 2}
    config.NETWORK.WEIGHT_QUANTIZER = binary_mean_scaling_quantizer
    config.NETWORK.WEIGHT_QUANTIZER_KWARGS = {}

    # dataset config
    config.DATASET = EasyDict()
    config.DATASET.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE

    environment.init("test_lm_resnet_quantize")
    prepare_dirs(recreate=True)
    start_training(config)
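
# Hedged sketch (a plain-numpy rendering of the standard binary mean scaling
# scheme, in the spirit of BinaryConnect/XNOR-style training; the repo's TF op
# may differ in detail). Weights collapse to a single scale times their sign:
def _binary_mean_scaling_sketch(w):
    import numpy as np
    scale = np.mean(np.abs(w))       # one scalar scale for the whole tensor
    return scale * np.sign(w)        # +scale / -scale binary weights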
def run(input_dir, output_dir, experiment_id, config_file, restore_path, save_images):
    environment.init(experiment_id)

    config = config_util.load_from_experiment()
    if config_file:
        config = config_util.merge(config, config_util.load(config_file))

    if not os.path.isdir(input_dir):
        raise Exception("Input directory {} does not exist.".format(input_dir))

    if restore_path is None:
        restore_file = search_restore_filename(environment.CHECKPOINTS_DIR)
        restore_path = os.path.join(environment.CHECKPOINTS_DIR, restore_file)

    print("Restore from {}".format(restore_path))

    if not os.path.exists("{}.index".format(restore_path)):
        raise Exception("restore file {} does not exist.".format(restore_path))

    print("---- start predict ----")
    _run(input_dir, output_dir, config, restore_path, save_images)
    print("---- end predict ----")
def _run(config_file, experiment_id, restore_path, image_size, step_size, cpu):
    if experiment_id:
        environment.init(experiment_id)
        config = config_util.load_from_experiment()
        if config_file:
            config = config_util.merge(config, config_util.load(config_file))

        if restore_path is None:
            restore_file = executor.search_restore_filename(environment.CHECKPOINTS_DIR)
            restore_path = os.path.join(environment.CHECKPOINTS_DIR, restore_file)

        if not os.path.exists("{}.index".format(restore_path)):
            raise Exception("restore file {} does not exist.".format(restore_path))
    else:
        experiment_id = "measure_latency"
        environment.init(experiment_id)
        config = config_util.load(config_file)

    config.BATCH_SIZE = 1
    config.NETWORK.BATCH_SIZE = 1
    config.DATASET.BATCH_SIZE = 1

    if list(image_size) != [None, None]:
        config.IMAGE_SIZE = list(image_size)
        config.NETWORK.IMAGE_SIZE = list(image_size)

        # override pre-processor image size.
        if config.PRE_PROCESSOR:
            config.PRE_PROCESSOR.set_image_size(image_size)

        # override post-processor image size.
        if config.POST_PROCESSOR:
            config.POST_PROCESSOR.set_image_size(image_size)

        print("Override IMAGE_SIZE", config.IMAGE_SIZE)

    executor.init_logging(config)
    config_util.display(config)

    overall_times, only_network_times = _measure_time(config, restore_path, step_size)

    overall_times = np.array(overall_times)
    only_network_times = np.array(only_network_times)

    # list of physical_device_desc
    devices = [
        device.physical_device_desc
        for device in device_lib.list_local_devices()
        if device.physical_device_desc
    ]

    message = """
---- measure latency result ----
total number of executions (number of samples): {}
network: {}
use gpu by network: {}
image size: {}
devices: {}

* overall (includes pre/post-process, which runs on the CPU)
total time: {:.4f} msec
latency mean (SD=standard deviation): {:.4f} (SD={:.4f}) msec, min: {:.4f} msec, max: {:.4f} msec
FPS mean (SD=standard deviation): {:.4f} (SD={:.4f}), min: {:.4f}, max: {:.4f}

* network only (excludes pre/post-process):
total time: {:.4f} msec
latency mean (SD=standard deviation): {:.4f} (SD={:.4f}) msec, min: {:.4f} msec, max: {:.4f} msec
FPS mean (SD=standard deviation): {:.4f} (SD={:.4f}), min: {:.4f}, max: {:.4f}
---- measure latency result ----
""".format(
        step_size,
        config.NETWORK_CLASS.__name__,
        not cpu,
        config.IMAGE_SIZE,
        devices,
        # overall
        np.sum(overall_times) * 1000,
        # latency
        np.mean(overall_times) * 1000,
        np.std(overall_times) * 1000,
        np.min(overall_times) * 1000,
        np.max(overall_times) * 1000,
        # FPS
        np.mean(1 / overall_times),
        np.std(1 / overall_times),
        np.min(1 / overall_times),
        np.max(1 / overall_times),
        # network only
        np.sum(only_network_times) * 1000,
        # latency
        np.mean(only_network_times) * 1000,
        np.std(only_network_times) * 1000,
        np.min(only_network_times) * 1000,
        np.max(only_network_times) * 1000,
        # FPS
        np.mean(1 / only_network_times),
        np.std(1 / only_network_times),
        np.min(1 / only_network_times),
        np.max(1 / only_network_times),
    )
    print(message)
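
# Hedged sketch (_measure_time() is defined elsewhere; this illustrates the
# split being reported above, timing the full pre-process -> network ->
# post-process path against the network alone. The model.pre_process,
# model.run_network, and model.post_process helpers are hypothetical):
def _measure_time_sketch(model, samples, step_size):
    import time
    overall_times, only_network_times = [], []
    for step in range(step_size):
        sample = samples[step % len(samples)]
        begin = time.time()
        batch = model.pre_process(sample)
        net_begin = time.time()
        output = model.run_network(batch)
        only_network_times.append(time.time() - net_begin)
        model.post_process(output)
        overall_times.append(time.time() - begin)
    return overall_times, only_network_times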
        # Tail of Inference.__init__: create the session and restore weights.
        session_config = tf.ConfigProto()
        session_config.gpu_options.allow_growth = True
        self.sess = tf.Session(graph=graph, config=session_config)
        self.sess.run(init_op)
        saver.restore(self.sess, restore_path)

    def __call__(self, input_data):
        # Normalize pixel values to [0, 1] before feeding the graph.
        feed_dict = {self.images_placeholder: input_data * (1 / 255.0)}

        t_begin = time.time()
        output = self.sess.run(self.output_op, feed_dict=feed_dict)
        calc_time = time.time() - t_begin

        return output, calc_time


if __name__ == '__main__':
    environment.init(args.experiment_id)
    config = config_util.load_from_experiment()
    print(config)

    if args.config_file is not None:
        config = config_util.merge(config, config_util.load(args.config_file))

    if args.restore_path is None:
        restore_file = search_restore_filename(environment.CHECKPOINTS_DIR)
        restore_path = os.path.join(environment.CHECKPOINTS_DIR, restore_file)
    else:
        restore_path = args.restore_path

    print("Restore from {}".format(restore_path))

    inference_model = Inference(config, restore_path)

    window_name = os.path.basename(restore_path)

    run_demo(inference_model, diff_step=args.diff_step,