コード例 #1
0
 def do_debug(self, args=None):
     """Run the `retinanet-debug` tool against the CSV dataset.

     With no *args*, debugs the default train/classes CSV pair and prints a
     warning when the tool exits non-zero; otherwise forwards *args* to the
     tool verbatim (that branch keeps the original behavior of not checking
     the return code).
     """
     from subprocess import CalledProcessError  # narrow target for the check below
     if not args:
         debug_check = spRun(['retinanet-debug', 'csv',
                              self.PATH_TO_TRAIN_CSV, self.PATH_TO_CLASSES_CSV])
         try:
             debug_check.check_returncode()
         except CalledProcessError:
             # Was a bare `except:` with a message about "evaluating";
             # this step is dataset debugging, so the message now says so.
             print('From Tool: ATTENTION! \t retinanet dataset debugging failed! Next processes may not work properly. ')
     else:
         spRun(['retinanet-debug'] + args)
コード例 #2
0
 def do_build_dataset(self, args):
     """Build the dataset via the build script, then sanity-check it.

     *args* (possibly empty/None) are appended to the build command. The
     success message and follow-up `do_debug()` call were duplicated in
     both branches of the original; they are hoisted out here, and the
     message typo ("succesfully builded") is fixed.
     """
     cmd = ['python', self.PATH_TO_BUILD_DATASET]
     if args:
         cmd += args
     spRun(cmd)
     print('dataset successfully built')
     self.do_debug()
コード例 #3
0
 def do_convert(self, args=None):
     """Convert the best training snapshot into an inference model.

     With no *args*, converts PATH_TO_BEST_MODEL into PATH_TO_CONVERTED_MODEL
     and warns when the tool exits non-zero; otherwise forwards *args*
     verbatim (that branch keeps the original behavior of not checking the
     return code).
     """
     from subprocess import CalledProcessError  # narrow target for the check below
     if not args:
         convert_check = spRun(['retinanet-convert-model',
                                self.PATH_TO_BEST_MODEL, self.PATH_TO_CONVERTED_MODEL])
         try:
             convert_check.check_returncode()
         except CalledProcessError:
             # Was a bare `except:` — only the non-zero-exit case should be caught.
             print('From Tool: ATTENTION! \t retinanet model converting was failure! Next processes may not work properly. ')
     else:
         spRun(['retinanet-convert-model'] + args)
コード例 #4
0
 def do_train(self, args=None):
     """Train via the project's train script, warning on a non-zero exit.

     Missing options are filled in by `trainDefaultArgumentsSetter` before
     the script is launched.
     """
     from subprocess import CalledProcessError  # narrow target for the check below
     print('You can find the config file from mrcnn/own_dataset/own_config.py that includes default train arguments assigning.')
     args = self.trainDefaultArgumentsSetter(args)
     train_check = spRun(['python', self.PATH_TO_TRAIN_PY] + args)
     try:
         train_check.check_returncode()
     except CalledProcessError:
         # Was a bare `except:` — only the non-zero-exit case should be caught.
         print('From Tool: ATTENTION! \t retinanet training was failure! Next processes may not work properly. ')
コード例 #5
0
 def do_train(self, args=None):
     """Full retinanet pipeline: train, select best model, convert, evaluate.

     Old snapshots are moved aside first so `select_best_training_model`
     only considers this run's output. A non-zero training exit prints a
     warning but the pipeline still continues (best-effort, as before).
     """
     from subprocess import CalledProcessError  # narrow target for the check below
     args = self.trainDefaultArgumentsSetter(args)
     self.move_old_snapshots()
     train_check = spRun(['retinanet-train'] + args)
     try:
         train_check.check_returncode()
     except CalledProcessError:
         # Was a bare `except:` — only the non-zero-exit case should be caught.
         print('From Tool: ATTENTION! \t retinanet training was failure! Next processes may not work properly. ')
     self.select_best_training_model()
     self.do_convert()
     self.do_evaluate()
コード例 #6
0
 def do_predict(self, args=None):
     """Run the prediction script, forwarding any extra CLI arguments."""
     command = ['python', self.PATH_TO_PREDICT_PY]
     if args:
         spRun(command + args)
     else:
         spRun(command)
コード例 #7
0
 def do_build_dataset(self, args):
     """Run the dataset build script, forwarding any extra CLI arguments."""
     base_command = ['python', self.PATH_TO_BUILD_DATASET]
     if args:
         spRun(base_command + args)
     else:
         spRun(base_command)
コード例 #8
0
 def do_test(self, args):
     """Invoke `retinanet-test`, forwarding any extra CLI arguments."""
     command = ['retinanet-test']
     if args:
         spRun(command + args)
     else:
         spRun(command)
コード例 #9
0
 def do_predict(self, args=None):
     """Run retinanet/predict.py with defaults filled in for missing args."""
     full_args = self.predictDefaultArgumentsSetter(args)
     command = ['python', 'retinanet/predict.py'] + full_args
     spRun(command)
コード例 #10
0
def main(args):
    """Train a TF2 object-detection model and export its inference graph.

    Parses CLI options (model index, batch size, checkpoint flag), patches
    the pipeline config via `edit_config`, runs model_main for training,
    exports the trained checkpoint as an inference graph, and copies the
    label map / class-count files next to the exported graph.

    NOTE(review): the *args* parameter is immediately shadowed by
    ``vars(ap.parse_args())`` below — the incoming value is never used.
    """
    ap = argparse.ArgumentParser()

    ap.add_argument(
        "-m",
        "--model",
        default=0,
        type=int,
        choices=[0, 1, 2, 3, 4, 5],
        help=("Model to be trained"
              "\n0: ssd_mobilenet_v2_320x320_coco17_tpu-8 (DEFAULT)"
              "\n1: efficientdet_d7_coco17_tpu-32"
              "\n2: centernet_resnet50_v1_fpn_512x512_coco17_tpu-8"
              "\n3: mask_rcnn_inception_resnet_v2_1024x1024_coco17_gpu-8"
              "\n4: ssd_resnet101_v1_fpn_1024x1024_coco17_tpu-8"
              "\n5: faster_rcnn_resnet152_v1_800x1333_coco17_gpu-8"))
    ap.add_argument(
        "-b",
        "--batch_size",
        type=int,
        help=
        'optional batch_size parameter for training. (Higher values require more memory and vice-versa)'
    )
    # NOTE(review): action='store_false' makes this True by default and
    # False when -d IS passed — the opposite polarity of what the flag name
    # and help text suggest. Verify against edit_config's expectation.
    ap.add_argument(
        "-d",
        "--dont_use_checkpoint",
        action='store_false',
        help=
        'If this parameter is passed. Training process does not use pre-trained weight, it starts from scratch'
    )

    args = vars(ap.parse_args())

    model_index = args["model"]
    batch_size = args["batch_size"]
    dont_use_checkpoint = args["dont_use_checkpoint"]

    # Index into this list must stay aligned with the --model choices above.
    models = [
        'ssd_mobilenet_v2_320x320_coco17_tpu-8',
        'efficientdet_d7_coco17_tpu-32',
        'centernet_resnet50_v1_fpn_512x512_coco17_tpu-8',
        'mask_rcnn_inception_resnet_v2_1024x1024_coco17_gpu-8',
        'ssd_resnet101_v1_fpn_1024x1024_coco17_tpu-8',
        'faster_rcnn_resnet152_v1_800x1333_coco17_gpu-8'
    ]
    model_name = models[model_index]

    # Fetch (or reuse) the pre-trained model and rewrite its pipeline config
    # with the requested batch size / checkpoint settings.
    pre_trained_models_dir = os.path.abspath(
        os.path.join(ROOT_DIR, 'pre-trained-models'))
    pre_trained_model = get_pre_trained_model(model_name,
                                              pre_trained_models_dir)
    config_file_new = edit_config(model_name, pre_trained_models_dir,
                                  pre_trained_model, batch_size,
                                  dont_use_checkpoint)

    ### call model_main.py with args for training
    # Each run gets its own timestamped directory under training/.
    now = datetime.now()
    dt_string = now.strftime("%d-%m-%Y_%H-%M-%S")
    model_dir = os.path.abspath(os.path.join(ROOT_DIR, 'training', dt_string))
    # export/Servo is created up front; makedirs raises if it already exists.
    os.makedirs(os.path.abspath(os.path.join(model_dir, 'export', 'Servo')))
    args_for_model_main = [
        '--alsologtostderr', '--model_dir=' + model_dir,
        ('--pipeline_config_path=' + config_file_new)
    ]
    spRun((['python', PATH_TO_MODEL_MAIN_PY] + args_for_model_main))

    print('\n\nmodel_main_tf_2.py is Done...\n\n')

    ### call export_inference_graph.py with args to prepare model for prediction
    model_ckpt = os.path.abspath(os.path.join(model_dir))
    inference_graph_dir = os.path.abspath(
        os.path.join(ROOT_DIR, 'trained-inference-graphs', dt_string))
    os.makedirs(inference_graph_dir)
    inference_graph_path = os.path.abspath(os.path.join(inference_graph_dir))
    args_for_inference_graph = [
        '--input_type=image_tensor',
        ('--pipeline_config_path=' + config_file_new),
        ('--trained_checkpoint_dir=' + model_ckpt),
        ('--output_directory=' + inference_graph_path)
    ]
    print('args_for_inference_graph:', args_for_inference_graph)
    spRun((['python', PATH_TO_EXPORT_INFERENCE_GRAPHPY] +
           args_for_inference_graph))

    # Copy the label map and class-count file next to the exported graph so
    # prediction can run from that directory alone.
    label_map_src = os.path.abspath(
        os.path.join(DATASET_DIR, 'train', 'ssd_label_map.pbtxt'))
    label_map_dst = os.path.abspath(
        os.path.join(ROOT_DIR, 'trained-inference-graphs', dt_string,
                     'ssd_label_map.pbtxt'))
    copy2(label_map_src, label_map_dst)

    num_classes_src = os.path.abspath(
        os.path.join(DATASET_DIR, 'train', 'ssd_num_classes.txt'))
    num_classes_dst = os.path.abspath(
        os.path.join(ROOT_DIR, 'trained-inference-graphs', dt_string,
                     'ssd_num_classes.txt'))
    copy2(num_classes_src, num_classes_dst)
コード例 #11
0
def main(args):
    """Train a TF1 object-detection model and export its inference graph.

    Parses CLI options (config index, batch size, checkpoint flag), patches
    the pipeline config via `edit_config`, runs model_main for training,
    exports the final checkpoint as an inference graph, and copies the
    label map / class-count files next to the exported graph.

    NOTE(review): the *args* parameter is immediately shadowed by
    ``vars(ap.parse_args())`` below — the incoming value is never used.
    """
    ap = argparse.ArgumentParser()

    ap.add_argument(
        "-c",
        "--configFile",
        default=0,
        type=int,
        choices=[0, 1, 2, 3],
        help=
        ("A .config file contains train-evaluate-prediction configurations based selected backbone"
         "\n0: ssd_resnet50_v1_fpn_shared_box_predictor_640x640_coco14_sync.config (DEFAULT)"
         "\n1: ssd_resnet101_v1_fpn_shared_box_predictor_oid_512x512_sync.config"
         "\n2: ssd_mobilenet_v1_ppn_shared_box_predictor_300x300_coco14_sync.config"
         ))
    ap.add_argument(
        "-b",
        "--batch_size",
        type=int,
        help=
        'optional batch_size parameter for training. (Higher values require more memory and vice-versa)'
    )
    # NOTE(review): action='store_false' makes this True by default and
    # False when -d IS passed — the opposite polarity of what the flag name
    # and help text suggest. Verify against edit_config's expectation.
    ap.add_argument(
        "-d",
        "--dont_use_checkpoint",
        action='store_false',
        help=
        'If this parameter is passed. Training process does not use pre-trained weight, it starts from scratch'
    )

    args = vars(ap.parse_args())

    config_file = args["configFile"]
    batch_size = args["batch_size"]
    dont_use_checkpoint = args["dont_use_checkpoint"]
    config_file_new, num_steps_train = edit_config(config_file, batch_size,
                                                   dont_use_checkpoint)

    ### call model_main.py with args for training
    # Each run gets its own timestamped directory under training/.
    now = datetime.now()
    dt_string = now.strftime("%d-%m-%Y_%H-%M-%S")
    model_dir = os.path.abspath(os.path.join(ROOT_DIR, 'training', dt_string))
    # export/Servo is created up front; makedirs raises if it already exists.
    os.makedirs(os.path.abspath(os.path.join(model_dir, 'export', 'Servo')))
    # BUG FIX: the original passed the options list as one nested element of
    # the spRun command and wrapped paths in literal quote characters with
    # leading spaces. With a list command (shell=False) every option must be
    # its own flat string and no shell quoting may be embedded.
    args_for_model_main = [
        '--alsologtostderr',
        '--model_dir=' + model_dir,
        '--pipeline_config_path=' + config_file_new,
    ]
    spRun(['python', PATH_TO_MODEL_MAIN_PY] + args_for_model_main)

    ### call export_inference_graph.py with args to prepare model for prediction
    model_ckpt = os.path.abspath(
        os.path.join(model_dir, ('model.ckpt-' + str(num_steps_train))))
    inference_graph_dir = os.path.abspath(
        os.path.join(ROOT_DIR, 'trained-inference-graphs', dt_string))
    os.makedirs(inference_graph_dir)
    inference_graph_path = os.path.abspath(
        os.path.join(inference_graph_dir, 'output_inference_graph_v1.pb'))
    # Same flattening/unquoting fix as above.
    args_for_inference_graph = [
        '--input_type=image_tensor',
        '--pipeline_config_path=' + config_file_new,
        '--trained_checkpoint_prefix=' + model_ckpt,
        '--output_directory=' + inference_graph_path,
    ]
    spRun(['python', PATH_TO_EXPORT_INFERENCE_GRAPHPY] +
          args_for_inference_graph)

    # Copy the label map and class-count file next to the exported graph so
    # prediction can run from that directory alone.
    label_map_src = os.path.abspath(
        os.path.join(DATASET_DIR, 'train', 'ssd_label_map.pbtxt'))
    label_map_dst = os.path.abspath(
        os.path.join(ROOT_DIR, 'trained-inference-graphs', dt_string,
                     'ssd_label_map.pbtxt'))
    copy2(label_map_src, label_map_dst)

    num_classes_src = os.path.abspath(
        os.path.join(DATASET_DIR, 'train', 'ssd_num_classes.txt'))
    num_classes_dst = os.path.abspath(
        os.path.join(ROOT_DIR, 'trained-inference-graphs', dt_string,
                     'ssd_num_classes.txt'))
    copy2(num_classes_src, num_classes_dst)