Example #1
def test_train(setup_tf_eager, setup_darknet_weights, setup_train_dirs):

    ann_fnames, image_root = setup_train_dirs
    darknet_weights = setup_darknet_weights

    # 1. create generator
    generator = BatchGenerator(ann_fnames,
                               image_root,
                               batch_size=2,
                               labels_naming=["raccoon"],
                               jitter=False)
    valid_generator = BatchGenerator(ann_fnames,
                                     image_root,
                                     batch_size=2,
                                     labels_naming=["raccoon"],
                                     jitter=False)

    # 2. create model
    model = Yolonet(n_classes=1)
    model.load_darknet_params(darknet_weights, skip_detect_layer=True)

    # 3. training
    loss_history = train_fn(model, generator, valid_generator, num_epoches=3)
    assert loss_history[0] > loss_history[-1]
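The test above depends on three pytest fixtures that are not part of the snippet. A minimal conftest.py sketch, assuming setup_train_dirs yields the annotation files plus the image directory, setup_darknet_weights yields a path to a pretrained darknet weight file, and setup_tf_eager only switches TensorFlow to eager mode (the fixture names come from the test; their bodies and paths here are hypothetical):

import glob
import os

import pytest
import tensorflow as tf


@pytest.fixture(scope="session")
def setup_tf_eager():
    # Assumption: only needed on TF 1.x, where eager mode is opt-in.
    tf.compat.v1.enable_eager_execution()


@pytest.fixture(scope="session")
def setup_train_dirs():
    # Assumption: a small raccoon sample dataset ships with the tests.
    sample_dir = os.path.join("tests", "dataset", "raccoon")
    ann_fnames = glob.glob(os.path.join(sample_dir, "anns", "*.xml"))
    image_root = os.path.join(sample_dir, "imgs")
    return ann_fnames, image_root


@pytest.fixture(scope="session")
def setup_darknet_weights():
    # Assumption: the pretrained yolov3.weights file was downloaded beforehand.
    return os.path.join("weights", "yolov3.weights")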
Example #2
                                       batch_size=config["train"]["batch_size"],
                                       labels_naming=config["model"]["labels"],
                                       anchors=config["model"]["anchors"],
                                       jitter=False,
                                       shuffle=False)
    
    print(train_generator.steps_per_epoch)
    
    # 2. create model
    model = Yolonet(n_classes=len(config["model"]["labels"]))
    model.load_darknet_params(config["pretrained"]["darknet_format"], skip_detect_layer=True)
 
    # 4. training
    train_fn(model,
             train_generator,
             valid_generator,
             learning_rate=config["train"]["learning_rate"],
             save_dname=config["train"]["save_folder"],
             num_epoches=config["train"]["num_epoch"])

    # 5. prepare sample images
    img_fnames = glob.glob(os.path.join(config["train"]["train_image_folder"], "*.*"))
    imgs = [cv2.imread(fname)[:,:,::-1] for fname in img_fnames]  # OpenCV loads BGR; reverse to RGB

    # 6. create new model & load trained weights
    model = Yolonet(n_classes=len(config["model"]["labels"]))
    model.load_weights(os.path.join(config["train"]["save_folder"], "weights.h5"))
    detector = YoloDetector(model)
 
    # 7. predict & plot
    boxes = detector.detect(imgs[0], config["model"]["anchors"])
    image = draw_boxes(imgs[0], boxes, labels=config["model"]["labels"])
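Example #2 indexes a nested config dictionary throughout. Only the key names below are taken from the snippet; the values are placeholders (the anchors are the standard YOLOv3 defaults) to show the expected shape of such a config:

# Placeholder config illustrating only the keys used in Example #2; values are made up.
config = {
    "model": {
        "labels": ["raccoon"],
        "anchors": [10, 13, 16, 30, 33, 23, 30, 61, 62, 45,
                    59, 119, 116, 90, 156, 198, 373, 326],
    },
    "pretrained": {
        "darknet_format": "weights/yolov3.weights",
    },
    "train": {
        "batch_size": 2,
        "learning_rate": 1e-4,
        "save_folder": "saved_models",
        "num_epoch": 30,
        "train_image_folder": "dataset/raccoon/imgs",
    },
}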
Example #3
argparser.add_argument('-c',
                       '--config',
                       help='config file')

if __name__ == '__main__':
    args = argparser.parse_args()
    config_parser = ConfigParser(args.config)

    # Select device
    #tf.debugging.set_log_device_placement(True)
    gpus = tf.config.experimental.list_physical_devices('GPU')
    device = "/CPU:0" if len(gpus) == 0 else "/GPU:0"
    device = "/GPU:1" if TF_GPU_SETUP == "iis" else device

    with tf.device(device):
        # Create data generator
        train_generator, valid_generator = config_parser.create_generator()

        # Create the YoloV3 model
        model = config_parser.create_model()

        # Train the (preloaded) model
        learning_rate, save_dname, n_epoches = config_parser.get_train_params()
        summary_dir = save_dname + "/summary"
        train_fn(config_parser,
                 model,
                 train_generator,
                 valid_generator,
                 summary_dir=summary_dir,
                 learning_rate=learning_rate,
                 save_dname=save_dname,
                 epoch=n_epoches)
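The device-selection block in Example #3 also reads well as a standalone helper. A sketch of the same logic factored out (TF_GPU_SETUP is defined outside the visible snippet, so it is passed in as an optional argument here):

import tensorflow as tf


def select_device(tf_gpu_setup=None):
    # Prefer the first visible GPU, otherwise fall back to the CPU.
    gpus = tf.config.experimental.list_physical_devices('GPU')
    device = "/CPU:0" if len(gpus) == 0 else "/GPU:0"
    # Site-specific override kept from the original snippet.
    if tf_gpu_setup == "iis":
        device = "/GPU:1"
    return device


with tf.device(select_device()):
    pass  # build the model and run training here, as in Example #3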
Example #4
argparser.add_argument('-c',
                       '--config',
                       default="configs/test.json",
                       help='config file')

if __name__ == '__main__':
    args = argparser.parse_args()
    # config = './configs/svhn.json'
    config = args.config
    config_parser = ConfigParser(config)

    # 1. create generator
    split_train_valid = config_parser.split_train_val()
    train_generator, valid_generator = config_parser.create_generator(
        split_train_valid=split_train_valid)

    # 2. create model
    model = config_parser.create_model()

    # 3. training
    learning_rate, save_dir, weight_name, n_epoches, checkpoint_path = config_parser.get_train_params()
    train_fn(model,
             train_generator,
             valid_generator,
             learning_rate=learning_rate,
             save_dir=save_dir,
             weight_name=weight_name,
             num_epoches=n_epoches,
             configs=config_parser)
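Examples #3 and #4 both drive training through a ConfigParser object. A hypothetical skeleton of the interface exercised in Example #4 (the method names and the unpacked return values come from the calls above; everything else is an assumption):

import json


class ConfigParser(object):
    def __init__(self, config_file):
        with open(config_file) as handle:
            self._config = json.load(handle)

    def split_train_val(self):
        # Assumed to decide, e.g. from a ratio in the config, how the annotated
        # data is split between training and validation.
        ...

    def create_generator(self, split_train_valid=None):
        # Assumed to return a (train_generator, valid_generator) pair.
        ...

    def create_model(self):
        # Assumed to build a Yolonet sized to the configured labels and load the
        # pretrained weights, as Example #2 does by hand.
        ...

    def get_train_params(self):
        # Must match the unpacking in Example #4:
        # learning_rate, save_dir, weight_name, n_epoches, checkpoint_path
        ...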