Example No. 1
0
    
    # --- Data pipeline ---
    # NOTE(review): fragment of a larger training script; DATA_DIR, DATA_YEAR,
    # default_boxes, SIZE, BATCH_SIZE, NUM_BATCHES etc. are presumably
    # module-level config constants defined outside this view — confirm.
    batch_generator, val_generator, info = create_batch_generator(
                DATA_DIR, DATA_YEAR, default_boxes,
                SIZE, BATCH_SIZE, NUM_BATCHES,
                mode='train', augmentation=['flip'])  # the patching algorithm is currently causing bottleneck sometimes


    # --- Model ---
    # `dummy` is a single 300x300 RGB tensor; it is not used within this
    # fragment — presumably fed through the model once below to build its
    # variables. TODO confirm it is actually consumed later.
    dummy = tf.random.normal((1, 300, 300, 3))
    ssd = create_ssd_mobilenetv1_lite(weights=None) if False else create_pre_ssd_mobilenetv1_lite(weights=None)

    pretrained_type = 'specified'
    checkpoint_path = CHECKPOINT_PATH
    
    # Restore weights from the given checkpoint into the freshly built SSD.
    net = init_ssd(ssd, pretrained_type, checkpoint_path)

    # --- Loss, schedule, optimizer ---
    criterion = create_losses(NEG_RATIO, NUM_CLASSES)
    steps_per_epoch = info['length'] // BATCH_SIZE

    # Piecewise LR schedule: drop to 10% after 2/3 of training and to 1%
    # after 5/6 of training (boundaries are in optimizer steps).
    lr_fn = PiecewiseConstantDecay(
            boundaries=[int(steps_per_epoch * NUM_EPOCHS * 2 / 3),
                        int(steps_per_epoch * NUM_EPOCHS * 5 / 6)],
            values=[INITIAL_LR, INITIAL_LR * 0.1, INITIAL_LR * 0.01])

    # NOTE(review): lr_fn is built above but the optimizer below is given the
    # constant INITIAL_LR; if the decay schedule is intended, this should
    # presumably be learning_rate=lr_fn — confirm against the rest of the file.
    optimizer = tf.keras.optimizers.SGD(
        learning_rate=INITIAL_LR,
        momentum=MOMENTUM)

    # --- TensorBoard summary writers (train/val logged separately) ---
    train_log_dir = 'logs/train'
    val_log_dir = 'logs/val'

    train_summary_writer = tf.summary.create_file_writer(train_log_dir)
Example No. 2
0
        # NOTE(review): this fragment opens mid-call — the generator-creation
        # call these keyword arguments belong to starts above this view.
        do_shuffle=False,
        augmentation=[
            'flip'
        ])  # the patching algorithm is currently causing bottleneck sometimes

    # Build the SSD model; any construction/restore failure is reported and
    # the program exits rather than training with a broken model.
    try:
        ssd = create_ssd(NUM_CLASSES,
                         args.arch,
                         args.pretrained_type,
                         checkpoint_dir=args.checkpoint_dir)
    except Exception as e:
        print(e)
        print('The program is exiting...')
        sys.exit()

    criterion = create_losses(args.neg_ratio, NUM_CLASSES)

    # NOTE(review): the `decay` kwarg is the legacy TF1/early-TF2 per-step LR
    # decay (and is named like a weight decay here, which it is not); newer
    # tf.keras optimizers removed it — confirm the pinned TF version.
    optimizer = tf.keras.optimizers.SGD(learning_rate=args.initial_lr,
                                        momentum=args.momentum,
                                        decay=args.weight_decay)

    # --- Training loop ---
    for epoch in range(args.num_epochs):
        # Running (cumulative) means of the losses over the epoch so far.
        avg_loss = 0.0
        avg_conf_loss = 0.0
        avg_loc_loss = 0.0
        start = time.time()
        for i, (imgs, gt_confs, gt_locs) in enumerate(batch_generator):
            loss, conf_loss, loc_loss = train_step(imgs, gt_confs, gt_locs,
                                                   ssd, criterion, optimizer)
            # Incremental mean update: new_mean = (old_mean*i + x) / (i+1).
            avg_loss = (avg_loss * i + loss.numpy()) / (i + 1)
            avg_conf_loss = (avg_conf_loss * i + conf_loss.numpy()) / (i + 1)