示例#1
0
if __name__ == '__main__':
    # Load the training configuration. safe_load is used instead of the
    # deprecated bare yaml.load(f): calling yaml.load without an explicit
    # Loader is deprecated (PyYAML >= 5.1) and can execute arbitrary code
    # when the YAML file is untrusted.
    with open('config.yaml', 'r') as f:
        cfg = yaml.safe_load(f)

    # Pick the per-architecture section of the config (SSD300 vs SSD512).
    if args.arch == 'ssd300':
        config = cfg['SSD300']
    else:
        config = cfg['SSD512']

    # Anchor (default/prior) boxes derived from the chosen architecture.
    default_boxes = generate_default_boxes(config)

    dataloader, info = create_dataloader(args.data_dir, args.batch_size,
                                         config['image_size'], default_boxes,
                                         args.augmentation, args.num_examples)

    # Build the SSD network from the 'base' pretrained backbone and move it
    # to the target device.
    ssd = create_ssd(NUM_CLASSES, args.arch, 'base', args.pretrained_path)
    ssd.to(device)

    # MultiBox-style loss; neg_ratio controls hard-negative mining.
    criterion = create_loss(args.neg_ratio, NUM_CLASSES)

    optimizer = optim.SGD(ssd.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    # Decay the learning rate by 10x at ~65% and ~80% of the total epochs.
    scheduler = optim.lr_scheduler.MultiStepLR(
        optimizer,
        milestones=[int(args.num_epochs * 0.65),
                    int(args.num_epochs * 0.8)],
        gamma=0.1,
        last_epoch=-1)
示例#2
0
        # NOTE(review): this line sits inside a try: that opens above this
        # view; cfg keys are the upper-cased architecture names (e.g. 'SSD300').
        config = cfg[args.arch.upper()]
    except AttributeError:
        # AttributeError fires only when args.arch has no .upper() (not a str).
        # NOTE(review): an unknown-but-string arch raises KeyError, which this
        # handler does not catch — confirm whether that is intended.
        raise ValueError('Unknown architecture: {}'.format(args.arch))

    # Anchor (default/prior) boxes for the selected architecture config.
    default_boxes = generate_default_boxes(config)

    # Test-mode batch generator over the chosen VOC data year.
    batch_generator, info = create_batch_generator(args.data_dir,
                                                   args.data_year,
                                                   default_boxes,
                                                   config['image_size'],
                                                   BATCH_SIZE,
                                                   args.num_examples,
                                                   mode='test')

    # Build the SSD model; any failure (e.g. bad checkpoint) aborts the run.
    try:
        ssd = create_ssd(NUM_CLASSES, args.arch, args.pretrained_type,
                         args.checkpoint_dir, args.checkpoint_path)
    except Exception as e:
        print(e)
        print('The program is exiting...')
        sys.exit()

    # Output folders for rendered images and raw detection files.
    os.makedirs('outputs/images', exist_ok=True)
    os.makedirs('outputs/detects', exist_ok=True)
    visualizer = ImageVisualizer(info['idx_to_name'],
                                 save_dir='outputs/images')

    # Iterate over the test set with a progress bar; the loop body continues
    # beyond this view.
    for i, (filename, imgs, gt_confs, gt_locs) in enumerate(
            tqdm(batch_generator,
                 total=info['length'],
                 desc='Testing...',
                 unit='images')):
示例#3
0
    # NOTE(review): these lines are inside a scope that starts above this
    # view; cfg/args are defined there. Architecture is hard-coded to SSD300.
    config = cfg['SSD300']
    default_boxes = generate_default_boxes(config)

    # Fetch the dataset configured in voc_data.py as training/validation
    # batches.
    batch_generator, val_generator, info = create_batch_generator(
        args.data_dir,
        default_boxes,
        args.batch_size,
        args.num_batches,
        mode='train')

    # Set up the actual SSD model. If a partially-trained model exists in
    # checkpoint_dir, it can be restored and training resumed from it.
    try:
        ssd = create_ssd(NUM_CLASSES,
                         args.pretrained_type,
                         checkpoint_dir=args.checkpoint_dir)
    except Exception as e:
        print(e)
        print('The program is exiting...')
        sys.exit()

    # Loss computed with hard negative mining (neg:pos ratio from the CLI).
    criterion = create_losses(args.neg_ratio, NUM_CLASSES)
    steps_per_epoch = info['length'] // args.batch_size

    # The SSD paper only says "The learning rate decay policy is slightly
    # different for each dataset" without giving the exact schedule, so this
    # piecewise decay (at 2/3 and 5/6 of total training steps) presumably
    # follows the original reference code.
    lr_fn = PiecewiseConstantDecay(boundaries=[
        int(steps_per_epoch * args.num_epochs * 2 / 3),
        int(steps_per_epoch * args.num_epochs * 5 / 6)