Example 1
    parser.add_argument('--lr-steps', default=[16, 19], nargs='+', type=int, help='decrease lr every step-size epochs')
    parser.add_argument('--lr-gamma', default=0.1, type=float, help='decrease lr by a factor of lr-gamma')
    parser.add_argument('--print-freq', default=1000, type=int, help='print frequency')
    parser.add_argument('--output-dir', default=None, help='path where to save')
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('-rp', '--results-path', default='results',
                        help='path to save detection results (only for voc)')
    parser.add_argument('--start_epoch', default=0, type=int, help='start epoch')
    parser.add_argument('--aspect-ratio-group-factor', default=3, type=int)  # batch images with similar aspect ratios together
    parser.add_argument('-i', "--init", dest="init", help="if use init sample", action="store_true")
    parser.add_argument("--test-only", dest="test_only", help="Only test the model", action="store_true")
    parser.add_argument('-s', "--skip", dest="skip", help="Skip first cycle and use pretrained model to save time",
                        action="store_true")
    parser.add_argument('-m', "--mutual", dest="mutual", help="use mutual information",
                        action="store_true")
    parser.add_argument('-mr', default=1.2, type=float, help='mutual range')
    parser.add_argument('-bp', default=1.15, type=float, help='base point')
    parser.add_argument("--pretrained", dest="pretrained", help="Use pre-trained models from the modelzoo",
                        action="store_true")
    # distributed training parameters
    parser.add_argument('--world-size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')
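    # Sketch (an assumption, not part of the original script): these two flags
    # follow the torchvision detection-reference setup and are usually consumed
    # by something like
    #   torch.distributed.init_process_group(backend='nccl',
    #                                        init_method=args.dist_url,
    #                                        world_size=args.world_size,
    #                                        rank=rank)
    # when the script is launched with torchrun / torch.distributed.launch.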

    args = parser.parse_args()

    if args.output_dir:
        utils.mkdir(args.output_dir)

    main(args)
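
A minimal sketch (an assumption, not part of the original example) of how the schedule flags above are usually consumed in a PyTorch training loop; args is the namespace parsed above, while the model, optimizer and epoch count below are placeholders:

import torch

model = torch.nn.Linear(4, 2)                               # placeholder model
optimizer = torch.optim.SGD(model.parameters(), lr=0.02)    # placeholder optimizer

# --lr-steps and --lr-gamma map directly onto MultiStepLR: with the defaults
# above, the learning rate is multiplied by 0.1 at epochs 16 and 19.
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
    optimizer, milestones=args.lr_steps, gamma=args.lr_gamma)

for epoch in range(args.start_epoch, 20):                   # 20 total epochs assumed
    # train_one_epoch(model, optimizer, data_loader, ...)   # training step omitted
    lr_scheduler.step()
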
Example 2

    parser.add_argument('--score_threshold',  # flag name assumed; the original snippet is truncated here
                        type=float,
                        default=0.05,
                        help='used during inference')
    parser.add_argument('--iou_threshold',
                        type=float,
                        default=0.5,
                        help='used during inference')
    parser.add_argument('--max-detections',
                        type=int,
                        default=300,
                        help='used during inference')
    parser.add_argument('--resume', default='', help='resume from checkpoint')
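    # Sketch (an assumption, not part of the original script): with a torchvision
    # Faster R-CNN-style detection model, the three thresholds above are typically
    # applied to the box head once the model is built, e.g.
    #   model.roi_heads.score_thresh = args.score_threshold        # confidence cut-off (flag name assumed above)
    #   model.roi_heads.nms_thresh = args.iou_threshold            # NMS IoU threshold
    #   model.roi_heads.detections_per_img = args.max_detections   # cap on detections kept per image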

    args = parser.parse_args()
    # the experiment name is the parent directory of the checkpoint passed via --resume
    exp_name = args.resume.split("/")[-2]
    out_dir = os.path.join('../jsons', args.dataset, exp_name)
    utils.mkdir(out_dir)

    root = '../../../../datasets/coco/images'
    if args.dataset == 'lvis':
        annotations = "../../../../datasets/coco/annotations/lvis_v1_val.json"
        dset = LVISDetection(root,
                             annotations,
                             transforms=transforms.ToTensor())
        num_classes = 1204  # 1203 LVIS v1 categories + background
    elif args.dataset == 'coco':
        annotations = "../../../../datasets/coco/annotations/instances_val2017.json"
        dset = CocoDetection(root,
                             annotations,
                             transforms=transforms.ToTensor())
        num_classes = 91  # COCO category ids run up to 90, plus background (torchvision convention)
    else: