Example 1
def main():

    logger.info(args)

    model = Net(numAngle=CONFIGS["MODEL"]["NUMANGLE"],
                numRho=CONFIGS["MODEL"]["NUMRHO"],
                backbone=CONFIGS["MODEL"]["BACKBONE"])
    model = model.cuda(device=CONFIGS["TRAIN"]["GPU_ID"])

    if args.model:
        if isfile(args.model):
            logger.info("=> loading pretrained model '{}'".format(args.model))
            checkpoint = torch.load(args.model)
            model.load_state_dict(checkpoint['state_dict'])
            logger.info("=> loaded checkpoint '{}' (epoch {})".format(
                args.model, checkpoint['epoch']))
        else:
            logger.info("=> no pretrained model found at '{}'".format(
                args.model))
    # dataloader
    test_loader = get_loader(CONFIGS["DATA"]["TEST_DIR"],
                             CONFIGS["DATA"]["TEST_LABEL_FILE"],
                             batch_size=1,
                             num_thread=CONFIGS["DATA"]["WORKERS"],
                             test=True)

    logger.info("Data loading done.")

    logger.info("Start testing.")
    total_time = test(test_loader, model, args)

    logger.info(
        "Test done! Total %d imgs at %.4f secs without image io, fps: %.3f" %
        (len(test_loader), total_time, len(test_loader) / total_time))
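
This example relies on module-level `args`, `CONFIGS`, and `logger` objects that are created elsewhere in the script. A minimal sketch of that setup, assuming a YAML config file and a `--model` command-line flag (the file names and flag defaults below are illustrative, not taken from the repository):

import argparse
import logging

import yaml  # assumes PyYAML is installed

# Illustrative argument parsing; the real script may expose more flags.
parser = argparse.ArgumentParser(description="Deep Hough Transform testing")
parser.add_argument('--config', default='./config.yml', help='path to the YAML config file')
parser.add_argument('--model', default=None, help='path to a pretrained checkpoint')
args = parser.parse_args()

# CONFIGS is read as a nested dict, e.g. CONFIGS["MODEL"]["NUMANGLE"].
with open(args.config) as f:
    CONFIGS = yaml.safe_load(f)

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)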
Example 2
def main():

    logger.info(args)

    model = Net(numAngle=CONFIGS["MODEL"]["NUMANGLE"],
                numRho=CONFIGS["MODEL"]["NUMRHO"],
                backbone=CONFIGS["MODEL"]["BACKBONE"])
    model = model.cuda(device=CONFIGS["TRAIN"]["GPU_ID"])

    # load the pretrained model (you are free to load your own models)
    state_dict = torch.hub.load_state_dict_from_url(
        "http://data.kaizhao.net/projects/deep-hough-transform/dht_r50_fpn_sel-c9a29d40.pth",
        check_hash=True)
    model.load_state_dict(state_dict)


    if args.model:
        if isfile(args.model):
            logger.info("=> loading pretrained model '{}'".format(args.model))
            checkpoint = torch.load(args.model)
            model.load_state_dict(checkpoint)
            logger.info("=> loaded checkpoint '{}'"
                  .format(args.model))
        else:
            logger.info("=> no pretrained model found at '{}'".format(args.model))
            exit()
    # dataloader
    test_loader = get_loader(CONFIGS["DATA"]["TEST_DIR"],
                             CONFIGS["DATA"]["TEST_LABEL_FILE"],
                             batch_size=1,
                             num_thread=CONFIGS["DATA"]["WORKERS"],
                             test=True)

    logger.info("Data loading done.")

    
    logger.info("Start testing.")
    total_time = test(test_loader, model, args)
    
    logger.info("Test done! Total %d imgs at %.4f secs without image io, fps: %.3f" % (len(test_loader), total_time, len(test_loader) / total_time))
Example 3
    logger = Logger('./logs')
    batch_size = 1
    load_checkpoint = True

    print('%s: calling main function ... ' % os.path.basename(__file__))
    csv_path = '../data/detection_test.csv'
    img_path = '../data/detection_test_t'
    dataset = Rand_num(csv_path, img_path, 112*4, None)
    sampler = SequentialSampler(dataset)
    loader = DataLoader(dataset, batch_size=batch_size, sampler=sampler,
                        shuffle=False, num_workers=2)

    net = Net(14)
    if load_checkpoint:
        net.load_state_dict(torch.load(SAVE_PATH))
        
    net.cuda()
        
    accu_tp = []
    accu_fp = []
    accu_iou = []
    for epoch in range(1):
        for num, data in enumerate(loader, 0):
            # get the inputs
            images, inputs, labels = data
            inputs, labels = inputs.float()[0] / 256, labels.float()

            # wrap them in Variables and move them to the GPU
            inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
            outputs = net.forward(inputs, False)
            img = (inputs * 256).data.cpu().int().numpy()[0, 0]
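
The example is truncated after the forward pass. The `accu_tp`, `accu_fp`, and `accu_iou` lists suggest that per-image true positives, false positives, and IoU scores are accumulated further down. A generic box-IoU helper of the kind such a loop usually relies on (purely illustrative, not code from this repository):

def box_iou(box_a, box_b):
    """Intersection over union of two boxes given as (x1, y1, x2, y2)."""
    ix1, iy1 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    ix2, iy2 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0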
Example 4
def main():

    logger.info(args)
    assert os.path.isdir(CONFIGS["DATA"]["DIR"])

    if CONFIGS['TRAIN']['SEED'] is not None:
        random.seed(CONFIGS['TRAIN']['SEED'])
        torch.manual_seed(CONFIGS['TRAIN']['SEED'])
        cudnn.deterministic = True

    model = Net(numAngle=CONFIGS["MODEL"]["NUMANGLE"],
                numRho=CONFIGS["MODEL"]["NUMRHO"],
                backbone=CONFIGS["MODEL"]["BACKBONE"])

    if CONFIGS["TRAIN"]["DATA_PARALLEL"]:
        logger.info("Model Data Parallel")
        model = nn.DataParallel(model).cuda()
    else:
        model = model.cuda(device=CONFIGS["TRAIN"]["GPU_ID"])

    # optimizer
    optimizer = torch.optim.Adam(
        model.parameters(),
        lr=CONFIGS["OPTIMIZER"]["LR"],
        weight_decay=CONFIGS["OPTIMIZER"]["WEIGHT_DECAY"])

    # learning rate scheduler
    scheduler = lr_scheduler.MultiStepLR(
        optimizer,
        milestones=CONFIGS["OPTIMIZER"]["STEPS"],
        gamma=CONFIGS["OPTIMIZER"]["GAMMA"])
    best_acc1 = 0
    if args.resume:
        if isfile(args.resume):
            logger.info("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            model.load_state_dict(checkpoint['state_dict'])
            # optimizer.load_state_dict(checkpoint['optimizer'])
            logger.info("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            logger.info("=> no checkpoint found at '{}'".format(args.resume))

    # dataloader
    train_loader = get_loader(CONFIGS["DATA"]["DIR"],
                              CONFIGS["DATA"]["LABEL_FILE"],
                              batch_size=CONFIGS["DATA"]["BATCH_SIZE"],
                              num_thread=CONFIGS["DATA"]["WORKERS"],
                              split='train')
    val_loader = get_loader(CONFIGS["DATA"]["VAL_DIR"],
                            CONFIGS["DATA"]["VAL_LABEL_FILE"],
                            batch_size=1,
                            num_thread=CONFIGS["DATA"]["WORKERS"],
                            split='val')

    logger.info("Data loading done.")

    # Tensorboard summary

    writer = SummaryWriter(log_dir=CONFIGS["MISC"]["TMP"])

    start_epoch = 0
    best_acc = best_acc1
    is_best = False
    start_time = time.time()

    if CONFIGS["TRAIN"]["RESUME"] is not None:
        raise NotImplementedError

    if CONFIGS["TRAIN"]["TEST"]:
        validate(val_loader, model, 0, writer, args)
        return

    logger.info("Start training.")

    for epoch in range(start_epoch, CONFIGS["TRAIN"]["EPOCHS"]):

        train(train_loader, model, optimizer, epoch, writer, args)
        acc = validate(val_loader, model, epoch, writer, args)
        #return
        scheduler.step()

        if best_acc < acc:
            is_best = True
            best_acc = acc
        else:
            is_best = False

        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc,
                'optimizer': optimizer.state_dict()
            },
            is_best,
            path=CONFIGS["MISC"]["TMP"])

        t = time.time() - start_time
        elapsed = DayHourMinute(t)
        t /= (epoch + 1) - start_epoch  # seconds per epoch
        t = (CONFIGS["TRAIN"]["EPOCHS"] - epoch - 1) * t
        remaining = DayHourMinute(t)

        logger.info(
            "Epoch {0}/{1} finishied, auxiliaries saved to {2} .\t"
            "Elapsed {elapsed.days:d} days {elapsed.hours:d} hours {elapsed.minutes:d} minutes.\t"
            "Remaining {remaining.days:d} days {remaining.hours:d} hours {remaining.minutes:d} minutes."
            .format(epoch,
                    CONFIGS["TRAIN"]["EPOCHS"],
                    CONFIGS["MISC"]["TMP"],
                    elapsed=elapsed,
                    remaining=remaining))

    logger.info("Optimization done, ALL results saved to %s." %
                CONFIGS["MISC"]["TMP"])