Example #1
def main():
    # ========= dataloaders ===========
    train_dataloader = create_train_dataloader(root=args.datapath,
                                               batch_size=args.batch_size)
    val_dataloader = create_val_dataloader(root=args.datapath,
                                           batch_size=args.batch_size)

    start_epoch = 0

    # ======== models & loss ==========
    model = BiSeNetV2(n_classes=19, output_aux=True)
    loss = OhemCELoss(0.7)
    loss_aux = [OhemCELoss(0.7) for _ in range(4)]

    # ========= load weights ===========
    if args.resume:
        checkpoint = torch.load(args.pretrained, map_location=device)
        # For checkpoints saved by this script ({'bisenetv2': ..., 'epoch': ...}):
        # model.load_state_dict(checkpoint['bisenetv2'], strict=False)
        # start_epoch = checkpoint['epoch'] + 1
        # Here the pretrained file is assumed to be a bare state_dict:
        model.load_state_dict(checkpoint, strict=False)
        start_epoch = 0
        print(f'\tLoaded checkpoint from {args.pretrained}\n')
        time.sleep(1)
    else:
        print(
            "******************* Start training from scratch *******************\n"
        )
        # time.sleep(2)

    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                weight_decay=args.weight_decay)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='min', patience=args.lr_patience, verbose=True)

    # ========================================================================
    for epoch in range(start_epoch, args.epochs):
        # =========== train / validate ===========
        train_loss = train_one_epoch(model, loss, loss_aux, optimizer,
                                     train_dataloader, epoch)
        val_loss = validate(model, loss, val_dataloader, epoch)
        scheduler.step(val_loss)
        logging.info(f"\ttraining epoch={epoch} .. train_loss={train_loss}")
        logging.info(f"\tvalidation epoch={epoch} .. val_loss={val_loss}")
        time.sleep(2)
        # ============= tensorboard =============
        writer.add_scalar('train_loss', train_loss, epoch)
        writer.add_scalar('val_loss', val_loss, epoch)
        # ============== save model =============
        if epoch % args.savefreq == 0:
            checkpoint_state = {
                'bisenetv2': model.state_dict(),
                "epoch": epoch
            }
            savepath = os.path.join(args.savepath,
                                    f'weights_epoch_{epoch}.pth.tar')
            torch.save(checkpoint_state, savepath)
            print(f'\n\t*** Saved checkpoint in {savepath} ***\n')
            time.sleep(2)
    writer.close()
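
The snippet above assumes a train_one_epoch helper that combines the main OHEM loss with the four auxiliary losses returned by BiSeNetV2 when output_aux=True. A minimal sketch of such a helper (hypothetical; the original implementation is not shown):

def train_one_epoch(model, loss, loss_aux, optimizer, dataloader, epoch):
    # Hypothetical sketch: the real helper is not part of the snippet.
    # Assumes model(images) returns the main logits followed by the four
    # auxiliary outputs, and that `device` is the same global used when
    # loading the checkpoint above.
    model.train()
    total_loss = 0.0
    for images, labels in dataloader:
        images, labels = images.to(device), labels.to(device)
        logits, *aux_logits = model(images)
        batch_loss = loss(logits, labels)
        for aux_fn, aux_out in zip(loss_aux, aux_logits):
            batch_loss = batch_loss + aux_fn(aux_out, labels)
        optimizer.zero_grad()
        batch_loss.backward()
        optimizer.step()
        total_loss += batch_loss.item()
    return total_loss / len(dataloader)
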
Example #2
--test_image_root /home/gohyojun/바탕화면/Anthroprocene/Dataset/crane
--test_image_gt_root /home/gohyojun/바탕화면/Anthroprocene/Dataset/crane_labeled
--test_image_density_root /home/gohyojun/바탕화면/Anthroprocene/Dataset/density_map
"""


if __name__=="__main__":
    # argument parsing.
    args = parser.parse_args()
    cfg = Config(args)                                                          # configuration
    model = CSRNet().to(cfg.device)                                         # model
    criterion = nn.MSELoss(reduction='sum')                             # objective (replaces deprecated size_average=False)
    optimizer = torch.optim.Adam(model.parameters(), lr=cfg.lr)         # optimizer

    train_dataloader = create_train_dataloader(cfg.train_dataset_root, use_flip=True, batch_size=cfg.batch_size)
    test_dataloader  = create_test_dataloader(cfg.test_dataset_root)             # dataloader

    min_mae = sys.maxsize
    min_mae_epoch = -1
    for epoch in range(1, cfg.epochs):                          # start training
        model.train()
        epoch_loss = 0.0
        for i, data in enumerate(tqdm(train_dataloader)):
            image = data['image'].to(cfg.device)
            gt_densitymap = data['densitymap'].to(cfg.device) * 16  # TODO: x16 compensates for the 1/4 rescale of the density map (4^2 area factor)
            et_densitymap = model(image)                        # forward propagation
            loss = criterion(et_densitymap, gt_densitymap)      # calculate loss
            epoch_loss += loss.item()
            optimizer.zero_grad()
            loss.backward()                                     # back propagation
            optimizer.step()                                    # update weights
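
The excerpt ends before the evaluation that would use min_mae and min_mae_epoch. A minimal sketch of a per-epoch MAE check, placed at the end of the epoch loop and assuming test_dataloader yields the same 'image'/'densitymap' dict keys as the training loader (hypothetical; the original evaluation code is cut off):

        model.eval()                                        # evaluate after each epoch
        with torch.no_grad():
            mae = 0.0
            for data in test_dataloader:
                image = data['image'].to(cfg.device)
                gt_densitymap = data['densitymap'].to(cfg.device)
                et_densitymap = model(image)
                # predicted count is the sum (integral) of the density map
                mae += abs(et_densitymap.sum() - gt_densitymap.sum()).item()
            mae /= len(test_dataloader)
        if mae < min_mae:
            min_mae, min_mae_epoch = mae, epoch
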
Example #3
import os
from collections import OrderedDict

import torch

DEVICE = torch.device('cpu')
torch.backends.cudnn.benchmark = True  # only affects CUDA convolutions; a no-op on CPU

net = create_network(config.num_channels)
net.to(DEVICE)
criterion = config.create_loss_function().to(DEVICE)

optimizer = config.create_optimizer(net.parameters())
lr_scheduler = config.create_lr_scheduler(optimizer)

dataset_maker = TrainValDataloaderMaker(config.data_paths)

print('--- Dataset Loaded. Total: {} samples'.format(dataset_maker.num))
ds_train = create_train_dataloader()
ds_val = create_val_dataloader()

now_epoch = 0

if args.auto_continue:
    args.resume = os.path.join(config.model_dir, 'last.checkpoint')
if args.resume is not None and os.path.isfile(args.resume):
    now_epoch = load_checkpoint(args.resume, net, optimizer, lr_scheduler)

while True:
    if now_epoch > config.num_epochs:
        break
    now_epoch = now_epoch + 1

    descrip_str = 'Training epoch:{}/{} -- lr:{}'.format(
        now_epoch, config.num_epochs, optimizer.param_groups[0]['lr'])
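
Example #3 resumes training through a load_checkpoint helper that is not shown. A minimal sketch under the assumption that checkpoints store 'net', 'optimizer', 'lr_scheduler', and 'epoch' keys (the key names are hypothetical, not the original format):

def load_checkpoint(path, net, optimizer, lr_scheduler):
    # Hypothetical helper: restores training state and returns the last epoch.
    checkpoint = torch.load(path, map_location='cpu')
    net.load_state_dict(checkpoint['net'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
    return checkpoint['epoch']
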
Example #4
def main():
    # ========= dataloaders ===========
    train_dataloader = create_train_dataloader(root=args.datapath,
                                               batch_size=args.batch_size)
    test_dataloader = create_val_dataloader(root=args.datapath,
                                            batch_size=args.batch_size)
    # train_dataloader, test_dataloader = create_CK_dataloader(batch_size=args.batch_size)
    start_epoch = 0
    # ======== models & loss ==========
    mini_xception = Mini_Xception()
    loss = nn.CrossEntropyLoss()
    # ========= load weights ===========
    if args.resume or args.evaluate:
        checkpoint = torch.load(args.pretrained, map_location=device)
        mini_xception.load_state_dict(checkpoint['mini_xception'],
                                      strict=False)
        start_epoch = checkpoint['epoch'] + 1
        print(f'\tLoaded checkpoint from {args.pretrained}\n')
        time.sleep(1)
    else:
        print(
            "******************* Start training from scratch *******************\n"
        )
        time.sleep(2)

    if args.evaluate:
        if args.mode == 'test':
            test_dataloader = create_test_dataloader(
                root=args.datapath, batch_size=args.batch_size)
        elif args.mode == 'val':
            test_dataloader = create_val_dataloader(root=args.datapath,
                                                    batch_size=args.batch_size)
        else:
            test_dataloader = create_train_dataloader(
                root=args.datapath, batch_size=args.batch_size)

        validate(mini_xception, loss, test_dataloader, 0)
        return

    # =========== optimizer ===========
    # parameters = mini_xception.named_parameters()
    # for name, p in parameters:
    #     print(p.requires_grad, name)
    # return
    optimizer = torch.optim.Adam(mini_xception.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='min', patience=args.lr_patience, verbose=True)
    # ========================================================================
    for epoch in range(start_epoch, args.epochs):
        # =========== train / validate ===========
        train_loss = train_one_epoch(mini_xception, loss, optimizer,
                                     train_dataloader, epoch)
        val_loss, accuracy, precision, recall = validate(
            mini_xception, loss, test_dataloader, epoch)
        scheduler.step(val_loss)
        val_loss, accuracy, precision, recall = round(val_loss, 3), round(
            accuracy, 3), round(precision, 3), round(recall, 3)
        logging.info(f"\ttraining epoch={epoch} .. train_loss={train_loss}")
        logging.info(f"\tvalidation epoch={epoch} .. val_loss={val_loss}")
        logging.info(
            f'\tAccuracy = {accuracy*100} % .. Precision = {precision*100} % .. Recall = {recall*100} % \n'
        )
        time.sleep(2)
        # ============= tensorboard =============
        writer.add_scalar('train_loss', train_loss, epoch)
        writer.add_scalar('val_loss', val_loss, epoch)
        writer.add_scalar('precision', precision, epoch)
        writer.add_scalar('recall', recall, epoch)
        writer.add_scalar('accuracy', accuracy, epoch)
        # ============== save model =============
        if epoch % args.savefreq == 0:
            checkpoint_state = {
                'mini_xception': mini_xception.state_dict(),
                "epoch": epoch
            }
            savepath = os.path.join(args.savepath,
                                    f'weights_epoch_{epoch}.pth.tar')
            torch.save(checkpoint_state, savepath)
            print(f'\n\t*** Saved checkpoint in {savepath} ***\n')
            time.sleep(2)
    writer.close()
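
Example #4 expects validate to return a (val_loss, accuracy, precision, recall) tuple. A minimal sketch for a multi-class classifier with macro-averaged precision and recall, assuming the model returns (N, num_classes) logits and the same global device (hypothetical; the original implementation is not shown):

def validate(model, loss_fn, dataloader, epoch):
    # Hypothetical sketch: the real validate() is not part of the snippet.
    model.eval()
    total_loss, correct, total = 0.0, 0, 0
    all_preds, all_labels = [], []
    with torch.no_grad():
        for images, labels in dataloader:
            images, labels = images.to(device), labels.to(device)
            logits = model(images)                  # assumes (N, num_classes)
            total_loss += loss_fn(logits, labels).item()
            preds = logits.argmax(dim=1)
            correct += (preds == labels).sum().item()
            total += labels.size(0)
            all_preds.append(preds.cpu())
            all_labels.append(labels.cpu())
    preds, labels = torch.cat(all_preds), torch.cat(all_labels)
    precisions, recalls = [], []
    for c in labels.unique():
        tp = ((preds == c) & (labels == c)).sum().item()
        fp = ((preds == c) & (labels != c)).sum().item()
        fn = ((preds != c) & (labels == c)).sum().item()
        precisions.append(tp / (tp + fp) if tp + fp else 0.0)
        recalls.append(tp / (tp + fn) if tp + fn else 0.0)
    precision = sum(precisions) / len(precisions)   # macro average
    recall = sum(recalls) / len(recalls)
    return total_loss / len(dataloader), correct / total, precision, recall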