    tr.ToTensor()])

voc_train = pascal.VOCSegmentation(split='train', transform=composed_transforms_tr)
voc_val = pascal.VOCSegmentation(split='val', transform=composed_transforms_ts)

if use_sbd:
    print("Using SBD dataset")
    sbd_train = sbd.SBDSegmentation(split=['train', 'val'], transform=composed_transforms_tr)
    # Combine PASCAL VOC and SBD for training, excluding any image that also
    # appears in the VOC validation split.
    db_train = combine_dbs.CombineDBs([voc_train, sbd_train], excluded=[voc_val])
else:
    db_train = voc_train

trainloader = DataLoader(db_train, batch_size=p['trainBatch'], shuffle=True, num_workers=2)
testloader = DataLoader(voc_val, batch_size=testBatch, shuffle=False, num_workers=2)

# Save the hyper-parameter dictionary alongside the experiment outputs.
utils.generate_param_report(os.path.join(save_dir, exp_name + '.txt'), p)

num_img_tr = len(trainloader)
num_img_ts = len(testloader)
running_loss_tr = 0.0
running_loss_ts = 0.0
aveGrad = 0
global_step = 0
print("Training Network")

# Main Training and Testing Loop
for epoch in range(resume_epoch, nEpochs):
    start_time = timeit.default_timer()

    # Decay the learning rate once per "epoch_size" epochs using the poly policy.
    if epoch % p['epoch_size'] == p['epoch_size'] - 1:
        lr_ = utils.lr_poly(p['lr'], epoch, nEpochs, 0.9)
        print('(poly lr policy) learning rate: ', lr_)
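        # NOTE (assumption, not shown in this file): utils.lr_poly is taken to be
        # the standard polynomial ("poly") decay used in DeepLab-style training,
        # roughly:
        #
        #     def lr_poly(base_lr, iter_, max_iter, power):
        #         return base_lr * ((1 - float(iter_) / max_iter) ** power)
        #
        # With power=0.9, the learning rate decays smoothly from p['lr'] toward 0
        # as epoch approaches nEpochs.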