def main():
    """Entry point: build the SENet depth model, wrap for multi-GPU, and train."""
    global args
    args = parser.parse_args()

    model = define_model(is_resnet=False, is_densenet=False, is_senet=True)

    # Scale DataParallel wrapping and batch size to the visible GPU count.
    gpu_count = torch.cuda.device_count()
    if gpu_count == 8:
        model = torch.nn.DataParallel(model, device_ids=list(range(8))).cuda()
        batch_size = 64
    elif gpu_count == 4:
        model = torch.nn.DataParallel(model, device_ids=list(range(4))).cuda()
        batch_size = 32
    else:
        model = model.cuda()
        batch_size = 8

    cudnn.benchmark = True  # let cudnn auto-tune conv algorithms for fixed input sizes

    optimizer = torch.optim.Adam(model.parameters(), args.lr,
                                 weight_decay=args.weight_decay)
    train_loader = loaddata.getTrainingData(batch_size)

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        train(train_loader, model, optimizer, epoch)
        save_checkpoint({'state_dict': model.state_dict()})
def main():
    """Entry point: train a DenseNet-based depth model, saving weights every epoch.

    Checkpoints are written to a ``model_output`` directory next to this
    script as ``model_epoch_<N>.pth``.
    """
    global args
    args = parser.parse_args()

    model = define_model(is_resnet=False, is_densenet=True, is_senet=False)

    # Scale DataParallel wrapping and batch size to the visible GPU count.
    gpu_count = torch.cuda.device_count()
    if gpu_count == 8:
        model = torch.nn.DataParallel(model, device_ids=list(range(8))).cuda()
        batch_size = 64
    elif gpu_count == 4:
        model = torch.nn.DataParallel(model, device_ids=list(range(4))).cuda()
        batch_size = 32
    else:
        model = model.cuda()
        batch_size = 1

    cudnn.benchmark = True  # let cudnn auto-tune conv algorithms for fixed input sizes

    optimizer = torch.optim.Adam(model.parameters(), args.lr,
                                 weight_decay=args.weight_decay)
    train_loader = loaddata.getTrainingData(batch_size)

    # Save checkpoints next to this script; exist_ok avoids the
    # check-then-create race of the original exists()/mkdir() pair.
    model_out_path = Path(__file__).resolve().parent / 'model_output'
    model_out_path.mkdir(parents=True, exist_ok=True)

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        train(train_loader, model, optimizer, epoch)
        torch.save(model.state_dict(),
                   model_out_path / "model_epoch_{}.pth".format(epoch))
def main():
    """Entry point: build the SENet depth model and run the training loop."""
    global args
    args = parser.parse_args()

    model = define_model(is_resnet=False, is_densenet=False, is_senet=True)

    n_gpus = torch.cuda.device_count()
    if n_gpus == 8:
        model = torch.nn.DataParallel(model, device_ids=list(range(8))).cuda()
        batch_size = 64
    elif n_gpus == 4:
        model = torch.nn.DataParallel(model, device_ids=list(range(4))).cuda()
        batch_size = 32
    else:
        model = model.cuda()
        batch_size = 1

    # Enable the inbuilt cudnn auto-tuner to find the best conv algorithm
    # for this hardware (worthwhile when input sizes are fixed).
    cudnn.benchmark = True

    optimizer = torch.optim.Adam(model.parameters(), args.lr,
                                 weight_decay=args.weight_decay)
    train_loader = loaddata.getTrainingData(batch_size)

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        train(train_loader, model, optimizer, epoch)
        save_checkpoint({'state_dict': model.state_dict()})
def main():
    """Entry point: build the model, append NYU dataset args, train and evaluate.

    Per-epoch loss and RMSE are plotted to a visdom window and a checkpoint
    is written under ``args.ckp_path`` each epoch.
    """
    global args
    args = parser.parse_args()

    model = define_model()
    if args.load:
        model.load_state_dict(torch.load(args.load)['state_dict'])
        print('Model loaded from {}'.format(args.load))

    if torch.cuda.is_available():
        model = model.cuda()
        device_num = torch.cuda.device_count()
        if device_num > 1:
            model = torch.nn.DataParallel(model,
                                          device_ids=list(range(device_num)))
            # Scale the batch size with the number of replicas.
            args.batch_size *= device_num

    cudnn.benchmark = True
    optimizer = torch.optim.Adam(model.parameters(), args.lr,
                                 weight_decay=args.weight_decay)
    print(args)

    # KITTI variant kept for reference (disabled in this build):
    #   --image_size [[640, 192], [320, 96]], --e 0.25, --range 80,
    #   with loaddata.get_kitti_train_data / get_kitti_test_data.

    # NYU-specific arguments are appended and args re-parsed so the loaders
    # can read image_size / e / range off the namespace.
    parser.add_argument('--image_size', default=[[304, 228], [152, 114]],
                        help='')
    parser.add_argument('--e', default=0.01, type=float, help='avoid log0')
    parser.add_argument('--range', default=10, type=int)
    args = parser.parse_args()
    train_loader = loaddata.getTrainingData(args)
    test_loader = loaddata.getTestingData(args)
    print('nyu')

    setup_logging()
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        loss = train(train_loader, model, optimizer, epoch)
        rmse = test(test_loader, model, epoch)
        vis.line([[np.array(loss)], [np.array(rmse)]], [np.array(epoch)],
                 win='train', update='append')
        save_checkpoint({'state_dict': model.state_dict()},
                        filename=os.path.join(args.ckp_path,
                                              '%02dcheckpoint.pth.tar' % epoch))
def main():
    """Entry point: build the SENet model, optionally resume, then train."""
    global args
    args = parser.parse_args()

    model = define_model(is_resnet=False, is_densenet=False, is_senet=True)

    if args.start_epoch != 0:
        # Resuming: presumably the checkpoint was written from a DataParallel
        # model, so wrap before loading the saved weights — TODO confirm.
        model = torch.nn.DataParallel(model, device_ids=[0, 1]).cuda()
        model = model.cuda()
        model.load_state_dict(torch.load(args.model)['state_dict'])
        batch_size = 2
    else:
        model = model.cuda()
        batch_size = 2

    cudnn.benchmark = True
    optimizer = torch.optim.Adam(model.parameters(), args.lr,
                                 weight_decay=args.weight_decay)
    train_loader = loaddata.getTrainingData(batch_size, args.csv)

    # One log folder per run name, created on demand.
    logfolder = "runs/" + args.data
    print(args.data)
    if not os.path.exists(logfolder):
        os.makedirs(logfolder)
    configure(logfolder)

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        train(train_loader, model, optimizer, epoch)
        # save_model is a module-level prefix defined outside this block.
        out_name = save_model + str(epoch) + '.pth.tar'
        modelname = save_checkpoint({'state_dict': model.state_dict()},
                                    out_name)
        print(modelname)
def main():
    """Entry point: train model_final on top of the SENet base network.

    Only ``model_final``'s parameters are given to the optimizer; the base
    ``model`` is forwarded to ``train`` but receives no gradient updates here.
    """
    global args
    args = parser.parse_args()

    model = define_model(is_resnet=False, is_densenet=False, is_senet=True)
    model_final = net.modelfinal()
    model = model.cuda()
    model_final = model_final.cuda()

    batch_size = 1
    train_loader = loaddata.getTrainingData(batch_size)

    optimizer = torch.optim.Adam(model_final.parameters(), args.lr,
                                 weight_decay=args.weight_decay)
    logger = Logger(logdir='experiment_cnn', flush_secs=1)

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        train(train_loader, model, model_final, optimizer, epoch, logger)
        # Checkpoint both networks every 10th epoch (including epoch 0).
        if epoch % 10 == 0:
            save_checkpoint({'state_dict': model.state_dict()},
                            filename='modelcheckpoint.pth.tar')
            save_checkpoint({'state_dict_final': model_final.state_dict()},
                            filename='finalmodelcheckpoint.pth.tar')
            print('save: (epoch: %d)' % (epoch + 1))
def main():
    """Entry point: train the mask network against a frozen pretrained depth model.

    The depth network (``model``) is restored from a pretrained checkpoint and
    is never optimized — the optimizer covers only ``model2`` (the DRN-based
    auto-encoder mask network) — so ``model2`` is what gets saved at the end.
    """
    import os  # local import: only needed here to create the output directory

    global args
    args = parser.parse_args()

    model_selection = 'resnet'
    model = define_model(encoder=model_selection)
    original_model2 = net_mask.drn_d_22(pretrained=True)
    model2 = net_mask.AutoED(original_model2)

    # Scale DataParallel wrapping and batch size to the visible GPU count.
    gpu_count = torch.cuda.device_count()
    if gpu_count == 8:
        device_ids = list(range(8))
        model = torch.nn.DataParallel(model, device_ids=device_ids).cuda()
        model2 = torch.nn.DataParallel(model2, device_ids=device_ids).cuda()
        batch_size = 64
    elif gpu_count == 4:
        device_ids = list(range(4))
        model = torch.nn.DataParallel(model, device_ids=device_ids).cuda()
        model2 = torch.nn.DataParallel(model2, device_ids=device_ids).cuda()
        batch_size = 32
    else:
        model = torch.nn.DataParallel(model).cuda()
        model2 = torch.nn.DataParallel(model2).cuda()
        batch_size = 8

    # Load pretrained weights for the (frozen) depth network.
    model.load_state_dict(
        torch.load('./pretrained_model/model_' + model_selection))
    cudnn.benchmark = True

    # Only the mask network is trained.
    optimizer = torch.optim.Adam(model2.parameters(), args.lr,
                                 weight_decay=args.weight_decay)
    train_loader = loaddata.getTrainingData(batch_size)

    for epoch in range(args.start_epoch, args.epochs):
        train(train_loader, model, model2, optimizer, epoch)

    # BUG FIX: the original saved the frozen depth model's weights (not the
    # trained mask network) to the absolute path '/net_mask/...'. Save the
    # mask network under the project-relative directory instead.
    os.makedirs('./net_mask', exist_ok=True)
    torch.save(model2.state_dict(), './net_mask/mask_' + model_selection)
def main():
    """Entry point: train the SENet model jointly with an adaptive robust loss."""
    global args
    args = parser.parse_args()

    model = define_model(is_resnet=False, is_densenet=False, is_senet=True)

    n_gpus = torch.cuda.device_count()
    if n_gpus == 8:
        model = torch.nn.DataParallel(model, device_ids=list(range(8))).cuda()
        batch_size = 64
    elif n_gpus == 2:
        print("2 gpus used")
        model = torch.nn.DataParallel(model, device_ids=[0, 1]).cuda()
        batch_size = 10
    else:
        model = model.cuda()
        batch_size = 4

    cudnn.benchmark = True

    # Adaptive robust loss with one dimension per output element
    # (114 * 152 — presumably the flattened output map; verify against train()).
    adaptive = robust_loss_pytorch.adaptive.AdaptiveLossFunction(
        num_dims=114 * 152, float_dtype=np.float32,
        device=torch.device('cuda:0'))

    # Optimize the network and the loss's own parameters together.
    params = list(model.parameters()) + list(adaptive.parameters())
    optimizer = torch.optim.Adam(params, args.lr,
                                 weight_decay=args.weight_decay)
    train_loader = loaddata.getTrainingData(batch_size)

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        filename = 'epoch-' + str(epoch) + '.pth.tar'
        print(filename)
        train(train_loader, model, adaptive, optimizer, epoch)
        save_checkpoint({'state_dict': model.state_dict()}, filename)
def main():
    """CLI entry point: parse arguments, then dispatch to test() or training.

    ``--mode test`` runs evaluation at a fixed threshold; ``--mode train``
    builds the model, trains for ``--epochs`` epochs, and writes per-epoch
    weights to a ``model_output`` directory next to this script.
    """
    parser = argparse.ArgumentParser(description='PyTorch Depth Estimation')
    # BUG FIX: --mode's help text was a copy-paste of the epochs help.
    parser.add_argument('--mode', default="test", type=str,
                        help='run mode: test or train')
    parser.add_argument(
        '--premodel', default="scratch", type=str,
        help='pretrained model options: imagenet, stereo_view, scratch')
    parser.add_argument('--model', default="None", type=str,
                        help='filepath of the model')
    parser.add_argument('--arch', default="Resnet", type=str,
                        help='choice of architecture')
    parser.add_argument('--epochs', default=5, type=int,
                        help='number of total epochs to run')
    parser.add_argument('--start-epoch', default=0, type=int,
                        help='manual epoch number (useful on restarts)')
    parser.add_argument('--lr', '--learning-rate', default=0.0001, type=float,
                        help='initial learning rate')
    parser.add_argument('--momentum', default=0.9, type=float,
                        help='momentum')
    parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
                        help='weight decay (default: 1e-4)')
    parser.add_argument('--load_epoch', default="4", type=str,
                        help='choice of epoch')
    parser.add_argument('--load_dir', default="./model_output", type=str,
                        help='choice of output')
    global args
    args = parser.parse_args()

    # Validate the closed-set options up front.
    modes = {"test", "train"}
    premodels = {"imagenet", "stereo_view", "scratch"}
    archs = {"Resnet", "Densenet", "SEnet", "Custom"}
    if (args.mode not in modes or args.premodel not in premodels
            or args.arch not in archs):
        print("invalid arguments!")
        exit(1)

    if args.mode == "test":
        threshold = 0.25
        test(threshold)
        return

    # Training path.
    model = define_train_model()
    gpu_count = torch.cuda.device_count()
    if gpu_count == 8:
        model = torch.nn.DataParallel(model, device_ids=list(range(8))).cuda()
        batch_size = 64
    elif gpu_count == 4:
        model = torch.nn.DataParallel(model, device_ids=list(range(4))).cuda()
        batch_size = 32
    elif gpu_count == 2:
        model = torch.nn.DataParallel(model, device_ids=[0, 1]).cuda()
        batch_size = 8
    else:
        model = model.cuda()
        batch_size = 4

    cudnn.benchmark = True
    optimizer = torch.optim.Adam(model.parameters(), args.lr,
                                 weight_decay=args.weight_decay)
    train_loader = loaddata.getTrainingData(batch_size)

    # Save checkpoints next to this script; exist_ok avoids the
    # check-then-create race of the original exists()/mkdir() pair.
    model_out_path = Path(__file__).resolve().parent / 'model_output'
    model_out_path.mkdir(parents=True, exist_ok=True)

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        train(train_loader, model, optimizer, epoch)
        torch.save(model.state_dict(),
                   model_out_path / "_model_epoch_{}.pth".format(epoch))