def main():
    """Parse CLI args, build the ALOV dataset and SPPGoNet model, train, and save.

    Reads hyperparameters from the module-level ``parser`` and publishes
    ``args``, ``batchSize``, ``kSaveModel`` and ``bb_params`` as globals for
    the training loop. Side effects: creates ``args.save_directory`` and
    writes ``pytorch_goturn.pth.tar`` into it.
    """
    global args, batchSize, kSaveModel, bb_params
    args = parser.parse_args()
    print(args)
    batchSize = args.batch_size
    kSaveModel = args.save_freq

    # Seed every RNG source for reproducible runs.
    np.random.seed(args.manual_seed)
    torch.manual_seed(args.manual_seed)
    if cuda:
        torch.cuda.manual_seed_all(args.manual_seed)

    # Bounding-box motion-model parameters (synthetic shift/scale jitter).
    bb_params['lambda_shift_frac'] = args.lambda_shift_frac
    bb_params['lambda_scale_frac'] = args.lambda_scale_frac
    bb_params['min_scale'] = args.min_scale
    bb_params['max_scale'] = args.max_scale

    # Datasets to train on (ALOV300++ only in this variant).
    alov = ALOVDataset(
        os.path.join("../../pygoturn/data", 'imagedata++/'),
        os.path.join("../../pygoturn/data",
                     'alov300++_rectangleAnnotation_full/'),
        NormalizeToTensor(), input_size)
    datasets = [alov]

    # Model and loss. `reduction='sum'` replaces the deprecated
    # `size_average=False` (same behavior, removed-in-future API).
    net = model.SPPGoNet().to(device)
    loss_fn = torch.nn.L1Loss(reduction='sum').to(device)

    # Only the classifier head is optimized; the backbone stays frozen here.
    optimizer = optim.SGD(net.classifier.parameters(),
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    # exist_ok avoids the check-then-create race of exists()+makedirs().
    os.makedirs(args.save_directory, exist_ok=True)

    # Train, then persist the final weights.
    net = train_model(net, datasets, loss_fn, optimizer)
    checkpoint = {'state_dict': net.state_dict()}
    path = os.path.join(args.save_directory, 'pytorch_goturn.pth.tar')
    torch.save(checkpoint, path)
def main():
    """Parse CLI args, build ALOV + ImageNet-DET datasets and GoNet, then train.

    Publishes ``args`` as a global for the training loop. Side effects:
    creates ``args.save_directory`` if missing. Returns nothing; the trained
    net is produced by ``train_model`` (saving happens elsewhere in this
    variant).
    """
    global args
    args = parser.parse_args()
    print(args)

    # Seed RNGs for reproducibility.
    np.random.seed(args.manual_seed)
    torch.manual_seed(args.manual_seed)
    if use_gpu:
        torch.cuda.manual_seed(args.manual_seed)

    # Datasets to train on: ALOV300++ video pairs plus synthetic pairs
    # generated from ImageNet-DET still images.
    alov = ALOVDataset('../data/alov300/imagedata++/',
                       '../data/alov300/alov300++_rectangleAnnotation_full/',
                       transform)
    imagenet = ILSVRC2014_DET_Dataset('../data/imagenet_img/',
                                      '../data/imagenet_bbox/',
                                      transform,
                                      args.lambda_shift_frac,
                                      args.lambda_scale_frac,
                                      args.min_scale,
                                      args.max_scale)
    datasets = [alov, imagenet]

    # Model and loss. `reduction='sum'` replaces the deprecated
    # `size_average=False` (identical behavior).
    net = model.GoNet()
    loss_fn = torch.nn.L1Loss(reduction='sum')
    if use_gpu:
        net = net.cuda()
        loss_fn = loss_fn.cuda()

    # Only the classifier head is optimized in this variant.
    optimizer = optim.SGD(net.classifier.parameters(),
                          lr=args.learning_rate,
                          weight_decay=0.0005)

    # exist_ok avoids the check-then-create race of exists()+makedirs().
    os.makedirs(args.save_directory, exist_ok=True)

    # Start training.
    net = train_model(net, datasets, loss_fn, optimizer)
# Resolve the model directory and hyperparameter set.
# BUG FIX: the original only created `params` inside the `if args.model_dir:`
# branch, so the fallback path raised NameError at `params.cuda = ...`.
# `params` is now constructed unconditionally.
params = util.Params()
if args.model_dir:
    params.update(args.model_dir)
    model_dir = args.model_dir
else:
    model_dir = os.path.join(".", "model")
    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)
params.cuda = torch.cuda.is_available()

# ALOV300++ training data.
alov = ALOVDataset('/large_storage/imagedata++',
                   '/large_storage/alov300++_rectangleAnnotation_full',
                   transform)
dataloader = DataLoader(alov, batch_size=params.batch_size)

use_gpu = torch.cuda.is_available()
# NOTE(review): this rebinding shadows the `model` module with a Re3Net
# instance — later code can no longer reach the module. Kept as-is because
# downstream code (outside this view) appears to rely on the instance.
model = model.Re3Net().cuda() if use_gpu else model.Re3Net()
optimizer = optim.Adam(model.parameters(), lr=params.learning_rate)
net = 0  # NOTE(review): placeholder, presumably reassigned by the train loop — verify
loss_fn = model.loss_fn(params.cuda)

# Train the model
logging.info("Starting training for {} epoch(s)".format(params.num_epochs))
# Training configuration and dataset/model construction for GOTURN-AlexNet.

# Batch size and checkpoint frequency come from the parsed CLI arguments.
batchSize = args.batch_size
kSaveModel = args.save_freq

# Seed both RNG sources for reproducible training runs.
np.random.seed(args.manual_seed)
torch.manual_seed(args.manual_seed)

# Bounding-box motion-model parameters for synthetic shift/scale jitter.
bb_params = {
    'lambda_shift_frac': 5,
    'lambda_scale_frac': 15,
    'min_scale': -0.4,
    'max_scale': 0.4,
}

input_size = 224
transform = NormalizeToTensor()

# ALOV300++ video frames plus their rectangle annotations.
data_root = "../pygoturn/data"
alov = ALOVDataset(
    os.path.join(data_root, 'imagedata++/'),
    os.path.join(data_root, 'alov300++_rectangleAnnotation_full/'),
    NormalizeToTensor(), input_size)

# Place the network on the first GPU when one is available.
cuda = torch.cuda.is_available()
device = torch.device('cuda:0' if cuda else 'cpu')
net = GOTURN_AlexNET()
def main():
    """Parse CLI args, build datasets and GoNet, train with per-group LRs, save.

    Follows the Caffe GOTURN recipe: classifier weights train at 10x the base
    learning rate and biases at 20x. Publishes ``args`` as a global. Side
    effects: creates ``args.save_directory`` and writes ``final_model.pth``.
    """
    global args
    args = parser.parse_args()
    print(args)

    # Seed RNGs for reproducibility.
    np.random.seed(args.manual_seed)
    torch.manual_seed(args.manual_seed)
    if use_gpu:
        torch.cuda.manual_seed(args.manual_seed)

    # Datasets to train on: ALOV300++ plus synthetic ImageNet-DET pairs.
    alov = ALOVDataset('../data/alov300/imagedata++/',
                       '../data/alov300/alov300++_rectangleAnnotation_full/',
                       transform)
    imagenet = ILSVRC2014_DET_Dataset('../data/imagenet_img/',
                                      '../data/imagenet_bbox/',
                                      transform,
                                      args.lambda_shift_frac,
                                      args.lambda_scale_frac,
                                      args.min_scale,
                                      args.max_scale)
    datasets = [alov, imagenet]

    # Model and loss. `reduction='sum'` replaces the deprecated
    # `size_average=False` (identical behavior).
    net = model.GoNet()
    loss_fn = torch.nn.L1Loss(reduction='sum')
    if use_gpu:
        net = net.cuda()
        loss_fn = loss_fn.cuda()

    # Split classifier parameters into weights and biases so each group can
    # get its own learning-rate multiplier (10x / 20x of the base LR).
    named = list(net.classifier.named_parameters())
    trainable_weights = [p for name, p in named if 'weight' in name]
    trainable_bias = [p for name, p in named if 'bias' in name]
    optimizer = optim.SGD(
        [{'params': trainable_weights, 'lr': args.learning_rate * 10},
         {'params': trainable_bias, 'lr': args.learning_rate * 20}],
        lr=args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay)

    # exist_ok avoids the check-then-create race of exists()+makedirs().
    os.makedirs(args.save_directory, exist_ok=True)

    # Train, then persist the final weights.
    net = train_model(net, datasets, loss_fn, optimizer)
    path = os.path.join(args.save_directory, 'final_model.pth')
    torch.save(net.state_dict(), path)