def __init__(self, root_dir, model_path, save_dir=None):
    self.root_dir = root_dir
    self.transform = transforms.Compose([Normalize(), ToTensor()])
    self.model_path = model_path
    self.model = model.GoNet()
    if use_gpu:
        self.model = self.model.cuda()
    self.model.load_state_dict(torch.load(model_path))
    frames = os.listdir(root_dir + '/img')
    frames = [root_dir + "/img/" + frame for frame in frames]
    self.len = len(frames) - 1
    frames = np.array(frames)
    frames.sort()
    # build consecutive (previous frame, current frame) pairs
    self.x = []
    for i in range(self.len):
        self.x.append([frames[i], frames[i + 1]])
    self.x = np.array(self.x)
    # uncomment to select the initial rectangle manually
    # init_bbox = bbox_coordinates(self.x[0][0])
    # read the initial bounding box [x, y, w, h] from the OTB-style annotation file
    with open(root_dir + '/groundtruth_rect.txt') as f:
        lines = f.readlines()
    init_bbox = lines[0].strip().split('\t')
    init_bbox = [float(x) for x in init_bbox]
    # convert [x, y, w, h] to corner format [x1, y1, x2, y2]
    init_bbox = [init_bbox[0],
                 init_bbox[1],
                 init_bbox[0] + init_bbox[2],
                 init_bbox[1] + init_bbox[3]]
    init_bbox = np.array(init_bbox)
    print(init_bbox)
    self.prev_rect = init_bbox
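# A minimal, standalone sketch of the annotation parsing above, assuming an
# OTB-style groundtruth_rect.txt whose first line is a tab-separated
# "x<TAB>y<TAB>w<TAB>h" record; the helper name and example path are
# illustrative, not part of the repository.
import numpy as np

def read_initial_bbox(annotation_path):
    with open(annotation_path) as f:
        first_line = f.readline().strip()
    x, y, w, h = [float(v) for v in first_line.split('\t')]
    # convert [x, y, w, h] to corner format [x1, y1, x2, y2]
    return np.array([x, y, x + w, y + h])

# e.g. read_initial_bbox('OTB/Girl/groundtruth_rect.txt')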
def main():
    args = parser.parse_args()
    print(args)
    # load dataset
    transform = transforms.Compose([Normalize(), ToTensor()])
    alov = datasets.ALOVDataset('../data/alov300/imagedata++/',
                                '../data/alov300/alov300++_rectangleAnnotation_full/',
                                transform)
    dataloader = DataLoader(alov,
                            batch_size=args.batch_size,
                            shuffle=True,
                            num_workers=4)
    # load model
    net = model.GoNet()
    loss_fn = torch.nn.L1Loss(size_average=False)
    if use_gpu:
        net = net.cuda()
        loss_fn = loss_fn.cuda()
    optimizer = optim.SGD(net.classifier.parameters(),
                          lr=args.learning_rate,
                          momentum=args.momentum)
    if os.path.exists(args.save_directory):
        print('Directory %s already exists' % (args.save_directory))
    else:
        os.makedirs(args.save_directory)
    # start training
    net = train_model(net, dataloader, loss_fn, optimizer, args.epochs,
                      args.learning_rate, args.save_directory)
def main():
    global args, batchSize, kSaveModel, bb_params
    args = parser.parse_args()
    print(args)
    batchSize = args.batch_size
    kSaveModel = args.save_freq
    np.random.seed(args.manual_seed)
    torch.manual_seed(args.manual_seed)
    if cuda:
        torch.cuda.manual_seed_all(args.manual_seed)
    # load bounding box motion model params
    bb_params['lambda_shift_frac'] = args.lambda_shift_frac
    bb_params['lambda_scale_frac'] = args.lambda_scale_frac
    bb_params['min_scale'] = args.min_scale
    bb_params['max_scale'] = args.max_scale
    # load datasets
    alov = ALOVDataset(
        os.path.join(args.data_directory, 'imagedata++/'),
        os.path.join(args.data_directory,
                     'alov300++_rectangleAnnotation_full/'),
        transform, input_size)
    imagenet = ILSVRC2014_DET_Dataset(
        os.path.join(args.data_directory, 'ILSVRC2014_DET_train/'),
        os.path.join(args.data_directory, 'ILSVRC2014_DET_bbox_train/'),
        bb_params, transform, input_size)
    # list of datasets to train on
    datasets = [alov, imagenet]
    # load model
    net = model.GoNet().to(device)
    # summary(net, [(3, 224, 224), (3, 224, 224)])
    loss_fn = torch.nn.L1Loss(size_average=False).to(device)
    # initialize optimizer
    optimizer = optim.SGD(net.classifier.parameters(),
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    if os.path.exists(args.save_directory):
        print('Directory %s already exists' % (args.save_directory))
    else:
        os.makedirs(args.save_directory)
    # start training
    net = train_model(net, datasets, loss_fn, optimizer)
    # save trained model
    checkpoint = {'state_dict': net.state_dict()}
    path = os.path.join(args.save_directory, 'pytorch_goturn.pth.tar')
    torch.save(checkpoint, path)
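# A minimal restore sketch for the checkpoint saved above, assuming the same
# {'state_dict': ...} layout; 'saved_checkpoints/' stands in for whatever
# save directory was passed on the command line and is purely illustrative.
import os
import torch
import model  # repository module defining GoNet

path = os.path.join('saved_checkpoints', 'pytorch_goturn.pth.tar')
checkpoint = torch.load(path, map_location='cpu')
net = model.GoNet()
net.load_state_dict(checkpoint['state_dict'])
net.eval()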
def __init__(self, root_dir, model_path, save_dir=None):
    self.root_dir = root_dir
    self.transform = transforms.Compose([Normalize(), ToTensor()])
    self.model_path = model_path
    self.model = model.GoNet()
    if use_gpu:
        self.model = self.model.cuda()
    self.model.load_state_dict(torch.load(model_path))
    frames = os.listdir(root_dir)
    self.len = len(frames) - 1
    frames = [root_dir + "/" + frame for frame in frames]
    frames = np.array(frames)
    frames.sort()
    # build consecutive (previous frame, current frame) pairs
    self.x = []
    for i in range(self.len):
        self.x.append([frames[i], frames[i + 1]])
    self.x = np.array(self.x)
    # select the initial rectangle on the first frame
    init_bbox = bbox_coordinates(self.x[0][0])
    print(init_bbox)
    self.prev_rect = init_bbox
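# A hedged sketch of what a bbox_coordinates-style helper could look like,
# using OpenCV's interactive ROI selector; the repository's actual helper may
# be implemented differently, and the function name here is illustrative.
import cv2
import numpy as np

def select_initial_bbox(frame_path):
    img = cv2.imread(frame_path)
    x, y, w, h = cv2.selectROI('select target', img, showCrosshair=True)
    cv2.destroyWindow('select target')
    # return corner format [x1, y1, x2, y2], matching self.prev_rect above
    return np.array([x, y, x + w, y + h], dtype=np.float64)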
def main():
    global args
    args = parser.parse_args()
    print(args)
    np.random.seed(args.manual_seed)
    torch.manual_seed(args.manual_seed)
    if use_gpu:
        torch.cuda.manual_seed(args.manual_seed)
    # load datasets
    alov = ALOVDataset('../data/alov300/imagedata++/',
                       '../data/alov300/alov300++_rectangleAnnotation_full/',
                       transform)
    imagenet = ILSVRC2014_DET_Dataset('../data/imagenet_img/',
                                      '../data/imagenet_bbox/',
                                      transform,
                                      args.lambda_shift_frac,
                                      args.lambda_scale_frac,
                                      args.min_scale,
                                      args.max_scale)
    # list of datasets to train on
    datasets = [alov, imagenet]
    # load model
    net = model.GoNet()
    loss_fn = torch.nn.L1Loss(size_average=False)
    if use_gpu:
        net = net.cuda()
        loss_fn = loss_fn.cuda()
    # initialize optimizer
    optimizer = optim.SGD(net.classifier.parameters(),
                          lr=args.learning_rate,
                          weight_decay=0.0005)
    if os.path.exists(args.save_directory):
        print('Directory %s already exists' % (args.save_directory))
    else:
        os.makedirs(args.save_directory)
    # start training
    net = train_model(net, datasets, loss_fn, optimizer)
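# One plausible use of lambda_shift_frac / lambda_scale_frac / min_scale /
# max_scale, sketched after the Laplace motion model described in the GOTURN
# paper; the actual sampling inside ILSVRC2014_DET_Dataset may differ, and the
# default values below are only illustrative.
import numpy as np

def sample_shifted_box(cx, cy, w, h,
                       lambda_shift_frac=5.0, lambda_scale_frac=15.0,
                       min_scale=-0.4, max_scale=0.4):
    # shift the box center by a Laplace-distributed fraction of its size
    new_cx = cx + w * np.random.laplace(0.0, 1.0 / lambda_shift_frac)
    new_cy = cy + h * np.random.laplace(0.0, 1.0 / lambda_shift_frac)
    # scale width and height by Laplace-distributed factors, clipped to limits
    dw = np.clip(np.random.laplace(0.0, 1.0 / lambda_scale_frac), min_scale, max_scale)
    dh = np.clip(np.random.laplace(0.0, 1.0 / lambda_scale_frac), min_scale, max_scale)
    return new_cx, new_cy, w * (1.0 + dw), h * (1.0 + dh)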
    # debug visualizations of the search region and predicted box
    # cv2.rectangle(srch, (x1_sc, y1_sc), (x2_sc, y2_sc), (255, 0, 0), 3)
    # cv2.rectangle(srch, (srch_xc - x_len, srch_yc - y_len), (srch_xc + x_len, srch_yc + y_len), (0, 255, 0), 3)
    # cv2.rectangle(image_srch, (x1 + x1_sc, y1 + y1_sc), (x1 + x2_sc, y1 + y2_sc), (255, 0, 0), 3)
    # cv2.rectangle(image_srch, (loc_srch[0], loc_srch[1]), (loc_srch[2], loc_srch[3]), (0, 255, 0), 3)
    # cv2.imwrite("srch.jpg", srch)
    # cv2.imwrite("full_srch.jpg", image_srch)
    return pd, x1, y1


device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
m = nn.DataParallel(model.GoNet())
if os.path.exists("go_turn.pth"):
    m.load_state_dict(torch.load("go_turn.pth")['state_dict'])
    print("pretrained model loaded")
else:
    print("Could not find trained model. Exiting.")
    sys.exit()
m.to(device)
m.eval()

op_summary = ""
acc = mm.MOTAccumulator(auto_id=True)
folders = [700, 800, 900]
for fldr in folders:
    if not os.path.exists("/ssd_scratch/cvit/bsr/"):
        op = subprocess.run(["mkdir", "/ssd_scratch/cvit/bsr"])
    if not os.path.exists("/ssd_scratch/cvit/bsr/" + str(fldr)):
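# A hedged sketch of the py-motmetrics flow used above: one accumulator per
# run, one update() per frame, then a metrics summary. The boxes below are
# made-up [x, y, w, h] values purely for illustration.
import numpy as np
import motmetrics as mm

acc = mm.MOTAccumulator(auto_id=True)

gt_boxes = np.array([[10.0, 10.0, 50.0, 80.0]])    # ground-truth box for one frame
hyp_boxes = np.array([[12.0, 11.0, 48.0, 78.0]])   # tracker prediction for that frame
dists = mm.distances.iou_matrix(gt_boxes, hyp_boxes, max_iou=0.5)
acc.update([1], [1], dists)                        # object id 1 matched against hypothesis id 1

mh = mm.metrics.create()
summary = mh.compute(acc, metrics=['num_frames', 'mota', 'motp'], name='goturn')
print(summary)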
def main():
    global args
    args = parser.parse_args()
    print(args)
    np.random.seed(args.manual_seed)
    torch.manual_seed(args.manual_seed)
    if use_gpu:
        torch.cuda.manual_seed(args.manual_seed)
    # load datasets
    alov = ALOVDataset('../data/alov300/imagedata++/',
                       '../data/alov300/alov300++_rectangleAnnotation_full/',
                       transform)
    imagenet = ILSVRC2014_DET_Dataset('../data/imagenet_img/',
                                      '../data/imagenet_bbox/',
                                      transform,
                                      args.lambda_shift_frac,
                                      args.lambda_scale_frac,
                                      args.min_scale,
                                      args.max_scale)
    # list of datasets to train on
    datasets = [alov, imagenet]
    # load model
    net = model.GoNet()
    loss_fn = torch.nn.L1Loss(size_average=False)
    if use_gpu:
        net = net.cuda()
        loss_fn = loss_fn.cuda()
    # initialize optimizer: classifier weights train at 10x and biases at
    # 20x the base learning rate
    trainable_weights = []
    trainable_bias = []
    for name, param in net.classifier.named_parameters():
        if 'weight' in name:
            trainable_weights.append(param)
        elif 'bias' in name:
            trainable_bias.append(param)
    optimizer = optim.SGD([{'params': trainable_weights,
                            'lr': args.learning_rate * 10},
                           {'params': trainable_bias,
                            'lr': args.learning_rate * 20}],
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    if os.path.exists(args.save_directory):
        print('Directory %s already exists' % (args.save_directory))
    else:
        os.makedirs(args.save_directory)
    # start training
    net = train_model(net, datasets, loss_fn, optimizer)
    # save trained model
    path = os.path.join(args.save_directory, 'final_model.pth')
    torch.save(net.state_dict(), path)
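# A standalone illustration of the per-parameter learning rates configured
# above: classifier weights train at 10x and biases at 20x the base LR, a
# convention carried over from Caffe-style fine-tuning of newly added layers.
# The 'classifier' module and base_lr here are stand-ins, not the real GoNet head.
import torch.nn as nn
import torch.optim as optim

base_lr = 1e-5
classifier = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 4))

weights = [p for n, p in classifier.named_parameters() if 'weight' in n]
biases = [p for n, p in classifier.named_parameters() if 'bias' in n]

optimizer = optim.SGD([{'params': weights, 'lr': base_lr * 10},
                       {'params': biases, 'lr': base_lr * 20}],
                      lr=base_lr, momentum=0.9, weight_decay=0.0005)

for group in optimizer.param_groups:
    print(group['lr'])  # 1e-4 for weights, 2e-4 for biases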