inp_dim = net_options['height']

# Assign from the command line args
lr = args.lr
wd = args.wd
momentum = args.mom
# NOTE: the two lines below silently override the CLI values above;
# remove them if args.mom / args.wd should take effect
momentum = 0.9
wd = 0.0005

inp_dim = int(inp_dim)
num_classes = int(num_classes)
bs = int(bs)

transforms = Sequence([YoloResize(inp_dim)])
data = CustomDataset(root="data", ann_file="data/train.txt",
                     det_transforms=transforms)
data_loader = DataLoader(data, batch_size=bs)
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum,
                      weight_decay=wd)


def logloss(pred, target):
    assert pred.shape == target.shape, "Input and target must be the same shape"
    pred = pred.view(-1, 1)
    target = target.view(-1, 1)
    sigmoid = torch.nn.Sigmoid()(pred)
    # Build two columns [P(class 0), P(class 1)] so each row holds the
    # probability of both outcomes
    sigmoid = sigmoid.repeat(1, 2)
    sigmoid[:, 0] = 1 - sigmoid[:, 0]
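    # --- Sketch continuation: the excerpt cuts the function off above. One
    # common way to finish such a log loss is to gather the probability of
    # the true class per row and average its negative log; the lines below
    # are an assumption, not this repo's code. ---
    probs = sigmoid.gather(1, target.long()).clamp(min=1e-12)  # P(true class)
    return -torch.log(probs).mean()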
    print('-------------------------------')
    return total_loss


### DATA ###
# Overloading custom data transforms from customloader (may add more here)
# custom_transforms = Sequence([RandomHSV(hue=hue, saturation=saturation, brightness=exposure),
#                               YoloResizeTransform(inp_dim), Normalize()])
# custom_transforms = Sequence([Normalize(), YoloResize(inp_dim)])

# Data instance and loader
data = CustomDataset(root=args.dataDir,
                     num_classes=num_classes,
                     ann_file="data/train_img_list.txt",
                     cfg_file=args.cfgfile,
                     det_transforms='',
                     random_data=args.random_img,
                     rgb_mean=rgb_mean,
                     inp_dim=inp_dim)
print('Batch size ', bs)
data_loader = DataLoader(data, batch_size=bs, shuffle=True,
                         collate_fn=data.collate_fn)
iterations = len(data) // bs
print('Size of data / batch size (iterations) = {}'.format(iterations))

### TRAIN MODEL ###
# Freeze layers according to user specification (see the sketch below)
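# --- Sketch: freezing layers per user specification. The comment above names
# the step but the code is not in this excerpt. Assumptions, not this repo's
# real interface: a Darknet-style `model.module_list` and a cutoff index
# `stop_layer` that would normally come from a CLI flag. ---
stop_layer = 10  # hypothetical cutoff
for idx, module in enumerate(model.module_list):
    if idx < stop_layer:
        for param in module.parameters():
            param.requires_grad = False  # frozen: excluded from gradient updates
# Hand only the trainable parameters to the optimizer afterwards, e.g.
# optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)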
# Load weights PyTorch style
model.load_state_dict(torch.load(args.weightsfile))
model = model.to(device)  ## Really? You're gonna eval on the CPU? :)

# Set to evaluation mode. Note: eval() only switches layer behavior
# (dropout, batchnorm); gradients are disabled separately with torch.no_grad()
# Make sure to call eval() method after loading weights
model.eval()

# Load test data
transforms = Sequence([Equalize(), YoloResizeTransform(model_dim), Normalize()])
test_data = CustomDataset(root="data",
                          ann_file="data/test.txt",
                          det_transforms=transforms,
                          cfg_file=args.cfgfile,
                          num_classes=num_classes)
# test_loader = DataLoader(test_data, batch_size=1)

ground_truths_all = []
predictions_all = []
num_gts = 0

# Make a directory for the image files with their bboxes (e.g. an entry like
# "data/obj/img_001.jpg" would map to "data/eval_output")
eval_output_dir = os.path.split(test_data.examples[0].rstrip())[0].replace(
    'obj', 'eval_output')
os.makedirs(eval_output_dir, exist_ok=True)

for i in range(len(test_data)):
    img_file = test_data.examples[i].rstrip()
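    # (Excerpt ends here. Each iteration would typically load img_file, run a
    # forward pass, and accumulate results into predictions_all and
    # ground_truths_all. Since eval() does not disable autograd, the forward
    # pass would normally run under torch.no_grad(), e.g.:
    #
    #     with torch.no_grad():
    #         detections = model(img_tensor)  # img_tensor: hypothetical name
    #
    # -- a sketch only; the per-image code is not shown in this excerpt.)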
import sys

import torch
import torch.nn.functional as F
from torchvision import transforms  # needed for transforms.Compose below

sys.path.append("./utils_2016314726")
from customloader import CustomDataset

train_transform = transforms.Compose([
    transforms.RandomHorizontalFlip(0.5),
    transforms.RandomCrop(32, padding=4),
    transforms.ToTensor(),
    transforms.Normalize((0.4911, 0.4824, 0.4462), (0.2469, 0.2434, 0.2617)),
])
test_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4911, 0.4824, 0.4462), (0.2469, 0.2434, 0.2617)),
])

trainset = CustomDataset("./data/train/x.npy", "./data/train/y.npy",
                         transform=train_transform)
validset = CustomDataset("./data/train/x.npy", "./data/train/y.npy",
                         transform=test_transform)
batch_size = 128

# Pick one random example per class (labels 0..90) to build a
# class-balanced validation index
balance_val_index = []
for label in range(0, 91):
    label_index = (trainset.targets == label).nonzero()
    random_index = torch.randperm(label_index.size(0))
    if balance_val_index:
        balance_val_index[0] = torch.cat([balance_val_index[0],
                                          label_index[random_index][:1].view(-1)])
    else:
        balance_val_index.append(label_index[random_index][:1].view(-1))
balance_val_index = balance_val_index[0]
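# --- Sketch: how the balanced indices could be consumed (the split/loader
# code is not shown in this excerpt, so this is an assumption): hold those
# examples out as a validation set and train on the rest via Subset. ---
from torch.utils.data import DataLoader, Subset

val_subset = Subset(validset, balance_val_index.tolist())
train_mask = torch.ones(len(trainset), dtype=torch.bool)
train_mask[balance_val_index] = False  # drop the held-out validation examples
train_subset = Subset(trainset, train_mask.nonzero().view(-1).tolist())

train_loader = DataLoader(train_subset, batch_size=batch_size, shuffle=True)
valid_loader = DataLoader(val_subset, batch_size=batch_size)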
    total_loss += cls_loss
    return total_loss


### DATA ###
# Overloading custom data transforms from customloader (may add more here)
# custom_transforms = Sequence([RandomHSV(hue=hue, saturation=saturation, brightness=exposure),
#                               YoloResizeTransform(inp_dim)])
custom_transforms = Sequence([YoloResizeTransform(inp_dim), Normalize()])

# Data instance and loader
data = CustomDataset(root="data",
                     num_classes=num_classes,
                     ann_file="data/train.txt",
                     cfg_file=args.cfgfile,
                     det_transforms=custom_transforms)
print('Batch size ', bs)
data_loader = DataLoader(data, batch_size=bs, shuffle=True,
                         collate_fn=data.collate_fn)

### TRAIN MODEL ###
# Optimizer for the training loss
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum,
                      weight_decay=wd)
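# --- Sketch: one possible training loop over data_loader. Assumptions: an
# `epochs` count, that `model(images)` returns raw predictions, and a
# `yolo_loss` helper standing in for the loss function above -- none of these
# names are confirmed by this excerpt. ---
for epoch in range(epochs):
    for batch_i, (images, targets) in enumerate(data_loader):
        optimizer.zero_grad()             # clear gradients from the last step
        preds = model(images)             # forward pass
        loss = yolo_loss(preds, targets)  # hypothetical loss wrapper
        loss.backward()                   # backprop
        optimizer.step()                  # SGD update (momentum + weight decay)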
wd = args.wd
momentum = args.mom
# NOTE: the two lines below silently override the CLI values above
momentum = 0.9
wd = 0.0005

inp_dim = int(inp_dim)
num_classes = int(num_classes)
bs = int(bs)

# Overloading custom data transforms from customloader (may add more here)
custom_transforms = Sequence([YoloResizeTransform(inp_dim)])

# Data instance and loader
# NOTE: ann_file is a machine-specific absolute path; see the portable
# alternative sketched below
data = CustomDataset(
    root="data",
    num_classes=num_classes,
    ann_file="/home/gunjan/Desktop/Humanoid/pytorch-yolo-v3-custom/data_output/data/train.txt",
    det_transforms=custom_transforms)
print('Batch size ', bs)
data_loader = DataLoader(data, batch_size=bs, shuffle=False,
                         collate_fn=data.collate_fn)

# Optimizer for the training loss
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum,
                      weight_decay=wd)
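# --- Sketch: a portable alternative to the hardcoded absolute ann_file path
# above, resolving it against a data-root CLI argument instead (`args.dataDir`
# is borrowed from the other training script here and is an assumption):
#
#     ann_file = os.path.join(args.dataDir, "train.txt")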
        out = self.dense(out)
        return out


device = 'cuda' if torch.cuda.is_available() else 'cpu'

test_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4911, 0.4824, 0.4462), (0.2469, 0.2434, 0.2617)),
])

model = torch.load('model_2016314726.pth', map_location=torch.device(device))
testset = CustomDataset("./data/test/x.npy", "./data/test/y.npy",
                        transform=test_transform)
test_loader = torch.utils.data.DataLoader(
    testset,
    batch_size=128,
)

model.eval()
correct = 0
total = 0
with torch.no_grad():
    for image, label in test_loader:
        x = image.to(device)
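        # --- Sketch continuation: the excerpt cuts off above. Given the
        # correct/total counters, a top-1 accuracy computation would typically
        # finish the loop as below (assumes the model returns per-class
        # logits; this continuation is not the author's code). ---
        y = label.to(device)
        outputs = model(x)
        predicted = outputs.argmax(dim=1)         # top-1 predicted class
        total += y.size(0)
        correct += (predicted == y).sum().item()  # count correct predictions

print('Test accuracy: {:.2f}%'.format(100 * correct / total))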