Example #1
# Imports and the start of the argument parser are reconstructed here; the
# original snippet begins mid-call. Flag names follow Example #4.
import argparse

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import models, transforms

import dnn  # project-local module providing the dataset, transforms and helpers

parser = argparse.ArgumentParser()
parser.add_argument('-d', '--dataloc', default="datasets/", help='Path to the datasets')
parser.add_argument('-b',
                    '--batch',
                    default=5,
                    help='Specified batch size',
                    type=int)
parser.add_argument(
    '-s',
    '--stats',
    help=
    '.csv file that stores the mean and standard deviation values required for normalization'
)
parser.add_argument('-w',
                    '--workers',
                    default=1,
                    help='Number of workers for batch processing',
                    type=int)
args = parser.parse_args()

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

test_stats = dnn.read_stats(args.dataloc + "test/stats")
test_dataset = dnn.DroneDataset(root_dir=args.dataloc + "test/",
                                transform=transforms.Compose([
                                    dnn.RandomCrop(224),
                                    dnn.Normalize(test_stats),
                                    dnn.ToTensor()
                                ]))
test_loader = DataLoader(test_dataset,
                         batch_size=args.batch,
                         shuffle=True,
                         num_workers=args.workers)

# Pretrained backbone used as a fixed feature extractor: freeze every layer,
# then train only the new single-output sigmoid head below.
network = models.resnet152(pretrained=True)
for param in network.parameters():
    param.requires_grad = False

num_ftrs = network.fc.in_features
network.fc = nn.Sequential(nn.Linear(num_ftrs, 1), nn.Sigmoid())
network = network.to(device)
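# Sketch of an evaluation loop that would consume the pieces built above; it is
# not part of the original snippet. The sample keys 'image' and 'label' are
# assumptions about what dnn.DroneDataset yields.
network.eval()
correct, total = 0, 0
with torch.no_grad():
    for sample in test_loader:
        images = sample['image'].to(device)
        labels = sample['label'].to(device).float().unsqueeze(1)
        outputs = network(images)              # sigmoid scores in [0, 1]
        predictions = (outputs > 0.5).float()  # threshold into binary labels
        correct += (predictions == labels).sum().item()
        total += labels.size(0)
print("Test accuracy: {:.4f}".format(correct / total))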
Example #2
# The snippet begins mid-call; imports and parser setup mirror Example #1.
# Flag names follow Example #4.
parser.add_argument(
    '-s',
    '--stats',
    help=
    '.csv file that stores the mean and standard deviation values required for normalization'
)
parser.add_argument('-w',
                    '--workers',
                    default=1,
                    help='Number of workers for batch processing',
                    type=int)
args = parser.parse_args()

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
val_stats = dnn.read_stats(args.dataloc + "val/stats")

val_dataset = dnn.DroneDataset(root_dir=args.dataloc + "val/",
                               transform=transforms.Compose([
                                   dnn.Resize(224),
                                   dnn.Normalize(val_stats),
                                   dnn.ToTensor()
                               ]))
val_loader = DataLoader(val_dataset,
                        batch_size=args.batch,
                        shuffle=True,
                        num_workers=args.workers)

network = models.resnet50(pretrained=True)

# Freezing is left disabled here: unlike Example #1, the whole backbone is fine-tuned.
#for param in network.parameters():
#    param.requires_grad = False

num_ftrs = network.fc.in_features
network.fc = nn.Sequential(nn.Linear(num_ftrs, 1), nn.Sigmoid())
network = torch.nn.DataParallel(network).to(device)
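# Note (not in the original snippet): nn.DataParallel stores the wrapped model
# under network.module, so saving the inner state_dict keeps the checkpoint
# loadable without the wrapper. The file name below is illustrative.
torch.save(network.module.state_dict(), "resnet50_finetuned.pth")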
Example #3
parser.add_argument('-o', '--open', default=None, help='Load the specified saved model and continue training from it')
parser.add_argument('-b', '--batch', default=5, help='Specified batch size for training', type=int) 
parser.add_argument('-e', '--epochs', default=30, help='Number of epochs to train', type=int)
parser.add_argument('-l', '--lr', default=1e-5, help='Learning rate set for the optimizer', type=float)
parser.add_argument('-m', '--momentum', default=0.9, help='Momentum set for the optimizer', type=float)
parser.add_argument('-L', '--logdir', default="training_logs/", help="Folder where the training logs are saved")
parser.add_argument('-r', '--saverate', default=5, help='The interval to save model states', type=int)
parser.add_argument('-w', '--workers', default=4, help='Number of workers for batch processing', type=int)
parser.add_argument('-n', '--notes', default="", help='Additional notes regarding the training')
args = parser.parse_args()

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
training_stats = dnn.read_stats(args.dataloc + "train/stats")
val_stats = dnn.read_stats(args.dataloc + "val/stats")

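# Training-time augmentation chain; judging by the arguments, the trailing floats
# in Corrupt/FlipHzt/ContrastBrightness/Rotate look like per-sample application
# probabilities (an assumption about the project-local dnn transforms).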
train_dataset = dnn.DroneDataset(root_dir=args.dataloc + "train/",
                                 transform=transforms.Compose([
                                     dnn.Resize(300),
                                     dnn.Corrupt(0.2),
                                     dnn.FlipHzt(0.1),
                                     dnn.ContrastBrightness(3.0, 100, 0.1),
                                     dnn.Rotate(50.0, 0.1),
                                     dnn.RandomCrop(224),
                                     dnn.Normalize(training_stats),
                                     dnn.ToTensor()
                                 ]))
train_loader = DataLoader(train_dataset, batch_size=args.batch, shuffle=True, num_workers=args.workers)

val_dataset = dnn.DroneDataset(root_dir=args.dataloc + "val/",
                               transform=transforms.Compose([
                                   dnn.Resize(224),
                                   dnn.Normalize(val_stats),
                                   dnn.ToTensor()
                               ]))
val_loader = DataLoader(val_dataset, batch_size=args.batch, shuffle=True, num_workers=args.workers)

network = models.resnet50(pretrained=True)

#for param in network.parameters():
#    param.requires_grad = False

num_ftrs = network.fc.in_features
network.fc = nn.Sequential(nn.Linear(num_ftrs, 1), nn.Sigmoid())
network = torch.nn.DataParallel(network).to(device)

#optimizer = torch.optim.Adam(network.parameters(), lr=args.lr)
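# Sketch (not in the original snippet): with Adam commented out, the --lr and
# --momentum flags parsed above point at plain SGD as the intended optimizer.
optimizer = torch.optim.SGD(network.parameters(), lr=args.lr, momentum=args.momentum)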
Example #4
parser.add_argument('-d', '--dataloc', default="datasets/", help='Path to the datasets')
parser.add_argument('-S', '--save_dir', default="saved_models/", help='Path to save trained models')
parser.add_argument('-o', '--open', default=None, help='Load the specified saved model and continue training from it')
parser.add_argument('-b', '--batch', default=5, help='Specified batch size for training', type=int) 
parser.add_argument('-e', '--epochs', default=30, help='Number of epochs to train', type=int)
parser.add_argument('-l', '--lr', default=1e-4, help='Learning rate set for the optimizer', type=float)
parser.add_argument('-s', '--stats', default='stats', help='Path to the file that stores the mean and standard deviation values required for normalization')
parser.add_argument('-n', '--basename', default='net', help='Base name used to identify the particular training set')
parser.add_argument('-r', '--saverate', default=5, help='The interval to save model states', type=int)
parser.add_argument('-w', '--workers', default=4, help='Number of workers for batch processing', type=int)

args = parser.parse_args()

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
stats = dnn.read_stats(args.dataloc + args.stats)

dataset = dnn.DroneDataset(root_dir=args.dataloc,
                           transform=transforms.Compose([
                               dnn.Scale(0.44444),
                               dnn.Normalize(stats),
                               dnn.ToTensor()
                           ]))
loader = DataLoader(dataset, batch_size=args.batch, shuffle=True, num_workers=args.workers)

network = model.Net().to(device)
optimizer = torch.optim.Adadelta(network.parameters(), lr=args.lr)
print("Model created")

model_name = args.open
if model_name is not None:
    dnn.load_model(network, optimizer, model_name)

## Start Training
dnn.train(args, network, optimizer, loader)
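# Illustrative invocation (the script name train.py is an assumption; the flags
# match the parser defined above):
#   python train.py --dataloc datasets/ --batch 8 --epochs 30 --lr 1e-4 --workers 4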