try:
    os.makedirs(opt.outf)
except OSError:
    pass

classifier = PointNetCls(k=num_classes, views=opt.n_views)

if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))

optimizer = optim.SGD(classifier.parameters(), lr=opt.lr, momentum=opt.momentum)
classifier.cuda()

num_batch = len(dataset) / opt.batchSize

for epoch in range(opt.nepoch):
    for i, data in enumerate(dataloader, 0):
        points, target = data
        points, target = Variable(points), Variable(target[:, 0])
        points = points.transpose(2, 1)
        # sys.exit(0)
        points, target = points.cuda(), target.cuda()
        optimizer.zero_grad()
        classifier = classifier.train()
        pred = classifier(points)
        loss = F.nll_loss(pred, target)
        loss.backward()
print(len(dataset), len(test_dataset))
num_classes = len(dataset.obj_list)
print('classes', num_classes)

try:
    os.makedirs(opt.outf)
except OSError:
    pass

classifier = PointNetCls(k=num_classes, num_points=opt.num_points)

if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))

classifier = classifier.cuda()

optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
# if torch.cuda.is_available():
#     classifier.cuda()

num_batch = len(dataset) / opt.batchSize

for epoch in range(opt.nepoch):
    for i, data in enumerate(dataloader, 0):
        points, target = data
        points, target = Variable(points), Variable(target[:, 0])
        points = points.transpose(2, 1)
        # if torch.cuda.is_available():
        #     points, target = points.cuda(), target.cuda()
        points = points.cuda()
try:
    os.makedirs(opt.outf)
except OSError:
    pass

classifier = PointNetCls(k=num_classes, num_points=opt.num_points)

if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))

optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
classifier.cuda()

num_batch = len(dataset) / opt.batchSize

for epoch in range(opt.nepoch):
    for i, data in enumerate(dataloader, 0):
        points, target = data
        points, target = Variable(points), Variable(target[:, 0])
        points = points.transpose(2, 1)
        points, target = points.cuda(), target.cuda()
        optimizer.zero_grad()
        classifier = classifier.train()
        pred, _ = classifier(points)
        loss = F.nll_loss(pred, target)
        loss.backward()
        optimizer.step()
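The loop above only backpropagates the NLL loss; a per-batch accuracy readout is usually printed alongside it. The lines below are a minimal sketch of that readout (an assumption, not part of the original snippet), placed inside the inner loop after optimizer.step() and reusing pred, target, loss, points, epoch, i and num_batch from above:

# hedged sketch: per-batch accuracy from the log-probabilities returned by PointNetCls
pred_choice = pred.data.max(1)[1]                  # index of the most likely class per sample
correct = pred_choice.eq(target.data).cpu().sum()  # number of correct predictions in the batch
print('[%d: %d/%d] train loss: %f accuracy: %f' % (
    epoch, i, num_batch, loss.item(), correct.item() / float(points.size(0))))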
# create output folder
try:
    os.makedirs(opt.outf)
except OSError:
    pass

# use PointNet network
classifier = PointNetCls(k=num_classes, num_points=opt.num_points)

# Load model if so chosen
if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))

# standard optimiser
optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
classifier.cuda()  # enable cuda

num_batch = len(dataset) / opt.batchSize

# Create variables to save scores
testLoss = []
testAccuracy = []

# Create scores folder if not existing
if not os.path.exists(opt.scoresFolder):
    os.makedirs(opt.scoresFolder)

# Create a folder for the predictions if not existing
if not os.path.exists(opt.scoresFolder + "/predictions/"):
    os.makedirs(opt.scoresFolder + "/predictions/")
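The testLoss / testAccuracy lists and the scores folder are only prepared in this excerpt. One hedged way to persist them at the end of training, assuming the lists are filled once per epoch elsewhere in the script and numpy is imported as np (the file names are illustrative only):

# hedged sketch: save the accumulated scores into opt.scoresFolder (illustrative file names)
np.savetxt(os.path.join(opt.scoresFolder, "test_loss.txt"), np.array(testLoss))          # one value per epoch
np.savetxt(os.path.join(opt.scoresFolder, "test_accuracy.txt"), np.array(testAccuracy))  # one value per epoch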
# training setup: network, optimizer, LR scheduler and loss
mynet = PointNetCls()
optimizer = torch.optim.Adam(mynet.parameters(), lr=LR)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.8)
loss_func = torch.nn.MSELoss()

# train
myepoch = tqdm(range(1, 500))
for epoch in myepoch:
    loss_list = []
    valid_loss_list = []
    for step, (features, targets) in enumerate(train_loader):
        mynet.cuda()
        mynet.train()
        features = features.transpose(2, 1)
        features, targets = features.cuda(), targets.cuda()
        predicted_targets, feature_transform_matrix = mynet(features)
        loss = loss_func(targets, predicted_targets)
        loss = (loss
                + mynet.feature_transform_regularizer(feature_transform_matrix) * 0.001)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        loss_list.append(loss.cpu().data.numpy())
    ave_loss = np.array(loss_list).mean()
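The snippet relies on mynet.feature_transform_regularizer(...), whose definition is not shown. Below is a minimal sketch of what such a regulariser typically computes in the standard PointNet formulation (an assumption about the method, not this author's exact code): it penalises the deviation of each predicted feature-transform matrix from an orthogonal one.

import torch

def feature_transform_regularizer(trans):
    # trans: (batch, k, k) feature-transform matrices predicted by the T-Net
    d = trans.size(1)
    identity = torch.eye(d, device=trans.device)[None, :, :]
    # Frobenius norm of (I - A A^T), averaged over the batch
    diff = torch.bmm(trans, trans.transpose(2, 1)) - identity
    return torch.mean(torch.norm(diff, dim=(1, 2)))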