def eval(cfg, model, loader, criterion, publisher="test"):
    """Run one no-grad evaluation pass over `loader`.

    Returns a tensorboard-style dict keyed by `publisher`:
    loss, mean class accuracy, overall accuracy, and IoU.
    """
    model.eval()

    # Accumulators: per-class / overall accuracy + IoU, and running loss.
    acc_meter = MultiAssessmentMeter(
        num_classes=cfg.dataset.num_classes,
        metrics=["class", "overall", "iou"],
    )
    batch_loss = LossMeter()
    meters = (acc_meter, batch_loss)

    with torch.no_grad():
        for data in loader:
            # `processing` runs the forward pass and updates both meters.
            processing(model, criterion, data, meters, cfg.general.device)

    epoch_loss = batch_loss.compute()
    epoch_acc = acc_meter.compute()

    return {
        f"{publisher}/loss": epoch_loss,
        f"{publisher}/mAcc": epoch_acc["class"],
        f"{publisher}/oAcc": epoch_acc["overall"],
        f"{publisher}/IoU": epoch_acc["iou"],
    }
def train(cfg, model, dataset, optimizer, criterion, scheduler, publisher="train"):
    """Train `model` for one epoch over `dataset`.

    Builds its own shuffled DataLoader and returns a tensorboard-style
    dict: current lr, loss, mean/overall accuracy, and IoU.
    """
    model.train()

    loader = DataLoader(
        dataset,
        batch_size=cfg.general.batch_size,
        num_workers=cfg.loader.nworkers,
        pin_memory=True,
        shuffle=True,
    )

    # Accumulators: per-class / overall accuracy + IoU, and running loss.
    acc_meter = MultiAssessmentMeter(
        num_classes=dataset.num_classes,
        metrics=["class", "overall", "iou"],
    )
    batch_loss = LossMeter()
    meters = (acc_meter, batch_loss)

    for data in loader:
        optimizer.zero_grad()
        loss = processing(model, criterion, data, meters, cfg.general.device)
        loss.backward()
        optimizer.step()

        # Abort the run on divergence.
        if torch.isnan(loss):
            print("Train loss is nan.")
            exit()

    train_loss = batch_loss.compute()
    train_acc = acc_meter.compute()

    # NOTE(review): the scheduler is only queried here, never stepped in this
    # function — presumably stepped by the caller; confirm.
    lr = scheduler.get_last_lr()[0]
    return {
        "lr": lr,
        "train/loss": train_loss,
        "train/mAcc": train_acc["class"],
        "train/oAcc": train_acc["overall"],
        "train/IoU": train_acc["iou"],
    }
def train(cfg, model, dataset, optimizer, criterion, scheduler, publisher="train"):
    """Train `model` for one epoch with rotation/jitter augmentation.

    Builds a shuffled DataLoader with the `rotation_and_jitter` collate
    function, prints an epoch summary, and returns a tensorboard-style
    dict: current lr, loss, mean accuracy, overall accuracy.
    """
    model.train()

    loader = DataLoader(
        dataset,
        batch_size=cfg.batch_size,
        num_workers=cfg.nworkers,
        pin_memory=True,
        shuffle=True,
        collate_fn=rotation_and_jitter,
    )
    # Progress bar over batches.
    loader = tqdm(loader, ncols=100, desc=publisher)

    # Accumulators: per-class / overall accuracy, and running loss.
    acc_meter = MultiAssessmentMeter(num_classes=cfg.num_classes,
                                     metrics=["class", "overall"])
    batch_loss = LossMeter()
    meters = (acc_meter, batch_loss)

    for point_clouds, labels in loader:
        optimizer.zero_grad()
        loss = processing(cfg, model, criterion, (point_clouds, labels), meters)
        loss.backward()
        optimizer.step()

        # Abort the run on divergence.
        if torch.isnan(loss):
            print("Train loss is nan.")
            exit()

    # Advance the lr schedule once per epoch.
    scheduler.step()

    train_loss = batch_loss.compute()
    train_acc = acc_meter.compute()
    print('-> Train loss: {} mAcc: {} oAcc: {}'.format(
        train_loss, train_acc["class"], train_acc["overall"]))

    lr = scheduler.get_last_lr()[0]
    return {
        "lr": lr,
        "train/loss": train_loss,
        "train/mAcc": train_acc["class"],
        "train/oAcc": train_acc["overall"],
    }
def train(cfg, model, loader, optimizer, criterion, scheduler, publisher="train"):
    """Train `model` for one epoch over a caller-supplied `loader`.

    Returns a tensorboard-style dict keyed by `publisher`: current lr,
    loss, mean/overall accuracy, and IoU.
    """
    model.train()

    # Accumulators: per-class / overall accuracy + IoU, and running loss.
    acc_meter = MultiAssessmentMeter(
        num_classes=cfg.dataset.num_classes,
        metrics=["class", "overall", "iou"],
    )
    batch_loss = LossMeter()
    meters = (acc_meter, batch_loss)

    for data in loader:
        optimizer.zero_grad()
        loss = processing(model, criterion, data, meters, cfg.general.device)
        loss.backward()
        optimizer.step()

        # Abort the run on divergence.
        if torch.isnan(loss):
            print("Training loss is nan.")
            exit()

    # Advance the lr schedule once per epoch.
    scheduler.step()

    epoch_loss = batch_loss.compute()
    epoch_acc = acc_meter.compute()

    lr = scheduler.get_last_lr()[0]
    return {
        "lr": lr,
        "{}/loss".format(publisher): epoch_loss,
        "{}/mAcc".format(publisher): epoch_acc["class"],
        "{}/oAcc".format(publisher): epoch_acc["overall"],
        "{}/IoU".format(publisher): epoch_acc["iou"],
    }
def eval(cfg, model, dataset, criterion, publisher="test"):
    """Evaluate `model` on `dataset` without gradients.

    Prints a one-line summary and returns a tensorboard-style log dict
    (loss, mean class accuracy, overall accuracy) keyed by `publisher`,
    consistent with the sibling train/test functions.
    """
    model.eval()

    loader = DataLoader(
        dataset,
        batch_size=cfg.batch_size,
        num_workers=cfg.nworkers,
        pin_memory=True,
    )
    # Progress bar over batches.
    loader = tqdm(loader, ncols=100, desc=publisher)

    # Accumulators: per-class / overall accuracy, and running loss.
    acc_meter = MultiAssessmentMeter(num_classes=cfg.num_classes,
                                     metrics=["class", "overall"])
    batch_loss = LossMeter()
    # Hoisted out of the loop: the meters tuple is loop-invariant.
    meters = (acc_meter, batch_loss)

    with torch.no_grad():
        for point_clouds, labels in loader:
            data = (point_clouds, labels)
            _ = processing(cfg, model, criterion, data, meters)

    test_loss = batch_loss.compute()
    test_acc = acc_meter.compute()
    print('-> {} loss: {} mAcc: {} oAcc: {}'.format(
        publisher, test_loss, test_acc["class"], test_acc["overall"]))

    # Fix: previously returned None; return the metrics so callers can log
    # them, matching the other eval/test functions in this file.
    return {
        "{}/loss".format(publisher): test_loss,
        "{}/mAcc".format(publisher): test_acc["class"],
        "{}/oAcc".format(publisher): test_acc["overall"],
    }
def test(cfg, model, dataset, criterion, publisher="test"):
    """Evaluate `model` on `dataset` without gradients.

    Prints a one-line summary and returns a tensorboard-style dict keyed
    by `publisher`: loss, mean/overall accuracy, and IoU.
    """
    model.eval()

    loader = DataLoader(
        dataset,
        batch_size=cfg.general.batch_size,
        num_workers=cfg.loader.nworkers,
        pin_memory=True,
    )

    # Accumulators: per-class / overall accuracy + IoU, and running loss.
    acc_meter = MultiAssessmentMeter(
        num_classes=dataset.num_classes,
        metrics=["class", "overall", "iou"],
    )
    batch_loss = LossMeter()
    meters = (acc_meter, batch_loss)

    with torch.no_grad():
        for data in loader:
            # `processing` runs the forward pass and updates both meters.
            processing(model, criterion, data, meters, cfg.general.device)

    test_loss = batch_loss.compute()
    test_acc = acc_meter.compute()
    print('-> {} loss: {} mAcc: {} iou: {} oAcc: {}'.format(
        publisher, test_loss, test_acc["class"], test_acc["iou"], test_acc["overall"]))

    return {
        "{}/loss".format(publisher): test_loss,
        "{}/mAcc".format(publisher): test_acc["class"],
        "{}/oAcc".format(publisher): test_acc["overall"],
        "{}/IoU".format(publisher): test_acc["iou"],
    }