def validate(loader, num_classes, device, net, scheduler, criterion):
    """Run one validation pass and step the LR scheduler on the epoch mIoU.

    Args:
        loader: yields (images, masks) batches.
        num_classes: number of segmentation classes for the Metrics tracker.
        device: torch device tensors are moved to.
        net: model to evaluate; switched to eval mode here.
        scheduler: LR scheduler stepped with the epoch mIoU
            (presumably ReduceLROnPlateau — confirm against caller).
        criterion: loss applied to (outputs, masks).

    Returns:
        dict with per-sample loss and miou / fg_iou / mcc from Metrics.
    """
    num_samples = 0
    running_loss = 0

    metrics = Metrics(range(num_classes))

    net.eval()

    # Inference only: disable autograd so no graph is built, cutting memory
    # use and speeding up the pass. Outputs and metrics are unchanged.
    with torch.no_grad():
        for images, masks in tqdm.tqdm(loader):
            images = images.to(device, dtype=torch.float)
            masks = masks.to(device)

            num_samples += int(images.size(0))

            outputs = net(images)

            loss = criterion(outputs, masks)
            running_loss += loss.item()

            for mask, output in zip(masks, outputs):
                metrics.add(mask, output)

    assert num_samples > 0, "dataset contains validation images and labels"

    scheduler.step(metrics.get_miou())  # update learning rate

    return {
        "loss": running_loss / num_samples,
        "miou": metrics.get_miou(),
        "fg_iou": metrics.get_fg_iou(),
        "mcc": metrics.get_mcc(),
    }
def train(loader, num_classes, device, net, optimizer, criterion):
    """Train one epoch of a bi-temporal (change-detection) network.

    Args:
        loader: yields (images1, images2, masks) batch triples.
        num_classes: number of prediction classes for the Metrics tracker.
        device: torch device tensors are moved to.
        net: model taking the two image tensors; switched to train mode.
        optimizer: stepped once per batch.
        criterion: loss on (outputs, float masks) — BCE-style target dtype.

    Returns:
        dict with per-sample loss plus precision / recall / f_score / oa.
    """
    num_samples = 0
    running_loss = 0

    metrics = Metrics(range(num_classes))

    net.train()

    for images1, images2, masks in tqdm.tqdm(loader):
        images1 = images1.to(device)
        images2 = images2.to(device)
        masks = masks.to(device)

        assert images1.size()[2:] == images2.size()[2:] == masks.size()[2:], "resolutions for images and masks are in sync"

        num_samples += int(images1.size(0))

        optimizer.zero_grad()
        outputs = net(images1, images2)

        assert outputs.size()[2:] == masks.size()[2:], "resolutions for predictions and masks are in sync"
        assert outputs.size()[1] == num_classes, "classes for predictions and dataset are in sync"

        # BCE-style criterion expects float targets.
        loss = criterion(outputs, masks.float())
        loss.backward()
        optimizer.step()

        running_loss += loss.item()

        for mask, output in zip(masks, outputs):
            metrics.add(mask, output.detach())

    assert num_samples > 0, "dataset contains training images and labels"

    return {
        "loss": running_loss / num_samples,
        "precision": metrics.get_precision(),
        "recall": metrics.get_recall(),
        "f_score": metrics.get_f_score(),
        "oa": metrics.get_oa(),
    }
def train(loader, num_classes, device, net, optimizer, criterion):
    """One training epoch for a network with eight supervised output heads.

    Every head gets its own loss against the mask and all eight losses are
    summed for the backward pass. Metrics are computed on the mean of the
    last four heads only.

    Returns:
        dict with per-sample loss and miou / fg_iou / mcc from Metrics.
    """
    seen = 0
    loss_total = 0

    metrics = Metrics(range(num_classes))

    net.train()

    for images, masks in tqdm.tqdm(loader):
        images = torch.squeeze(images.to(device, dtype=torch.float))
        masks = torch.squeeze(masks.to(device))

        assert images.size()[2:] == masks.size()[1:], "resolutions for images and masks are in sync"

        seen += int(images.size(0))

        optimizer.zero_grad()

        # net returns eight prediction heads; sum one loss term per head.
        heads = net(images)
        loss = sum(criterion(head, masks) for head in heads)
        loss.backward()
        optimizer.step()

        loss_total += loss.item()

        # Fuse the last four heads by simple averaging for metric tracking.
        fused = (heads[4] + heads[5] + heads[6] + heads[7]) / 4
        for mask, prediction in zip(masks, fused):
            metrics.add(mask, prediction.detach())

    assert seen > 0, "dataset contains training images and labels"

    return {
        "loss": loss_total / seen,
        "miou": metrics.get_miou(),
        "fg_iou": metrics.get_fg_iou(),
        "mcc": metrics.get_mcc(),
    }
def train(loader, num_classes, device, net, optimizer, criterion):
    """One training epoch for a deep-supervision net with a distance weight map.

    The loader yields (images, masks, dwm); the criterion takes the weight
    map as a third argument. The final loss is the mean of the fused output
    loss and three deep-supervision losses. Increments the module-level
    ``global_step`` once per batch.

    Returns:
        dict with per-sample loss and miou / fg_iou / mcc from Metrics.
    """
    global global_step

    seen = 0
    epoch_loss = 0

    metrics = Metrics(range(num_classes))

    net.train()

    for images, masks, dwm in tqdm.tqdm(loader):
        images = images.to(device, dtype=torch.float)
        masks = masks.to(device)
        dwm = torch.squeeze(dwm.to(device))

        seen += int(images.size(0))

        optimizer.zero_grad()

        outputs, dsv4, dsv3, dsv2 = net(images)

        # Mean of the three deep-supervision losses plus the fused loss.
        loss = (criterion(dsv2, masks, dwm)
                + criterion(dsv3, masks, dwm)
                + criterion(dsv4, masks, dwm)
                + criterion(outputs, masks, dwm)) / 4
        loss.backward()
        optimizer.step()

        global_step += 1
        epoch_loss += loss.item()

        for mask, output in zip(masks, outputs):
            metrics.add(mask, output.detach())

    assert seen > 0, "dataset contains training images and labels"

    return {
        "loss": epoch_loss / seen,
        "miou": metrics.get_miou(),
        "fg_iou": metrics.get_fg_iou(),
        "mcc": metrics.get_mcc(),
    }
def validate(loader, num_classes, device, net, scheduler, criterion):
    """Validate an eight-head network and step the LR scheduler on mIoU.

    Each head is scored against the mask and the eight losses are summed;
    metrics are computed on the mean of the last four heads.

    Args:
        loader: yields (images, masks) batches.
        num_classes: number of segmentation classes.
        device: torch device tensors are moved to.
        net: model returning eight heads; switched to eval mode.
        scheduler: LR scheduler stepped with the epoch mIoU.
        criterion: loss applied per head against the long-typed masks.

    Returns:
        dict with per-sample loss and miou / fg_iou / mcc from Metrics.
    """
    num_samples = 0
    running_loss = 0

    metrics = Metrics(range(num_classes))

    net.eval()

    # Inference only: disable autograd to save memory and compute.
    with torch.no_grad():
        for images, masks in tqdm.tqdm(loader):
            images = torch.squeeze(images.to(device, dtype=torch.float))
            masks = torch.squeeze(masks.to(device).long())

            assert images.size()[2:] == masks.size()[1:], "resolutions for images and masks are in sync"

            num_samples += int(images.size(0))

            outputs0, outputs1, outputs2, outputs3, outputs0_2, outputs1_2, outputs2_2, outputs3_2 = net(images)

            loss0 = criterion(outputs0, masks)
            loss1 = criterion(outputs1, masks)
            loss2 = criterion(outputs2, masks)
            loss3 = criterion(outputs3, masks)
            loss0_2 = criterion(outputs0_2, masks)
            loss1_2 = criterion(outputs1_2, masks)
            loss2_2 = criterion(outputs2_2, masks)
            loss3_2 = criterion(outputs3_2, masks)
            loss = loss0 + loss1 + loss2 + loss3 + loss0_2 + loss1_2 + loss2_2 + loss3_2

            running_loss += loss.item()

            # Average the last four heads for metric tracking.
            outputs = (outputs0_2 + outputs1_2 + outputs2_2 + outputs3_2) / 4
            for mask, output in zip(masks, outputs):
                metrics.add(mask, output)

    assert num_samples > 0, "dataset contains validation images and labels"

    scheduler.step(metrics.get_miou())  # update learning rate

    return {
        "loss": running_loss / num_samples,
        "miou": metrics.get_miou(),
        "fg_iou": metrics.get_fg_iou(),
        "mcc": metrics.get_mcc(),
    }
def train(loader, num_classes, device, net, optimizer, criterion):
    """Run a single training epoch and report loss plus IoU metrics.

    Moves each batch to *device*, takes one optimizer step per batch, and
    accumulates Metrics over every (mask, prediction) pair.

    Returns:
        dict with per-sample loss and miou / fg_iou / mcc from Metrics.
    """
    metrics = Metrics(range(num_classes))
    seen = 0
    loss_sum = 0

    net.train()

    for images, masks in tqdm.tqdm(loader):
        images, masks = images.to(device), masks.to(device)

        assert images.size()[2:] == masks.size()[1:], "resolutions for images and masks are in sync"

        seen += int(images.size(0))

        optimizer.zero_grad()
        predictions = net(images)

        assert predictions.size()[2:] == masks.size()[1:], "resolutions for predictions and masks are in sync"
        assert predictions.size()[1] == num_classes, "classes for predictions and dataset are in sync"

        batch_loss = criterion(predictions, masks)
        batch_loss.backward()
        optimizer.step()

        loss_sum += batch_loss.item()

        for mask, prediction in zip(masks, predictions):
            metrics.add(mask, prediction.detach())

    assert seen > 0, "dataset contains training images and labels"

    return {
        "loss": loss_sum / seen,
        "miou": metrics.get_miou(),
        "fg_iou": metrics.get_fg_iou(),
        "mcc": metrics.get_mcc(),
    }
def validate(loader, num_classes, device, net, criterion):
    """Evaluate the network over the validation set (no scheduler step).

    Args:
        loader: yields (images, masks) batches.
        num_classes: number of segmentation classes.
        device: torch device tensors are moved to.
        net: model to evaluate; switched to eval mode.
        criterion: loss applied to (outputs, masks).

    Returns:
        dict with per-sample loss and miou / fg_iou / mcc from Metrics.
    """
    num_samples = 0
    running_loss = 0

    metrics = Metrics(range(num_classes))

    net.eval()

    # Inference only: disable autograd to save memory and compute.
    with torch.no_grad():
        for images, masks in tqdm.tqdm(loader):
            images = images.to(device)
            masks = masks.to(device)

            assert images.size()[2:] == masks.size()[1:], "resolutions for images and masks are in sync"

            num_samples += int(images.size(0))

            outputs = net(images)

            assert outputs.size()[2:] == masks.size()[1:], "resolutions for predictions and masks are in sync"
            assert outputs.size()[1] == num_classes, "classes for predictions and dataset are in sync"

            loss = criterion(outputs, masks)
            running_loss += loss.item()

            for mask, output in zip(masks, outputs):
                metrics.add(mask, output)

    assert num_samples > 0, "dataset contains validation images and labels"

    return {
        "loss": running_loss / num_samples,
        "miou": metrics.get_miou(),
        "fg_iou": metrics.get_fg_iou(),
        "mcc": metrics.get_mcc(),
    }
def validate(loader, num_classes, device, net, criterion):
    """Validate a bi-temporal (change-detection) network for one pass.

    Args:
        loader: yields (images1, images2, masks) batch triples.
        num_classes: number of prediction classes.
        device: torch device tensors are moved to.
        net: model taking the two image tensors; switched to eval mode.
        criterion: loss on (outputs, float masks) — BCE-style target dtype.

    Returns:
        dict with per-sample loss plus precision / recall / f_score / oa.
    """
    num_samples = 0
    running_loss = 0

    metrics = Metrics(range(num_classes))

    net.eval()

    # Inference only: disable autograd to save memory and compute.
    with torch.no_grad():
        for images1, images2, masks in tqdm.tqdm(loader):
            images1 = images1.to(device)
            images2 = images2.to(device)
            masks = masks.to(device)

            assert images1.size()[2:] == images2.size()[2:] == masks.size()[2:], "resolutions for images and masks are in sync"

            num_samples += int(images1.size(0))

            outputs = net(images1, images2)

            assert outputs.size()[2:] == masks.size()[2:], "resolutions for predictions and masks are in sync"
            assert outputs.size()[1] == num_classes, "classes for predictions and dataset are in sync"

            # BCE-style criterion expects float targets.
            loss = criterion(outputs, masks.float())
            running_loss += loss.item()

            for mask, output in zip(masks, outputs):
                metrics.add(mask, output)

    assert num_samples > 0, "dataset contains validation images and labels"

    return {
        "loss": running_loss / num_samples,
        "precision": metrics.get_precision(),
        "recall": metrics.get_recall(),
        "f_score": metrics.get_f_score(),
        "oa": metrics.get_oa(),
    }
# NOTE(review): this is a fragment of a larger (TensorFlow) training loop —
# `tape`, `generator`, `optimizer_g`, `optimizer_d`, `global_step`, `epoch`,
# `batch`, `sample_interval`, `start_time`, `seed`, `train_metrics`,
# `metric_keys`, `metrics_plt_filepath`, `gan`, `check_loader`, `epochs`,
# `checkpoints_pth`, `checkpoint_dir_labels`, and `html_filepath` are all
# defined in surrounding code not visible here. The nesting below was
# reconstructed from a whitespace-collapsed source — confirm against the
# original file before relying on it.

# Apply generator gradients computed from the total generator loss.
grads = tape.gradient(g_loss_total, generator.trainable_variables)
optimizer_g.apply_gradients(zip(grads, generator.trainable_variables),
                            global_step=global_step)

# Record progress
# ----------------------------------------------
# Cache training metrics and print to terminal
if (batch + 1) % sample_interval == 0:
    elapsed_time = datetime.datetime.now() - start_time
    train_metrics.add({
        'epoch': epoch,
        'iters': batch + 1,
        # NOTE(review): `_lr_t` is a private optimizer attribute — presumably
        # the current (possibly decayed) learning rate; verify.
        'G_lr': optimizer_g._lr_t.numpy(),
        'D_lr': optimizer_d._lr_t.numpy(),
        'G_L1': g_loss_L1.numpy(),
        'G_GAN': g_loss_gan.numpy(),
        'G_total': g_loss_total.numpy(),
        'D_loss': d_loss_total.numpy(),
        'time': elapsed_time,
        'random_seed': seed
    })
    train_metrics.to_csv()

    # Update plot displayed on training webpage
    train_metrics.plot(metric_keys, metrics_plt_filepath)

# Build final results page
# --------------------------------------------------------
gen_checkpoint(gan, check_loader, epochs, checkpoints_pth)
build_results_page(epochs, checkpoints_pth, checkpoint_dir_labels,
                   metrics_plt_filepath, html_filepath)