# ---- Datasets & loaders ---------------------------------------------------
# Training set receives the shared augmentation pipeline; validation set is
# built with the same file type / trim settings but no augmentation.
train_dset = model_def.SegmentationDataset(
    train_root,
    list_common_trans=common_transforms,
    list_img_trans=None,
    f_type="PIL",
    trim=TRIM,
)
val_dset = model_def.SegmentationDataset(val_root, f_type="PIL", trim=TRIM)

train_dset_loader = utils.data.DataLoader(train_dset, batch_size=BATCH_SIZE, shuffle=True)
val_dset_loader = utils.data.DataLoader(val_dset, batch_size=BATCH_SIZE, shuffle=True)
dset_loader_dict = {'train': train_dset_loader, 'val': val_dset_loader}

# ---- Loss, device placement, optimizer ------------------------------------
criterion_loss = nn.BCELoss()
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
net = net.to(device)
optimizer = optim.Adam(net.parameters())

# ---- Train, then persist weights and per-epoch / per-batch loss history ---
trained_net, best_model_wts, epoch_loss_dict, batch_loss_dict = train_segmentation(
    net, EPOCH_COUNT, dset_loader_dict, criterion_loss, optimizer)
cnn_utils.save_model(net, MODEL_NAME, best_model_wts, epoch_loss_dict,
                     batch_loss_dict, MODEL_DETAILS, SAVE_ROOT)

# Kept from the original source — optional in-sample evaluation hooks:
# net.load_state_dict(best_model_wts)
# test_eval.do_in_sample_tests(net, "../../..")
# ---- Datasets & loaders ---------------------------------------------------
# Training set receives the shared augmentation pipeline; validation set is
# constructed with the dataset's default options (no augmentation).
train_dset = model_def.SegmentationDataset(
    train_root,
    list_common_trans=common_transforms,
    list_img_trans=None,
)
val_dset = model_def.SegmentationDataset(val_root)

train_dset_loader = utils.data.DataLoader(train_dset, batch_size=BATCH_SIZE, shuffle=True)
val_dset_loader = utils.data.DataLoader(val_dset, batch_size=BATCH_SIZE, shuffle=True)
dset_loader_dict = {'train': train_dset_loader, 'val': val_dset_loader}

# ---- Loss, device placement, optimizer ------------------------------------
criterion_loss = nn.BCELoss()
# NOTE(review): `device` is not defined in this section — it relies on a
# definition earlier in the file; confirm it is in scope before this runs.
net = net.to(device)
optimizer = optim.Adam(net.parameters())

# ---- Train (with detailed timing) and persist weights + history -----------
trained_net, best_model_wts, training_hist = train_segmentation(
    net, EPOCH_COUNT, dset_loader_dict, criterion_loss, optimizer,
    detailed_time=True)
cnn_utils.save_model(net, MODEL_NAME, best_model_wts, training_hist,
                     MODEL_DETAILS, SAVE_ROOT)