    lr=max_lr, momentum=momentum, weight_decay=weight_decay)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
    optimizer, T_max=scheduler_step, eta_min=min_lr)

# Load the images and masks for this fold
train_id = fold_train[idx]
val_id = fold_valid[idx]
X_train, y_train = trainImageFetch(train_id)
X_val, y_val = trainImageFetch(val_id)

train_data = DataSource1(X_train, mode='train', mask_list=y_train,
                         fine_size=fine_size, pad_left=pad_left,
                         pad_right=pad_right)
# A Dataset cannot be passed to `shuffle`; hand the sampler to `sampler` instead
train_loader = DataLoader(train_data,
                          sampler=RandomSampler(train_data),
                          batch_size=batch_size,
                          num_workers=8,
                          pin_memory=True)

val_data = DataSource1(X_val, mode='val', mask_list=y_val,
                       fine_size=fine_size, pad_left=pad_left,
                       pad_right=pad_right)
# Validation loader: settings mirror the train loader, but without shuffling
val_loader = DataLoader(val_data,
                        shuffle=False,
                        batch_size=batch_size,
                        num_workers=8,
                        pin_memory=True)
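# A minimal sketch (an assumption, not the script's actual training routine) of how
# the objects built above -- train_loader, the SGD optimizer and the cosine
# scheduler -- would typically drive one epoch. `criterion` is a placeholder for
# whatever loss the rest of the script uses; it also assumes DataSource1 in 'train'
# mode yields (image, mask) pairs and that the model's first output is the
# segmentation logits, as in the prediction loop below.
def train_one_epoch(model, loader, optimizer, criterion):
    model.train()
    running_loss = 0.0
    for images, masks in loader:
        images = images.to(device)
        masks = masks.to(device)
        optimizer.zero_grad()
        logits = model(images)[0]          # first head: segmentation logits
        loss = criterion(logits, masks)
        loss.backward()
        optimizer.step()
        running_loss += loss.item() * images.size(0)
    # The caller would step lr_scheduler once per epoch after this returns.
    return running_loss / len(loader.dataset)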
for i in range(5):
    # Load the test images for this fold's split
    image_test, _ = trainImageFetch(test_id[i])
    overall_pred = np.zeros((len(test_id[i]), 202, 202), dtype=np.float32)

    # Build the model and move it to the device
    salt = EncNet(1)
    salt = salt.to(device)
    pred_null = []
    pred_flip = []

    # Load the weights saved for fold i
    param = torch.load(save_weight + weight_name + str(i) + '.pth')
    salt.load_state_dict(param)

    # Create the test DataLoader (no masks in 'test' mode)
    test_data = DataSource1(image_test, mode='test', fine_size=fine_size,
                            pad_left=pad_left, pad_right=pad_right)
    test_loader = DataLoader(test_data,
                             shuffle=False,
                             batch_size=batch_size,
                             num_workers=8,
                             pin_memory=True)

    # Prediction on the test data with no TTA
    salt.eval()
    print(len(test_loader))
    for images in tqdm(test_loader, total=len(test_loader)):
        images = images.to(device)
        with torch.no_grad():
            pred = salt(images)[0]
            # pred = salt(images)
            pred = torch.sigmoid(pred).squeeze(1).cpu().numpy()