Example #1
#keep only the file id: strip the directory (either separator) and the .png extension
test_file_list = [f.split('/')[-1].split('\\')[-1][:-4] for f in test_file_list]

#five fold generator
fold = KFold(n_splits = 5, shuffle = True, random_state = 42)

for j, idx in enumerate(fold.split(file_list)):
    j = 2  #hard-coded so the fold-2 checkpoint is loaded regardless of the current split
    train_idx = idx[0]
    val_idx = idx[1]

    #20 % train/val split
    file_list_val = list(map(file_list.__getitem__,val_idx))
    file_list_train = list(map(file_list.__getitem__,train_idx))

    #define dataset iterators
    dataset = TGSSaltDataset(train_path, file_list_train, augmentation = True)
    dataset_val = TGSSaltDataset(train_path, file_list_val)
    test_dataset = TGSSaltDataset(test_path, test_file_list, is_test = True)

    if '34' in parameter_path:
        model = get_model(num_classes = 1, num_filters = 32, pretrained = True)
    else:
        model = get_model(encoder_depth = 101, num_classes = 1, num_filters=32, dropout_2d=0.2, pretrained=True, is_deconv=True)
    
    model.load_state_dict(torch.load('../torch_parameters/' + parameter_path + '/model-' + str(j) + '.pt'))
    model.train(False)


    #training parameters
    epoch = 100
    learning_rate = 1e-3
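
The snippet stops right after declaring its training parameters. As a rough sketch of how epoch and learning_rate would typically drive a fine-tuning loop here, assuming `dataset` yields (image, mask) float tensor pairs, a batch size of 32, Adam, and BCEWithLogitsLoss (all illustrative assumptions, not the original code):

import torch
from torch.utils.data import DataLoader

#assumes `model` and `dataset` from the snippet above; batch size and loss are illustrative choices
loader = DataLoader(dataset, batch_size = 32, shuffle = True)
optimizer = torch.optim.Adam(model.parameters(), lr = learning_rate)
loss_fn = torch.nn.BCEWithLogitsLoss()

model.train(True)
for e in range(epoch):
    for image, mask in loader:
        optimizer.zero_grad()
        loss = loss_fn(model(image.cuda()), mask.cuda())
        loss.backward()
        optimizer.step()
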
Example #2
test_file_list = glob.glob(os.path.join(test_path, 'images', '*.png'))
test_file_list = [f.split('/')[-1].split('\\')[-1][:-4] for f in test_file_list] 

#stratified ten fold generator
fold = StratifiedKFold(n_splits = 10, shuffle = True, random_state = 42)
folds = list(fold.split(file_list,mask_class))

train_idx = folds[0][0]
val_idx = folds[0][1]

#10 % train/val split
file_list_val = list(map(file_list.__getitem__,val_idx))
file_list_train = list(map(file_list.__getitem__,train_idx))

#define dataset iterators
dataset = TGSSaltDataset(train_path, file_list_train)
dataset_val = TGSSaltDataset(train_path, file_list_val)
test_dataset = TGSSaltDataset(test_path, test_file_list, is_test = True)

class ConvBnRelu(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size):
        super().__init__()
        #note: padding=1 preserves the spatial size only for a 3x3 kernel
        self.conv = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size = kernel_size, padding=1),
                                  nn.BatchNorm2d(out_channels),
                                  nn.ReLU(inplace=True)
                                  )

    def forward(self, x):
        return self.conv(x)
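
As a quick sanity check of ConvBnRelu, the block keeps the spatial size when kernel_size is 3 (the input shape below is an illustrative assumption):

import torch

block = ConvBnRelu(in_channels = 1, out_channels = 32, kernel_size = 3)
x = torch.randn(4, 1, 128, 128)   #a batch of 4 single-channel 128x128 images
print(block(x).shape)             #torch.Size([4, 32, 128, 128])
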
    
class StackingFCN(nn.Module):
Example #3
    'CV5_resnext50_weighted_loss_no_drop_low_pixels_two_stage_SE_stratified_on_plateau_adam_hyper_decoder_gated'
]
submission_name = 'CV5_resnext50-101_weighted_loss_no_drop_low_pixels_two_stage_SE_stratified_on_plateau_adam_hyper_decoder_gated_finetuned_v2-tta.csv'

modelss = [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]  #fold/checkpoint indices to load for each parameter path
weights = [0.7, 0.3]  #blend weights for the two model groups

model_predictions = []

test_path = '../test'
#test files
test_file_list = glob.glob(os.path.join(test_path, 'images', '*.png'))
test_file_list = [
    f.split('/')[-1].split('\\')[-1][:-4] for f in test_file_list
]
test_dataset = TGSSaltDataset(test_path, test_file_list, is_test=True)

sigmoid = nn.Sigmoid()

for k, parameter_path in enumerate(parameter_path_list):
    models = modelss[k]
    for j in models:
        #load best model
        if '101' in parameter_path:
            model = get_model_152(num_classes=1)
        else:
            model = get_model_101(num_classes=1)

        model.load_state_dict(
            torch.load('../torch_parameters/' + parameter_path + '/model-' +
                       str(j) + '.pt'))
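
The loop above only loads the per-fold checkpoints; the submission name (ending in -tta) and weights = [0.7, 0.3] imply horizontally flipped test-time augmentation and a weighted blend of the two architecture groups. A minimal sketch of that combination, assuming each model outputs logits of shape (N, 1, H, W) (the helper below is illustrative, not the repo's exact code):

import torch

def predict_tta(model, image_batch):
    #average sigmoid predictions over the original and horizontally flipped batch
    model.train(False)
    with torch.no_grad():
        p = sigmoid(model(image_batch.cuda()))
        p_flip = sigmoid(model(torch.flip(image_batch, dims = [3]).cuda()))
    return 0.5 * (p + torch.flip(p_flip, dims = [3]))  #flip the flipped prediction back before averaging

#each group's folds are averaged, then the groups are blended:
#final = weights[0] * group_0_mean + weights[1] * group_1_mean
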
Example #4
#20 % train/val split
file_list_val = list(map(file_list.__getitem__, val_idx))
file_list_train = list(map(file_list.__getitem__, train_idx))

#redo mask list
mask_paths = [
    train_path + '/masks/' + str(file) + '.png' for file in file_list_train
]
mask_class = [
    get_mask_type(np.transpose(cv2.imread(mask), (2, 0, 1))[0, :, :] / 255)
    for mask in mask_paths
]

#define dataset iterators
dataset = TGSSaltDataset(train_path,
                         file_list_train,
                         augmentation=True,
                         classes=mask_class)
dataset_val = TGSSaltDataset(train_path, file_list_val)

#define resnext50
model_50 = get_model_50(num_classes=1)
model_50.load_state_dict(
    torch.load('../torch_parameters/' + parameter_path_50 + '/model-' +
               str(fold_to_test) + '.pt'))

#define resnext101
model_101 = get_model_101(num_classes=1)
model_101.load_state_dict(
    torch.load('../torch_parameters/' + parameter_path_101 + '/model-' +
               str(fold_to_test) + '.pt'))
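
get_mask_type, used earlier in this example to stratify on mask type, is not defined in the snippet. A plausible coverage-based sketch (an illustrative assumption, not necessarily the repo's exact bucketing):

import numpy as np

def get_mask_type(mask):
    #hypothetical bucketing: mask is a 2D array of 0/1 salt pixels
    coverage = mask.sum() / mask.size
    if coverage == 0:
        return 0                          #empty masks get their own class
    return int(np.ceil(coverage * 10))    #classes 1..10 by coverage decile
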
Example #5
#redo file list
#file_list = [file for file in file_list if file not in removed_images]

#10 % train/val split
file_list_val = file_list[::10]  #every 10th image into validation
file_list_train = [f for f in file_list if f not in file_list_val]

#test files
test_file_list = glob.glob(os.path.join(test_path, 'images', '*.png'))
test_file_list = [
    f.split('/')[-1].split('\\')[-1][:-4] for f in test_file_list
]

#define dataset iterators
dataset = TGSSaltDataset(train_path, file_list_train, augmentation=True)
dataset_val = TGSSaltDataset(train_path, file_list_val)
test_dataset = TGSSaltDataset(test_path, test_file_list, is_test=True)

#load best model
model = get_model(num_classes=1, num_filters=32, pretrained=True)
model.load_state_dict(
    torch.load('../torch_parameters/best_model_albunet_drop_pretrained.pt'))

model.train(False)
image = test_dataset[555]  #fetch a single test sample (index 555)
with torch.no_grad():
    y_pred = model(image[0].unsqueeze(0).cuda()).cpu().numpy()
    y_pred_flipped = to_tensor(
        hflip(
            to_pil_image(