예제 #1
0
def get_transform_fun(resized=False):
    """Build the joint image/mask augmentation pipeline for training.

    Parameters
    ----------
    resized : bool, optional
        When True, use the large-image setup (big crops, 0.5 downscale,
        final 512 crop); otherwise use the plain 256-crop pipeline.

    Returns
    -------
    et.ExtCompose
        Composed transform ending in ``ExtToTensor``.
    """
    if resized:  # idiomatic truthiness instead of `resized == True`
        transform_function = et.ExtCompose([
            et.ExtRandomCrop(size=2048),
            et.ExtRandomCrop(scale=0.7, size=None),
            et.ExtEnhanceContrast(),
            et.ExtRandomCrop(size=2048, pad_if_needed=True),
            et.ExtResize(scale=0.5),
            et.ExtRandomHorizontalFlip(p=0.5),
            et.ExtRandomCrop(size=512),
            et.ExtRandomVerticalFlip(p=0.5),
            et.ExtToTensor()
        ])
    else:
        transform_function = et.ExtCompose([
            et.ExtRandomCrop(size=256),
            et.ExtRandomHorizontalFlip(p=0.5),
            et.ExtRandomVerticalFlip(p=0.5),
            et.ExtEnhanceContrast(),
            et.ExtToTensor()
        ])
    return transform_function
예제 #2
0
# Switch the model to inference mode (disables dropout, uses running BN stats).
model.eval()

# Project DataLoader wrapping the raw patch folder and its metadata CSV.
data_loader = DataLoader(data_path=path_original_data,
                         metadata_path=path_meta_data)

# Collect validation file names: strip the last 8 characters and skip files
# whose 5th-from-last character is "k" (presumably "*mask*" files — verify
# against the naming scheme).
# NOTE(review): other snippets in this file slice with [:-4]; with [:-8],
# ".DS_Store" reduces to "." and the ".DS_S" filter below never matches —
# confirm which slice length is intended here.
file_names_val = np.array([
    image_name[:-8] for image_name in os.listdir(path_val)
    if image_name[-5] != "k"
])
# Drop the macOS ".DS_Store" artifact (".DS_Store"[:-4] == ".DS_S").
file_names_val = file_names_val[file_names_val != ".DS_S"]

torch.manual_seed(1)

# NOTE(review): both branches build the identical transform, so `resize`
# currently has no effect here — confirm whether the resized branch was
# meant to include an ExtResize step.
if not resize:
    transform_function = et.ExtCompose([
        et.ExtToTensor(),
        et.ExtNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

else:
    transform_function = et.ExtCompose([
        et.ExtToTensor(),
        et.ExtNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

# Inverse of the ImageNet normalization above, for visualization.
denorm = Denormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# Binary task: defect / no-defect colour and target mappings.
if binary:
    color_dict = data_loader.color_dict_binary
    target_dict = data_loader.get_target_dict()
    annotations_dict = data_loader.annotations_dict

else:
import matplotlib.pyplot as plt
from object_detect.train_hpc import define_model
from torchvision.models.segmentation import deeplabv3_resnet101


def get_predictions(samples, ids, path_save, file_names):
    """Save each predicted image/mask pair to `path_save` as PNG files.

    BUG FIX: the original referenced the index `i` before the
    `for i in range(len(ids))` loop defined it (NameError on first use),
    and would then have re-saved the same image/mask under every name.
    The per-index extraction now happens inside the index loop.

    Parameters
    ----------
    samples : iterable of (image batch, mask batch) pairs.
    ids : sequence of id tensors; `ids[i].numpy()[0]` indexes `file_names`.
    path_save : str — output directory.
    file_names : indexable collection of base file names.
    """
    for (img, m), _id in zip(samples, ids):  # `_id`: avoid shadowing builtin `id`
        for i in range(len(ids)):
            # CHW float tensor in [0, 1] -> HWC uint8 array — assumes the
            # batch dimension of `img`/`m` matches len(ids); TODO confirm.
            image = (img[i].detach().cpu().numpy() * 255).transpose(1, 2, 0).astype(np.uint8)
            mask = m[i]
            name = file_names[ids[i].numpy()[0]]
            Image.fromarray(image.astype(np.uint8)).save(
                path_save + '/{}_img.png'.format(name), format='PNG')
            Image.fromarray(mask.astype(np.uint8)).save(
                path_save + '/{}_mask.png'.format(name), format='PNG')

# Evaluation-time transform: tensor conversion only, no augmentation.
transform_function = et.ExtCompose([et.ExtToTensor()])

# Run configuration flags.
HPC=False            # running locally rather than on the HPC cluster
splitted_data=True   # use the pre-split train/val data layout
binary=True          # binary (defect / no-defect) segmentation task
tif=False            # skip loading the large TIF test images

if __name__ == '__main__':

    # Seed every RNG in play for reproducible runs.
    random_seed = 1
    torch.manual_seed(random_seed)
    np.random.seed(random_seed)
    random.seed(random_seed)

    device = torch.device('cpu')
    model_name = 'resnet50'
예제 #4
0
    # Drop the macOS ".DS_Store" artifact (its name slices to ".DS_S").
    file_names_train = file_names_train[file_names_train != ".DS_S"]

    # Validation file names: strip the 4-char extension and skip files whose
    # 5th-from-last character is "k" (presumably "*mask.*" files — verify).
    file_names_val = np.array([
        image_name[:-4] for image_name in os.listdir(path_val)
        if image_name[-5] != "k"
    ])
    N_files = len(file_names_val)
    # Shuffle validation files (deterministic given the seeded RNG).
    shuffled_index = np.random.permutation(len(file_names_val))
    file_names_val = file_names_val[shuffled_index]
    file_names_val = file_names_val[file_names_val != ".DS_S"]

    # #FOR EXTENDED DATASET EXPERIMENT
    # Training augmentation: flips + SIZE crop + contrast enhancement, then
    # tensor conversion and ImageNet normalization.
    transform_function = et.ExtCompose([
        et.ExtRandomHorizontalFlip(p=0.5),
        et.ExtRandomCrop(size=SIZE),
        et.ExtEnhanceContrast(),
        et.ExtRandomVerticalFlip(p=0.5),
        et.ExtToTensor(),
        et.ExtNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    # Binary task: defect / no-defect mappings from the project DataLoader.
    if binary:
        color_dict = data_loader.color_dict_binary
        target_dict = data_loader.get_target_dict()
        annotations_dict = data_loader.annotations_dict

    # Dataset over the training split (images and masks share one folder).
    train_dst = LeatherData(path_mask=path_train,
                            path_img=path_train,
                            list_of_filenames=file_names_train,
                            transform=transform_function,
                            color_dict=color_dict,
                            target_dict=target_dict)
    if image_name[-5] != "k"
])
# Drop the macOS ".DS_Store" artifact (its name slices to ".DS_S").
file_names_train = file_names_train[file_names_train != ".DS_S"]

# Validation file names: strip the 4-char extension and skip files whose
# 5th-from-last character is "k" (presumably "*mask.*" files — verify).
file_names_val = np.array([
    image_name[:-4] for image_name in os.listdir(path_val)
    if image_name[-5] != "k"
])
file_names_val = file_names_val[file_names_val != ".DS_S"]

#transform_function = et.ExtCompose([et.ExtEnhanceContrast(), et.ExtRandomCrop((1000, 1000)), et.ExtToTensor(),
#                                   et.ExtNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])

# Evaluation transform: downscale, 512 crop (semantic-evaluation variant),
# contrast enhancement. No tensor conversion / normalization here.
transform_function = et.ExtCompose([
    et.ExtResize(scale=0.5),
    et.ExtRandomCrop(size=512, semantic_evaluation_resize=True, scale=0.7),
    et.ExtEnhanceContrast()
])

# Inverse of the ImageNet normalization, for visualization.
denorm = Denormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# Select colour/target/annotation mappings for the binary or multi-class task.
if binary:
    color_dict = data_loader.color_dict_binary
    target_dict = data_loader.get_target_dict()
    annotations_dict = data_loader.annotations_dict

else:
    color_dict = data_loader.color_dict
    target_dict = data_loader.get_target_dict(labels)
    annotations_dict = data_loader.annotations_dict

train_dst = LeatherData(path_mask=path_train,
예제 #6
0
        device = torch.device('cpu')
        cpu_device = torch.device('cpu')

        model_name = 'resnet50'
        # Local data and output paths for this experiment.
        path_original_data = r'C:\Users\johan\OneDrive\Skrivebord\leather_patches'
        path_meta_data = r'samples/model_comparison.csv'
        optim = "SGD"
        tif_path = r'C:\Users\johan\iCloudDrive\DTU\KID\BA\HPC\TIF\good_area1.png'
        save_path = r'C:\Users\johan\iCloudDrive\DTU\KID\BA\HPC\last_round\predictions\vda4'
        resize = False
        # BUG FIX: the else-branch previously assigned `path_size` (typo),
        # which left `patch_size` undefined whenever resize is False.
        if resize:
            patch_size = 512
        else:
            patch_size = 256

    # Minimal evaluation transform: contrast enhancement + tensor conversion.
    transform_function = et.ExtCompose([et.ExtEnhanceContrast(),
                                        et.ExtToTensor()])

    print("Device: %s" % device)
    print("Exp: ", exp)
    # `brevetti` selects which leather dataset this run targets.
    if brevetti:
        print("REDHALF")
    else:
        print("WALKNAPPA")
    data_loader = DataLoader(data_path=path_original_data,
                             metadata_path=path_meta_data)

    # Load the full TIF test area as a numpy array for tiled prediction.
    array = load_tif_as_numpy_array(tif_path)
    print("Shape array: ", np.shape(array))

    if resize:  # idiomatic truthiness instead of `resize == True`
        resize_function = et.ExtCompose([et.ExtResize(scale=0.5)])
예제 #7
0
# Alternative model constructions kept for reference (not executed):
#    model=deeplabv3_resnet101(pretrained=True, progress=True,num_classes=21, aux_loss=None)
#    model.classifier[-1] = torch.nn.Conv2d(256, n_classes+2, kernel_size=(1, 1), stride=(1, 1)).requires_grad_()
#    model.aux_classifier[-1] = torch.nn.Conv2d(256, n_classes+2, kernel_size=(1, 1), stride=(1, 1)).requires_grad_()
#else:
#    model=_segm_mobilenet('deeplabv3', 'mobile_net', output_stride=8, num_classes=n_classes+2,pretrained_backbone=True)

#model.load_state_dict(checkpoint['model_state'])
# Move the (already constructed) model to the target device and switch it
# to inference mode for prediction.
model.to(device)
model.eval()

# When resizing, halve the patch geometry / overlap and prepend a 0.5
# resize step; otherwise keep patches at full scale. Both pipelines end
# with contrast enhancement, tensor conversion and ImageNet normalization.
if not resize:
    transform_function = et.ExtCompose([
        et.ExtEnhanceContrast(),
        et.ExtToTensor(),
        et.ExtNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
else:
    patch_dim = (int(patch_dim[0] * 0.5), int(patch_dim[1] * 0.5))
    overlap = int(overlap * 0.5)
    transform_function = et.ExtCompose([
        et.ExtResize(scale=0.5),
        et.ExtEnhanceContrast(),
        et.ExtToTensor(),
        et.ExtNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

# Accumulator for the stitched prediction tiles over the TIF grid.
target_tif = []
# Blank uint8 mask patch reused as the default label for each tile.
label = Image.fromarray(np.zeros(patch_dim, dtype=np.uint8))
for i in range(0, split_x_y[0], step_size):
    print(i)
    pred_stack = []
예제 #8
0
import os
from data_import.data_loader import DataLoader
from data_import.data_pipeline import import_data_and_mask
from semantic_segmentation.DeepLabV3.utils import ext_transforms as et
import torch
import object_detect.helper.utils as utils
from semantic_segmentation.DeepLabV3.dataset_class import LeatherData
from data_import.data_loader import DataLoader
from torch.utils import data
import random
import numpy as np

# Training augmentation: scaled random crop, flips and contrast
# enhancement, ending in tensor conversion (no normalization here).
transform_function = et.ExtCompose([
    et.ExtRandomCrop(scale=0.7),
    et.ExtRandomHorizontalFlip(p=0.5),
    et.ExtRandomVerticalFlip(p=0.5),
    et.ExtEnhanceContrast(),
    et.ExtToTensor()
])

# Seed every RNG in play for reproducible runs.
random_seed = 1
torch.manual_seed(random_seed)
np.random.seed(random_seed)
random.seed(random_seed)

device = torch.device('cpu')
lr = 0.01                       # learning rate
layers_to_train = 'classifier'  # only this part of the network is trained
num_epoch = 1
# Local paths to the raw patches and their metadata CSV.
path_original_data = r'C:\Users\johan\OneDrive\Skrivebord\leather_patches'
path_meta_data = r'samples/model_comparison.csv'
예제 #9
0
from torch.utils import data
from semantic_segmentation.DeepLabV3.metrics import StreamSegMetrics
from semantic_segmentation.DeepLabV3.utils import ext_transforms as et
from semantic_segmentation.DeepLabV3.utils.utils import Denormalize
import torch
import torch.nn as nn
from torchvision.models.segmentation import deeplabv3_resnet101
import os
import PIL
import pickle
import matplotlib.pyplot as plt

# Evaluation transform: label remapping, fixed 512 center crop and scale,
# contrast enhancement, then tensor conversion and ImageNet normalization.
transform_function = et.ExtCompose([
    et.ExtTransformLabel(),
    et.ExtCenterCrop(512),
    et.ExtScale(512),
    et.ExtEnhanceContrast(),
    et.ExtToTensor(),
    et.ExtNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

# Training hyper-parameters.
num_classes = 2              # two output classes
output_stride = 16           # DeepLab output stride
save_val_results = False     # do not write validation predictions to disk
total_itrs = 100             # total training iterations
lr = 0.01                    # base learning rate
lr_policy = 'step'           # step-decay LR schedule
step_size = 10000            # iterations between LR steps
batch_size = 16
val_batch_size = 4
loss_type = "cross_entropy"
weight_decay = 1e-4