Пример #1
0
def get_transform_fun(resized=False):
    """Build the joint image/mask augmentation pipeline.

    Parameters
    ----------
    resized : bool, optional
        When True, return the large-image pipeline (big crops, 0.5x
        resize, flips); otherwise return the simpler 256-crop pipeline.

    Returns
    -------
    et.ExtCompose
        Composed transform ending in ``ExtToTensor`` (presumably applied
        to image and mask jointly — as the ``Ext*`` naming suggests).
    """
    # Idiomatic truthiness test instead of the original `resized == True`.
    if resized:
        transform_function = et.ExtCompose([
            et.ExtRandomCrop(size=2048),
            et.ExtRandomCrop(scale=0.7, size=None),
            et.ExtEnhanceContrast(),
            et.ExtRandomCrop(size=2048, pad_if_needed=True),
            et.ExtResize(scale=0.5),
            et.ExtRandomHorizontalFlip(p=0.5),
            et.ExtRandomCrop(size=512),
            et.ExtRandomVerticalFlip(p=0.5),
            et.ExtToTensor()
        ])
    else:
        transform_function = et.ExtCompose([
            et.ExtRandomCrop(size=256),
            et.ExtRandomHorizontalFlip(p=0.5),
            et.ExtRandomVerticalFlip(p=0.5),
            et.ExtEnhanceContrast(),
            et.ExtToTensor()
        ])
    return transform_function
Пример #2
0
    # NOTE(review): this fragment begins mid-function (its header is outside
    # this view) and the LeatherData(...) call at the bottom is truncated by
    # the scrape; code is left byte-identical, only comments were added.

    # Apply the previously computed random permutation to shuffle the
    # training file names, then drop the macOS ".DS_Store" artefact (its
    # name becomes ".DS_S" after the [:-4] extension strip used below).
    file_names_train = file_names_train[shuffled_index]
    file_names_train = file_names_train[file_names_train != ".DS_S"]

    # Collect validation file names with the 4-char extension stripped.
    # The `image_name[-5] != "k"` test presumably excludes mask files
    # (e.g. names ending in "mask.png") — TODO confirm against the dataset.
    file_names_val = np.array([
        image_name[:-4] for image_name in os.listdir(path_val)
        if image_name[-5] != "k"
    ])
    N_files = len(file_names_val)
    # Shuffle validation names and drop ".DS_Store" residue as above.
    shuffled_index = np.random.permutation(len(file_names_val))
    file_names_val = file_names_val[shuffled_index]
    file_names_val = file_names_val[file_names_val != ".DS_S"]

    # #FOR EXTENDED DATASET EXPERIMENT
    # Joint augmentation: horizontal flip, crop to SIZE, contrast enhance,
    # vertical flip, tensor conversion, then ImageNet-statistics normalize.
    transform_function = et.ExtCompose([
        et.ExtRandomHorizontalFlip(p=0.5),
        et.ExtRandomCrop(size=SIZE),
        et.ExtEnhanceContrast(),
        et.ExtRandomVerticalFlip(p=0.5),
        et.ExtToTensor(),
        et.ExtNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    # Select colour/target/annotation mappings for the binary-label variant.
    # NOTE(review): no `else` branch is visible here — presumably it exists
    # past the truncation, as in the other examples on this page.
    if binary:
        color_dict = data_loader.color_dict_binary
        target_dict = data_loader.get_target_dict()
        annotations_dict = data_loader.annotations_dict

    # Build the training dataset (constructor call cut off mid-arguments).
    train_dst = LeatherData(path_mask=path_train,
                            path_img=path_train,
                            list_of_filenames=file_names_train,
                            transform=transform_function,
    # NOTE(review): the two lines below are the orphan tail of a truncated
    # `file_names_train = np.array([...])` comprehension whose opening is
    # missing from this scrape; code left byte-identical, comments only.
    if image_name[-5] != "k"
])
# Drop the macOS ".DS_Store" artefact (".DS_S" after the [:-4] strip).
file_names_train = file_names_train[file_names_train != ".DS_S"]

# Validation names without extensions; `[-5] != "k"` presumably skips
# mask files — TODO confirm against the dataset naming convention.
file_names_val = np.array([
    image_name[:-4] for image_name in os.listdir(path_val)
    if image_name[-5] != "k"
])
file_names_val = file_names_val[file_names_val != ".DS_S"]

#transform_function = et.ExtCompose([et.ExtEnhanceContrast(), et.ExtRandomCrop((1000, 1000)), et.ExtToTensor(),
#                                   et.ExtNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])

# Evaluation-time transform: 0.5x resize, 512 crop with the
# semantic-evaluation resize option, then contrast enhancement.
# NOTE(review): no ExtToTensor here, unlike the training pipelines —
# presumably intentional for visual evaluation; confirm with the caller.
transform_function = et.ExtCompose([
    et.ExtResize(scale=0.5),
    et.ExtRandomCrop(size=512, semantic_evaluation_resize=True, scale=0.7),
    et.ExtEnhanceContrast()
])

# Denormalizer that inverts the ImageNet-statistics normalization.
denorm = Denormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# Choose label/colour mappings for binary vs. multi-label setups.
if binary:
    color_dict = data_loader.color_dict_binary
    target_dict = data_loader.get_target_dict()
    annotations_dict = data_loader.annotations_dict

else:
    color_dict = data_loader.color_dict
    target_dict = data_loader.get_target_dict(labels)
    annotations_dict = data_loader.annotations_dict

# Training dataset construction (call truncated by the scrape).
train_dst = LeatherData(path_mask=path_train,
Пример #4
0
# Make the project package importable on the cluster home directory.
# NOTE(review): `import sys` is not visible in this chunk — presumably it
# appears above the scraped region; confirm before running standalone.
sys.path.append('/zhome/dd/4/128822/Bachelorprojekt/Bachelor-Criterion-AI')

import os
from data_import.data_loader import DataLoader
from data_import.data_pipeline import import_data_and_mask
from semantic_segmentation.DeepLabV3.utils import ext_transforms as et
import torch
import object_detect.helper.utils as utils
from semantic_segmentation.DeepLabV3.dataset_class import LeatherData
# NOTE(review): duplicate import — DataLoader is already imported above.
from data_import.data_loader import DataLoader
from torch.utils import data
import random
import numpy as np

# Joint image/mask training augmentation: scaled random crop, random
# horizontal/vertical flips, contrast enhancement, tensor conversion.
augmentation_steps = [
    et.ExtRandomCrop(scale=0.7),
    et.ExtRandomHorizontalFlip(p=0.5),
    et.ExtRandomVerticalFlip(p=0.5),
    et.ExtEnhanceContrast(),
    et.ExtToTensor(),
]
transform_function = et.ExtCompose(augmentation_steps)

# Seed every RNG in use (torch, numpy, stdlib random) so runs are
# reproducible across restarts.
random_seed = 1
torch.manual_seed(random_seed)
np.random.seed(random_seed)
random.seed(random_seed)

# Training configuration for this experiment.
device = torch.device('cpu')  # CPU-only run
lr = 0.01  # learning rate
# Presumably only the classifier head is unfrozen during training —
# TODO confirm against the training loop that consumes this flag.
layers_to_train = 'classifier'
num_epoch = 1  # single epoch (smoke-test scale)