def main():
    train_list = make_datapath_list(phase="train")
    val_list = make_datapath_list(phase="val")

    # Dataset
    train_dataset = MyDataset(train_list,
                              transform=ImageTransform(resize, mean, std),
                              phase="train")
    val_dataset = MyDataset(val_list,
                            transform=ImageTransform(resize, mean, std),
                            phase="val")

    train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=4,
                                                   shuffle=True)
    val_dataloader = torch.utils.data.DataLoader(val_dataset,
                                                 batch_size=4,
                                                 shuffle=False)

    dataloader_dict = {"train": train_dataloader, "val": val_dataloader}

    # Network
    use_pretrained = "true"
    net = models.vgg16(pretrained=use_pretrained)
    net.classifier[6] = nn.Linear(in_features=4096, out_features=2)

    # Loss
    criterion = nn.CrossEntropyLoss()

    # Optimizer
    # param_to_update = []

    # update_param_name = ["classifier.6.weight", "classifier.6.bias"]

    # for name, param in net.named_parameters():
    #     if name in update_param_name:
    #         param.requires_grad = True
    #         param_to_update.append(param)
    #         print(name)
    #     else:
    #         param.requires_grad = False
    params1, params2, params3 = param_to_update(net)

    optimizer = optim.SGD([
        {'params': params1, 'lr': 1e-4},
        {'params': params2, 'lr': 5e-4},
        {'params': params3, 'lr': 1e-3},
    ], momentum=0.9)

    train_model(net, dataloader_dict, criterion, optimizer, num_epochs)
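
The param_to_update helper (called params_to_update in the later examples) is not shown in any of these snippets. A minimal sketch of what it presumably does, assuming the usual VGG-16 fine-tuning split into three parameter groups; the exact grouping is an assumption:

# Hypothetical sketch of the param_to_update helper assumed above.
# Splits the VGG-16 parameters into three groups so each can get its own
# learning rate: convolutional backbone, first FC layers, replaced final layer.
def param_to_update(net):
    params1, params2, params3 = [], [], []
    update_params2 = ["classifier.0.weight", "classifier.0.bias",
                      "classifier.3.weight", "classifier.3.bias"]
    update_params3 = ["classifier.6.weight", "classifier.6.bias"]

    for name, param in net.named_parameters():
        param.requires_grad = True
        if "features" in name:
            params1.append(param)
        elif name in update_params2:
            params2.append(param)
        elif name in update_params3:
            params3.append(param)
    return params1, params2, params3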
Example 2
def main():
    train_list = make_datapath_list("train")
    val_list = make_datapath_list("val")

    # dataset
    train_dataset = MyDataset(train_list, transform=ImageTransform(resize, mean, std), phase='train')
    val_dataset = MyDataset(val_list, transform=ImageTransform(resize, mean, std), phase='val')

    # dataloader
    train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size, shuffle=True)
    val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size, shuffle=False)
    dataloader_dict = {"train": train_dataloader, "val": val_dataloader}

    # network
    use_pretrained = True
    net = models.vgg16(pretrained=use_pretrained)
    net.classifier[6] = nn.Linear(in_features=4096, out_features=2, bias=True)
    print(net)
    # set training mode (.train() returns the module itself, so no reassignment is needed)
    net.train()

    # loss
    criterion = nn.CrossEntropyLoss()

    # optimizer
    # Update only the desired parameters:
    # params_to_update = []
    # update_params_name = ["classifier.6.weight", "classifier.6.bias"]
    # for name, param in net.named_parameters():
    #     if name in update_params_name:
    #         param.requires_grad = True
    #         params_to_update.append(param)
    #         print(name)
    #     else:
    #         param.requires_grad = False
    params1, params2, params3 = params_to_update(net)
    # params: the parameter groups to update
    # lr: learning rate for each group
    # momentum: SGD momentum factor
    optimizer = optim.SGD([
        {"params": params1, "lr": 1e-4},
        {"params": params2, "lr": 5e-4},
        {"params": params3, "lr": 1e-3}
    ], momentum=0.9)

    # training
    train_model(net, dataloader_dict, criterion, optimizer, num_epoch)
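
As a quick sanity check (not part of the original snippet), you can list which parameters actually have requires_grad set before training:

# Sanity check (hypothetical addition): print the parameters that will be updated.
for name, param in net.named_parameters():
    if param.requires_grad:
        print(name, tuple(param.shape))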
Example 3
def main():
    train_list = make_datapath_list("train")
    val_list = make_datapath_list("val")

    # dataset
    train_dataset = MyDataset(train_list,
                              transform=ImageTransform(resize, mean, std),
                              phase="train")
    val_dataset = MyDataset(val_list,
                            transform=ImageTransform(resize, mean, std),
                            phase="val")

    # dataloader
    train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size,
                                                   shuffle=True)
    val_dataloader = torch.utils.data.DataLoader(val_dataset,
                                                 batch_size,
                                                 shuffle=False)
    dataloader_dict = {"train": train_dataloader, "val": val_dataloader}

    # network
    use_pretrained = True
    net = models.vgg16(pretrained=use_pretrained)
    net.classifier[6] = nn.Linear(in_features=4096, out_features=2)

    # loss
    criterion = nn.CrossEntropyLoss()

    # optimizer
    params1, params2, params3 = params_to_update(net)
    optimizer = optim.SGD([
        {'params': params1, 'lr': 1e-4},
        {'params': params2, 'lr': 5e-4},
        {'params': params3, 'lr': 1e-3},
    ], momentum=0.9)

    # training
    train_model(net, dataloader_dict, criterion, optimizer, num_epochs)
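
train_model itself is not included in any of these snippets. A minimal sketch of the train/val loop it presumably implements, assuming the dataloader_dict, criterion, and optimizer interface used above:

import torch

# Hypothetical sketch of train_model, assuming the interface used above.
def train_model(net, dataloader_dict, criterion, optimizer, num_epochs):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net.to(device)
    for epoch in range(num_epochs):
        for phase in ["train", "val"]:
            net.train() if phase == "train" else net.eval()
            epoch_loss, epoch_corrects = 0.0, 0
            for inputs, labels in dataloader_dict[phase]:
                inputs, labels = inputs.to(device), labels.to(device)
                optimizer.zero_grad()
                # Track gradients only during the training phase.
                with torch.set_grad_enabled(phase == "train"):
                    outputs = net(inputs)
                    loss = criterion(outputs, labels)
                    preds = outputs.argmax(dim=1)
                    if phase == "train":
                        loss.backward()
                        optimizer.step()
                epoch_loss += loss.item() * inputs.size(0)
                epoch_corrects += (preds == labels).sum().item()
            n = len(dataloader_dict[phase].dataset)
            print(f"epoch {epoch + 1} {phase} "
                  f"loss: {epoch_loss / n:.4f} acc: {epoch_corrects / n:.4f}")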
Example 4
    def make_file_list(self):
        file_list = []
        path = self.params["data_path"][self.phase]
        tissue_path = self.params["data_path"]["tissue_"+self.phase]
        if path:
            file_list.extend(make_datapath_list(path, self.labels))
        if tissue_path:
            file_list.extend(make_datapath_list(tissue_path, self.labels))
        
        arrange = self.params["imbalance"]
        # When resampling to adjust class imbalance
        if arrange in ("oversampling", "undersampling") and self.phase == "train":
            arrange_file_list = []
            file_dict = self.make_file_dict(file_list)
            
            # Undersampling (optionally with bagging)
            if arrange == "undersampling":
                min_file_num = min(len(val) for val in file_dict.values())
                for val in file_dict.values():
                    # With replacement (use this when bagging):
                    # arrange_file_list.append(random.choices(val, k=min_file_num))
                    # Without replacement (use this when not bagging):
                    arrange_file_list.append(random.sample(val, min_file_num))
            
            # Oversampling
            elif arrange == "oversampling":
                max_file_num = max(len(val) for val in file_dict.values())
                for val in file_dict.values():
                    # Sample with replacement; random.sample cannot be used when
                    # k exceeds the number of original elements.
                    arrange_file_list.append(random.choices(val, k=max_file_num))
                
            file_list = list(itertools.chain.from_iterable(arrange_file_list))
        return file_list
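
make_file_dict is referenced above but not defined here. A plausible sketch, assuming each file path contains its class label as a substring (the label-matching rule is an assumption):

    # Hypothetical sketch of make_file_dict: group file paths by class label.
    def make_file_dict(self, file_list):
        file_dict = {label: [] for label in self.labels}
        for path in file_list:
            for label in self.labels:
                if label in path:  # assumes the label appears in the path
                    file_dict[label].append(path)
                    break
        return file_dict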
Example 5
        SAVEWHITEPATH = SAVEBASEPATH / 'white'
    else:
        print('Error: please enter a valid path')
        sys.exit()

    LABELS = ['Normal', 'PTC', 'fvptc', 'ftc', 'med', 'poor', 'und']
    SIZE = 224  # target size after resizing

    if '03_迅速標本frozen' in str(BASEPATH):
        back_ground_color = (221, 207, 220)
        threshold = 203
    else:
        back_ground_color = (234, 228, 224)  # nearly the same color as the background
        threshold = 220

    path_list = make_datapath_list(str(BASEPATH), LABELS)
    print('Preprocessing: shrinking, padding to square, and removing images that are almost entirely background')
    for path in tqdm(path_list):
        image = Image.open(path)
        result_image = resize(image=image, size=SIZE)
        result_image = padding_square(image=result_image,
                                      background_color=back_ground_color)
        if is_white_image(result_image, threshold=threshold):
            # save_path = pathlib.Path(path.replace(str(BASEPATH), str(SAVEWHITEPATH)))
            continue
        save_path = pathlib.Path(path.replace(str(BASEPATH), str(SAVEBASEPATH)))
        if not save_path.parent.exists():
            save_path.parent.mkdir(parents=True)
        result_image.save(save_path)
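
is_white_image is referenced but not defined in this snippet. A minimal sketch, assuming it flags images whose mean grayscale intensity exceeds the threshold, i.e. images that are almost entirely background:

import numpy as np

# Hypothetical sketch of is_white_image: treat an image as "background only"
# when its average grayscale intensity exceeds the given threshold.
def is_white_image(image, threshold):
    gray = np.asarray(image.convert('L'), dtype=np.float32)
    return gray.mean() > threshold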
Example 6
from LoadData import HymenopteraDataset
from train import train_model
from utils import ImageTransform, make_datapath_list

from torch.utils.data import DataLoader
from torchvision.models import vgg16
import torch.nn as nn
import torch.optim as optim
import torch

resize = 224
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)

train_list = make_datapath_list(mode='train')
val_list = make_datapath_list(mode='val')

train_dataset = HymenopteraDataset(file_list=train_list,
                                   transform=ImageTransform(resize, mean, std),
                                   mode='train')
val_dataset = HymenopteraDataset(file_list=val_list,
                                 transform=ImageTransform(resize, mean, std),
                                 mode='val')

batch_size = 32

train_dataloader = DataLoader(train_dataset,
                              batch_size=batch_size,
                              shuffle=True)
val_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
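
ImageTransform, used throughout these examples, is not shown. A common sketch: phase-dependent torchvision pipelines with augmentation for training and deterministic preprocessing for validation; the exact augmentations are assumptions:

from torchvision import transforms

# Hypothetical sketch of ImageTransform: phase-dependent preprocessing.
class ImageTransform:
    def __init__(self, resize, mean, std):
        self.data_transform = {
            'train': transforms.Compose([
                transforms.RandomResizedCrop(resize, scale=(0.5, 1.0)),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize(mean, std),
            ]),
            'val': transforms.Compose([
                transforms.Resize(resize),
                transforms.CenterCrop(resize),
                transforms.ToTensor(),
                transforms.Normalize(mean, std),
            ]),
        }

    def __call__(self, img, phase='train'):
        return self.data_transform[phase](img)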