def main():
    """Fine-tune a pretrained VGG16 for 2-class classification.

    Builds train/val datasets and dataloaders, replaces the final
    classifier layer with a 2-class head, and trains with SGD using
    three parameter groups at increasing learning rates.

    Relies on module-level names: ``make_datapath_list``, ``MyDataset``,
    ``ImageTransform``, ``resize``, ``mean``, ``std``,
    ``param_to_update``, ``train_model``, ``num_epochs``.
    """
    train_list = make_datapath_list(phase="train")
    val_list = make_datapath_list(phase="val")

    # Dataset
    train_dataset = MyDataset(train_list,
                              transform=ImageTransform(resize, mean, std),
                              phase="train")
    val_dataset = MyDataset(val_list,
                            transform=ImageTransform(resize, mean, std),
                            phase="val")

    # DataLoader: shuffle only the training split.
    train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=4,
                                                   shuffle=True)
    val_dataloader = torch.utils.data.DataLoader(val_dataset,
                                                 batch_size=4,
                                                 shuffle=False)

    dataloader_dict = {"train": train_dataloader, "val": val_dataloader}

    # Network: ImageNet-pretrained VGG16 with a fresh 2-class head.
    # BUG FIX: `pretrained` expects a bool; the original passed the
    # string "true", which is only accidentally truthy.
    use_pretrained = True
    net = models.vgg16(pretrained=use_pretrained)
    net.classifier[6] = nn.Linear(in_features=4096, out_features=2)

    # Loss
    criterior = nn.CrossEntropyLoss()

    # Optimizer: three parameter groups with increasing learning rates
    # (earlier layers get smaller updates than the new head).
    params1, params2, params3 = param_to_update(net)
    optimizer = optim.SGD([
        {'params': params1, 'lr': 1e-4},
        {'params': params2, 'lr': 5e-4},
        {'params': params3, 'lr': 1e-3},
    ], momentum=0.9)

    train_model(net, dataloader_dict, criterior, optimizer, num_epochs)
# Example #2
def main():
    """Fine-tune a pretrained VGG16 for 2-class classification.

    Same pipeline as above: path lists -> datasets -> dataloaders ->
    VGG16 with a 2-class head -> SGD with three LR groups -> training.
    """
    # Gather image paths, then build one dataset per phase.
    train_list = make_datapath_list("train")
    val_list = make_datapath_list("val")

    # dataset
    datasets = {
        "train": MyDataset(train_list,
                           transform=ImageTransform(resize, mean, std),
                           phase="train"),
        "val": MyDataset(val_list,
                         transform=ImageTransform(resize, mean, std),
                         phase="val"),
    }

    # dataloader (shuffle only during training)
    dataloader_dict = {
        "train": torch.utils.data.DataLoader(datasets["train"],
                                             batch_size,
                                             shuffle=True),
        "val": torch.utils.data.DataLoader(datasets["val"],
                                           batch_size,
                                           shuffle=False),
    }

    # network: pretrained VGG16, last FC layer swapped for 2 outputs
    use_pretrained = True
    net = models.vgg16(pretrained=use_pretrained)
    net.classifier[6] = nn.Linear(in_features=4096, out_features=2)

    # loss
    criterior = nn.CrossEntropyLoss()

    # optimizer: per-group learning rates returned by params_to_update
    params1, params2, params3 = params_to_update(net)
    param_groups = [
        {'params': params1, 'lr': 1e-4},
        {'params': params2, 'lr': 5e-4},
        {'params': params3, 'lr': 1e-3},
    ]
    optimizer = optim.SGD(param_groups, momentum=0.9)

    # training
    train_model(net, dataloader_dict, criterior, optimizer, num_epochs)
# Example #3
def main():
    """Load a sample image, apply the train-phase transform, show both."""
    # Load and display the raw image.
    img_path = './data/9497/resize-070327.jpg'
    img = Image.open(img_path)
    plt.imshow(img)
    plt.show()

    size = (224, 224)
    # NOTE(review): mean/std are defined but never passed to
    # ImageTransform below — confirm whether the transform should
    # receive them (other ImageTransform variants in this project do).
    mean = (0.485, 0.456, 0.406)
    std = (0.229, 0.224, 0.225)

    # Apply the 'train' phase transform; result is a (C, H, W) tensor.
    transform = ImageTransform(size)
    img_transform = transform(img, phase='train')
    print(img_transform.shape)

    # Tensor -> HWC numpy array, clipped to [0, 1] for display.
    img_transformed = np.clip(
        img_transform.numpy().transpose((1, 2, 0)), 0, 1)
    plt.imshow(img_transformed)
    plt.show()
# Example #4
def predict(img):
    """Classify a single image with the fine-tuned VGG16 model.

    Args:
        img: input image accepted by ``ImageTransform`` (e.g. a PIL image).

    Returns:
        The result of ``predictor.predict_max`` on the network output.

    Relies on module-level names: ``load_model``, ``save_path``,
    ``ImageTransform``, ``resize``, ``mean``, ``std``, ``predictor``.
    """
    # Prepare network: VGG16 with a 2-class head, in eval mode so
    # dropout/batch-norm layers behave deterministically.
    use_pretrained = True
    net = models.vgg16(pretrained=use_pretrained)
    net.classifier[6] = nn.Linear(in_features=4096, out_features=2)
    net.eval()

    # Load the fine-tuned weights.
    model = load_model(net, save_path)

    # Preprocess and add a batch dimension.
    transform = ImageTransform(resize, mean, std)
    img = transform(img, phase="test")
    img = img.unsqueeze_(
        0)  # (chan, height, width) -> (1, chan, height, width)

    # IMPROVEMENT: inference only — disable autograd tracking; eval()
    # alone does not do this, so the original built a needless graph.
    with torch.no_grad():
        output = model(img)
    response = predictor.predict_max(output)

    return response
# Map each numeric label id to its label name: keep the first occurrence
# of every (labels, labels_n) pair, then index by labels_n.
# IDIOM: drop_duplicates(subset) is the idiomatic equivalent of masking
# with `duplicated(...) == False`.
df_labels_idx = (df_train.drop_duplicates(["labels", "labels_n"])
                 [["labels_n", "labels"]]
                 .set_index("labels_n")
                 .sort_index())

# Load image path lists for each phase.
train_list = make_datapath_list(phase="train")
print(f"train data length : {len(train_list)}")
val_list = make_datapath_list(phase="val")
print(f"validation data length : {len(val_list)}")
test_list = make_datapath_list(phase="test")
print(f"test data length : {len(test_list)}")

# Create one dataset per phase.
train_dataset = PlantDataset(df_labels_idx,
                             df_train,
                             train_list,
                             transform=ImageTransform(size, mean, std),
                             phase='train')
val_dataset = PlantDataset(df_labels_idx,
                           df_train,
                           val_list,
                           transform=ImageTransform(size, mean, std),
                           phase='val')
test_dataset = PlantDataset(df_labels_idx,
                            df_train,
                            test_list,
                            transform=ImageTransform(size, mean, std),
                            phase='test')

index = 0

print("【train dataset】")
# Example #6
def main(cfg):
    """Train and evaluate an FCN on pathology images.

    Args:
        cfg: config object providing ``size``, ``num_pixels``, ``rate``,
            ``batch_size``, ``num_epochs``, ``SGD.lr`` and
            ``SGD.momentum``.

    Side effects:
        Logs progress via ``log``, saves the loss curve to ``loss.png``
        and the trained weights to ``./pathological.pth``.
    """
    # Collect the list of pathology image paths.
    data_dir = make_data_path_list()

    # Build the dataset.
    dataset = PathologicalImage(file_list=data_dir,
                                transform=ImageTransform(cfg.size),
                                num_pixels=cfg.num_pixels)

    # Split into train/val by the configured ratio.
    train_size = int(len(dataset) * cfg.rate)
    val_size = len(dataset) - train_size
    train_dataset, val_dataset = data.random_split(dataset,
                                                   [train_size, val_size])

    # Sanity check: shapes of one sample.
    # IDIOM: index the dataset instead of calling __getitem__ directly.
    print("入力画像サイズ:" + str(train_dataset[0][0].size()))
    print("教師データサイズ:" + str(train_dataset[0][1].shape))

    # DataLoaders: shuffle only the training split.
    train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=cfg.batch_size,
                                                   shuffle=True)
    val_dataloader = torch.utils.data.DataLoader(val_dataset,
                                                 batch_size=cfg.batch_size,
                                                 shuffle=False)

    # Sanity check: one batch from the train loader.
    batch_iterator = iter(train_dataloader)
    inputs, labels = next(batch_iterator)
    log.info("-----Image and label shape of dataloader-----")
    log.info("入力データ:" + str(inputs.size()))
    log.info("入力ラベル:" + str(labels.shape))

    # Device setup; move the custom FCN to GPU when available.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net = FCNs()
    net.to(device)
    # Let cuDNN pick the fastest kernels for fixed-size inputs.
    torch.backends.cudnn.benchmark = True
    log.info("-----Constitution of network-----")
    log.info(net)

    # Loss: per-pixel regression against the teacher signal.
    criterion = nn.MSELoss()

    # Optimizer.
    optimizer = optim.SGD(net.parameters(),
                          lr=cfg.SGD.lr,
                          momentum=cfg.SGD.momentum)
    log.info("-----Details of optimizer function-----")
    log.info(optimizer)

    # Per-epoch loss history.
    train_loss = []
    val_loss = []

    # Training loop: one train pass and one validation pass per epoch.
    for epoch in range(cfg.num_epochs):
        log.info("Epoch {} / {} ".format(epoch + 1, cfg.num_epochs))
        log.info("----------")

        train_history = train_model(net, train_dataloader, criterion,
                                    optimizer)
        train_loss.append(train_history)

        val_history = val_model(net, val_dataloader, criterion)
        val_loss.append(val_history)

    # Final test pass.
    # NOTE(review): this reuses the validation loader as the test set —
    # confirm that is intended (no separate test split is created).
    test_history = test_model(net, val_dataloader, criterion)

    # Plot and save the loss curves.
    fig_loss, ax_loss = plt.subplots(figsize=(10, 10))
    epochs = range(1, cfg.num_epochs + 1)
    ax_loss.plot(epochs, train_loss, label="train_loss")
    ax_loss.plot(epochs, val_loss, label="val_loss")
    ax_loss.set_xlabel("epoch")
    ax_loss.legend()
    fig_loss.savefig("loss.png")

    # Persist the trained weights.
    save_path = './pathological.pth'
    torch.save(net.state_dict(), save_path)