Example 1
"""
loss_func = nn.MSELoss()

print("loading view_data")
view_data_in = img_to_tensorVariable(
    os.path.join(dir_input,'test/'), namelist_input, 1, N_TEST_IMG
)
view_data_out = img_to_tensorVariable(
    os.path.join(dir_output,'test/'), namelist_output, 1, N_TEST_IMG
)
print("loading complete")

print("loading train_loader")
folder_input = folder2.ImageFolder(root=dir_input,
                                   transform=trans_comp,
                                   loader=folder2.pil_loader)
train_loader_input = torch.utils.data.DataLoader(folder_input,
                                                 batch_size=BATCH_SIZE,
                                                 shuffle=False)

folder_output = folder2.ImageFolder(root=dir_output,
                                    transform=trans_comp,
                                    loader=folder2.pil_loader)
train_loader_output = torch.utils.data.DataLoader(folder_output,
                                                  batch_size=BATCH_SIZE,
                                                  shuffle=False)
print("loading complete")

print("loading test_loader")
Example 2
recon_loss_func = nn.MSELoss()
gen_optimizer = torch.optim.Adam(generator.parameters(), lr=lr)

# training
img_dir_train = "/home/powergkrry/lv_challenge/data/dataset/dataset06/i/train/"
img_dir_train_out = "/home/powergkrry/lv_challenge/data/dataset/dataset06/i/train_output/"
img_dir_test = "/home/powergkrry/lv_challenge/data/dataset/dataset06/i/test/"
img_dir_test_out = "/home/powergkrry/lv_challenge/data/dataset/dataset06/i/test_output/"
dir_list = os.listdir(img_dir_train)
dir_list.sort()
img_list = os.listdir(img_dir_test_out + "o/output_o/")
img_list.sort()

for flip in dir_list:
    img_data_arr_test = folder2.ImageFolder(root=img_dir_test + flip,
                                            transform=transforms.Compose([
                                                transforms.ToTensor(),
                                            ]))

    img_batch_arr_test = data.DataLoader(img_data_arr_test,
                                         batch_size=batch_size,
                                         num_workers=2)

    img_output_arr_test = folder2.ImageFolder(root=img_dir_test_out + flip,
                                              transform=transforms.Compose([
                                                  transforms.ToTensor(),
                                              ]))

    img_output_batch_arr_test = data.DataLoader(img_output_arr_test,
                                                batch_size=batch_size,
                                                num_workers=2)
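
    # A minimal per-flip evaluation sketch, assuming `generator` maps an input
    # batch to its reconstruction; both test loaders are unshuffled, so zipping
    # them keeps input and target batches paired.
    generator.eval()
    with torch.no_grad():
        flip_loss = sum(
            recon_loss_func(generator(x), y).item()
            for (x, _), (y, _) in zip(img_batch_arr_test,
                                      img_output_batch_arr_test))
    print(flip, flip_loss / len(img_batch_arr_test))
    generator.train()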
Example 3
    for traintest in dir_list_traintest:
        os.mkdir(out_dir + iop + traintest)

        img_name_list = os.listdir(img_dir + iop + traintest + "o/original_o/")
        #img_name_list = os.listdir(img_dir + iop + traintest + "o/1_o/")
        #img_name_list = os.listdir(img_dir + iop + traintest + "o/0.1_o/")

        img_name_list.sort()
        img_num = len(img_name_list)
        for fliprot in dir_list_fliprot:
            os.mkdir(out_dir + iop + traintest + fliprot)

            img_folder = folder2.ImageFolder(
                root=img_dir + iop + traintest + fliprot,
                transform=transforms.Compose([
                    #transforms.CenterCrop(128),
                    transforms.ToTensor(),
                    transforms.Normalize([0.2481], [0.3109]),
                ]))
            img_dataloader = data.DataLoader(
                img_folder,
                batch_size=1,
                #num_workers = 2,
            )

            for idx in range(img_num):
                tensor_arr = []
                for i_batch, (img, cat) in enumerate(img_dataloader):
                    if (i_batch % img_num == idx):
                        tensor_arr.append(img)
                all_diffusion = torch.cat(tensor_arr, dim=1)
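                # A possible next step, mirroring the torch.save pattern of
                # Example 10 (the output file name here is an assumption):
                torch.save(all_diffusion,
                           out_dir + iop + traintest + fliprot +
                           img_name_list[idx][:3] + ".pt")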
Example 4
"""
loss_func = nn.MSELoss()
"""
print("loading view_data")
view_data_in = img_to_tensorVariable(
    os.path.join(dir_input,'test/'), namelist_input, 1, N_TEST_IMG
)
view_data_out = img_to_tensorVariable(
    os.path.join(dir_output,'test/'), namelist_output, 1, N_TEST_IMG
)
print("loading complete")
"""

print("loading train_loader")
folder_output = folder2.ImageFolder(root=dir_clean,
                                    transform=trans_comp,
                                    loader=folder2.pil_loader)
train_loader_clean = torch.utils.data.DataLoader(folder_output,
                                                 batch_size=BATCH_SIZE,
                                                 shuffle=False)
print("loading complete")

print("loading test_loader")
folder_test_clean = folder2.ImageFolder(root=dir_test_clean,
                                        transform=trans_comp,
                                        loader=folder2.pil_loader)
test_loader_clean = torch.utils.data.DataLoader(folder_test_clean,
                                                batch_size=test_BATCH_SIZE,
                                                shuffle=False)
print("loading complete")
Example 5
optimizer = torch.optim.Adam(autoencoder.parameters(), lr=LR)
loss_func = nn.MSELoss()

print("loading view_data")
view_data_in = img_to_tensorVariable(os.path.join(dir_input, 'test/'),
                                     namelist_input, 1, N_TEST_IMG)
view_data_out = img_to_tensorVariable(os.path.join(dir_output, 'test/'),
                                      namelist_output, 1, N_TEST_IMG)
print("loading complete")

start_time = time.time()
print("loading train_loader")
#train_loader_input = img_to_tensorVariable(dir_input, namelist_input, 1, 10000)
folder_input = folder2.ImageFolder(root=dir_input,
                                   transform=ToTensor(),
                                   loader=folder2.pil_loader)
train_loader_input = torch.utils.data.DataLoader(folder_input,
                                                 batch_size=BATCH_SIZE,
                                                 shuffle=False)

folder_output = folder2.ImageFolder(root=dir_output,
                                    transform=ToTensor(),
                                    loader=folder2.pil_loader)
train_loader_output = torch.utils.data.DataLoader(folder_output,
                                                  batch_size=BATCH_SIZE,
                                                  shuffle=False)
"""
view_data_in = torch.utils.data.DataLoader(
    folder_input[0][:N_TEST_IMG], batch_size = N_TEST_IMG, shuffle = False)
view_data_out = torch.utils.data.DataLoader(
Example 6

def return_mean_std(tensor):
    """Build a Normalize transform from this tensor's own mean and std."""
    mean_t = tensor.mean()
    std_t = tensor.std()

    return transforms.Normalize([mean_t], [std_t])


#input
img_folder = folder2.ImageFolder(
    root=img_dir,
    transform=transforms.Compose([
        #transforms.CenterCrop(128),
        transforms.ToTensor(),
        #transforms.Lambda(lambda tensor: transforms.Normalize(str([x.mean()]) + ", " + str([x.std()]) for x in tensor)),
        # Per-image normalization: assignments are not allowed inside a lambda,
        # so build the Normalize transform via return_mean_std and apply it.
        transforms.Lambda(lambda tensor: return_mean_std(tensor)(tensor)),
    ]))
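
# The per-image normalization above can equally be written as a named function,
# since Compose accepts any callable; a sketch using the return_mean_std helper:
def normalize_per_image(tensor):
    return return_mean_std(tensor)(tensor)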

img_dataloader = data.DataLoader(
    img_folder,
    batch_size=1,
    #num_workers = 2,
)

for i, (img_tensor, label) in enumerate(img_dataloader):
    out_dir_name = out_dir + "ioriginal_png/" + input_dir[
        label[0]] + "/" + img_name_list[i % len(img_name_list)][:3] + ".pt"
Example 7
# hyperparameters
batch_size = args.batch_size
lr = args.lr
epoch = args.epoch
img_size = 256
train_error = []
test_error = []

# input pipeline
img_dir = "/home/powergkrry/lv_challenge/data/dataset/dataset02/p/train/"
dir_list = os.listdir(img_dir)
dir_list.remove('output')

img_data_arr = [
    folder2.ImageFolder(root=img_dir + dirname,
                        transform=transforms.Compose([
                            transforms.ToTensor(),
                        ])) for dirname in dir_list
]

img_batch_arr = [
    data.DataLoader(img_data, batch_size=batch_size, num_workers=2)
    for img_data in img_data_arr
]

img_output_arr = folder2.ImageFolder(
    root="/home/powergkrry/lv_challenge/data/dataset/dataset02/p/train/output",
    transform=transforms.Compose([
        transforms.ToTensor(),
    ]))

img_output_batch_arr = data.DataLoader(img_output_arr,
Example 8
    return result


if __name__ == '__main__':
    dir_clean = '/home/powergkrry/MURA/MURA_TRAIN_RESIZE/'

    trans_comp = transforms.Compose([
        #transforms.CenterCrop(100),
        FullCrop((32, 35)),
        transforms.Lambda(lambda crops: torch.stack(
            [transforms.ToTensor()(crop) for crop in crops]))
    ])

    folder_output = folder2.ImageFolder(root=dir_clean,
                                        transform=trans_comp,
                                        loader=folder2.pil_loader)
    folder_output2 = folder2.ImageFolder(root=dir_clean,
                                         transform=transforms.ToTensor(),
                                         loader=folder2.pil_loader)
    train_loader_clean = torch.utils.data.DataLoader(folder_output,
                                                     batch_size=1,
                                                     shuffle=False)
    train_loader_clean2 = torch.utils.data.DataLoader(folder_output2,
                                                      batch_size=1,
                                                      shuffle=False)

    print(train_loader_clean)

    it = iter(train_loader_clean)
    img, _ = next(it)
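
    # Assuming FullCrop returns a sequence of crops (like torchvision's FiveCrop),
    # the stacked batch has shape (1, n_crops, C, H, W); a sketch folding the
    # crops into the batch dimension:
    bs, ncrops, c, h, w = img.size()
    crops_as_batch = img.view(-1, c, h, w)
    print(crops_as_batch.size())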
Example 9
    print(p.size())

optimizer = optim.Adam(model.parameters(), lr=0.001)

data_transform = transforms.Compose([
    transforms.RandomResizedCrop(150),  # RandomSizedCrop was renamed to RandomResizedCrop
    #transforms.RandomHorizontalFlip(),
    transforms.ToTensor()
])

model.train()
test_accu = []
i = 0
for epoch in range(10000):
    ddsm_dataset = folder2.ImageFolder(
        root='/hoem04/powergkrry/CBIS-DDSM_data/Test1data/Test1data_PNG',
        transform=data_transform,
        loader=folder2.pil_loader)

    train_loader = torch.utils.data.DataLoader(ddsm_dataset,
                                               batch_size=8,
                                               shuffle=True)

    print("dataset :")
    print(ddsm_dataset)

    for data, target in train_loader:
        data, target = Variable(data).cuda(), Variable(target).cuda()
        #print(data)
        #print(target)
        optimizer.zero_grad()
        output = model(data)
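        # One plausible continuation of the training step (the loss choice is an
        # assumption; `nn` is assumed to be imported as in the other examples):
        loss = nn.CrossEntropyLoss()(output, target)
        loss.backward()
        optimizer.step()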
Example 10
    batch_size=1,
    #num_workers = 2,
)

for i, (img_tensor, label) in enumerate(img_dataloader):
    out_dir_name = out_dir + "ioriginal_png/" + input_dir[label[0]] + "/" + img_name_list[i % len(img_name_list)][:3] + ".pt"
    torch.save(img_tensor.view(-1, 1, 192, 192), out_dir_name)
    print(out_dir_name)
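
# A quick round-trip sketch: the last saved tensor reloads with torch.load.
reloaded = torch.load(out_dir_name)
print(reloaded.size())  # expected (N, 1, 192, 192)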


"""
#output
img_folder = folder2.ImageFolder(
    root=img_dir,
    transform=transforms.Compose([
        transforms.CenterCrop(192),
        transforms.ToTensor(),
        #transforms.FiveCrop(64),
        #transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),
    ]))

img_dataloader = data.DataLoader(
    img_folder,
    batch_size=1,
    #num_workers = 2,
)

for i, (img_tensor, label) in enumerate(img_dataloader):
    out_dir_name = out_dir + "ioriginal_png/" + input_dir[
        label[0]] + "/" + img_name_list[i % len(img_name_list)][:3] + ".pt"
    torch.save(img_tensor.view(-1, 1, 192, 192), out_dir_name)
    print(out_dir_name)
Example 11
r_train_error = []
test_error = []
r_test_error = []

flip = ['_o', '_h', '_v', 'hv']
# input pipeline
img_dir = "/home/yeonjee/lv_challenge/data/dataset/dataset01/p/train/"
# How about splitting the directory into four?
dir_list = [[
    dir_name for dir_name in os.listdir(img_dir) if f == dir_name[-2:]
] for f in flip]
for lst in dir_list:
    lst.sort()

img_data_arr = [[
    folder2.ImageFolder(root=img_dir + dirname,
                        transform=transforms.ToTensor()) for dirname in dir_l
] for dir_l in dir_list]

img_batch_arr = [[
    data.DataLoader(img_data, batch_size=batch_size, num_workers=2)
    for img_data in img_data_a
] for img_data_a in img_data_arr]

img_output_arr = folder2.ImageFolder(
    root="/home/yeonjee/lv_challenge/data/dataset/dataset01/p/train/output",
    transform=transforms.Compose([
        fourflip(),
        transforms.Lambda(lambda crops: torch.stack(
            [transforms.ToTensor()(crop) for crop in crops]))
    ]))
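
# A sketch of the matching output DataLoader, mirroring the pattern in Example 7:
img_output_batch_arr = data.DataLoader(img_output_arr,
                                       batch_size=batch_size,
                                       num_workers=2)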
Example 12
f, a = plt.subplots(3, N_TEST_IMG, figsize=(5,3))
plt.ion()

for i in range(N_TEST_IMG):
    a[0][i].imshow(view_data_in.data.cpu().numpy()[i].reshape(256,256), cmap='gray')
    a[0][i].set_xticks(())
    a[0][i].set_yticks(())
    a[1][i].imshow(view_data_out.data.cpu().numpy()[i].reshape(256,256), cmap='gray')
    a[1][i].set_xticks(())
    a[1][i].set_yticks(())
plt.show()
"""
for dir_input_location in os.listdir(dir_input):
    print("loading train_loader")
    folder_input = folder2.ImageFolder(root=os.path.join(
        dir_input, dir_input_location),
                                       transform=trans_comp,
                                       loader=folder2.pil_loader)

    train_loader_input = torch.utils.data.DataLoader(folder_input,
                                                     batch_size=BATCH_SIZE,
                                                     shuffle=False)

    folder_output = folder2.ImageFolder(root=dir_output,
                                        transform=trans_comp,
                                        loader=folder2.pil_loader)

    train_loader_output = torch.utils.data.DataLoader(folder_output,
                                                      batch_size=BATCH_SIZE,
                                                      shuffle=False)

    print("loading complete")