Code Example #1
    def forward(self, seq, xinp):
        # xinp is time-major: (timesteps, batch, channels, height, width);
        # xout collects the per-timestep features produced by `seq`
        xout = tools.to_cuda(Variable(torch.zeros(
            xinp.size()[0], xinp.size()[1], self.hidden_size, self.height, self.width)))

        # initial hidden and cell states of the recurrent cell, one per batch element
        h_state, c_state = ( tools.to_cuda(Variable(torch.zeros(xinp[0].shape[0], self.hidden_size, self.height, self.width))),
                             tools.to_cuda(Variable(torch.zeros(xinp[0].shape[0], self.hidden_size, self.height, self.width))) )

        for t in range(xinp.size()[0]):
            input_t = seq(xinp[t])  # per-timestep feature extraction
            xout[t] = input_t
            h_state, c_state = self.RCell(input_t, h_state, c_state)  # recurrent update across timesteps

        return self.dropout(h_state), xout
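
For reference, here is a minimal, self-contained sketch of the same per-timestep pattern in plain, modern PyTorch (no Variable, no tools.to_cuda). The ConvLSTMCell, the RecurrentEncoder name, the layer choices and all shapes are illustrative assumptions, not the project's RCell implementation.

import torch
import torch.nn as nn


class ConvLSTMCell(nn.Module):
    """A basic ConvLSTM cell: a single convolution produces all four gates."""

    def __init__(self, in_channels, hidden_size):
        super().__init__()
        self.gates = nn.Conv2d(in_channels + hidden_size, 4 * hidden_size, 3, padding=1)

    def forward(self, x, h, c):
        i, f, o, g = torch.chunk(self.gates(torch.cat([x, h], dim=1)), 4, dim=1)
        c = torch.sigmoid(f) * c + torch.sigmoid(i) * torch.tanh(g)
        h = torch.sigmoid(o) * torch.tanh(c)
        return h, c


class RecurrentEncoder(nn.Module):
    def __init__(self, in_channels, hidden_size):
        super().__init__()
        self.hidden_size = hidden_size
        self.seq = nn.Conv2d(in_channels, hidden_size, 3, padding=1)  # stand-in for `seq`
        self.cell = ConvLSTMCell(hidden_size, hidden_size)            # stand-in for `self.RCell`
        self.dropout = nn.Dropout(0.2)

    def forward(self, xinp):
        # xinp is time-major: (timesteps, batch, channels, height, width)
        T, B, _, H, W = xinp.shape
        h = xinp.new_zeros(B, self.hidden_size, H, W)
        c = torch.zeros_like(h)
        xout = []
        for t in range(T):
            feat = self.seq(xinp[t])      # per-timestep feature extraction
            xout.append(feat)
            h, c = self.cell(feat, h, c)  # carry hidden/cell state across timesteps
        return self.dropout(h), torch.stack(xout)


# usage on dummy data: 5 dates, batch of 2, 4-channel 64x64 patches
enc = RecurrentEncoder(in_channels=4, hidden_size=16)
h_final, feats = enc(torch.randn(5, 2, 4, 64, 64))
print(h_final.shape, feats.shape)  # torch.Size([2, 16, 64, 64]) torch.Size([5, 2, 16, 64, 64])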
Code Example #2
File: infer.py  Project: mpapadomanolaki/U-REC
def sliding_window(IMAGE, patch_size, step):
    # accumulate per-pixel class scores (6 classes) over overlapping patches
    prediction = np.zeros((IMAGE.shape[0], IMAGE.shape[1], 6))
    stride = step

    x = 0
    while (x != IMAGE.shape[0]):
        y = 0
        while (y != IMAGE.shape[1]):

            if (not y + patch_size > IMAGE.shape[1]) and (
                    not x + patch_size > IMAGE.shape[0]):
                patch = IMAGE[x:x + patch_size, y:y + patch_size, :]
                patch = np.transpose(patch, (2, 0, 1)) / 255.0  # HWC -> CHW and normalize to [0, 1]
                patch = np.reshape(
                    patch, (1, patch.shape[0], patch.shape[1], patch.shape[2]))  # add a batch dimension
                patch = tools.to_cuda(torch.from_numpy(patch).float())
                output, rec = model(patch)  # segmentation scores and reconstruction output
                output = output.cpu().data.numpy().squeeze()
                output = np.transpose(output, (1, 2, 0))  # CHW -> HWC
                for i in range(0, patch_size):
                    for j in range(0, patch_size):
                        prediction[x + i, y + j] += (output[i, j, :])  # vote for every covered pixel

            if y + patch_size == IMAGE.shape[1]:
                break

            if y + patch_size > IMAGE.shape[1]:
                y = IMAGE.shape[1] - patch_size
            else:
                y = y + stride

        if x + patch_size == IMAGE.shape[0]:
            break

        if x + patch_size > IMAGE.shape[0]:
            x = IMAGE.shape[0] - patch_size
        else:
            x = x + stride

    final_pred = np.zeros((IMAGE.shape[0], IMAGE.shape[1], 3))
    print('ok')
    for i in range(0, final_pred.shape[0]):
        for j in range(0, final_pred.shape[1]):
            final_pred[i, j] = to_color(np.argmax(prediction[i, j]))

    return final_pred
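
The overlap-voting idea in sliding_window boils down to the tiny, self-contained illustration below: class scores from overlapping patches are summed per pixel, and the final label is the argmax of the accumulated scores. The random patch scores merely stand in for the model output and all sizes are arbitrary; the slice-wise addition also shows how the per-pixel double loop above could be vectorised.

import numpy as np

H, W, n_classes, patch, stride = 8, 8, 3, 4, 2
scores = np.zeros((H, W, n_classes))

rng = np.random.default_rng(0)
for x in range(0, H - patch + 1, stride):
    for y in range(0, W - patch + 1, stride):
        patch_scores = rng.random((patch, patch, n_classes))  # stand-in for the model output
        scores[x:x + patch, y:y + patch] += patch_scores      # one vectorised vote for every covered pixel

labels = scores.argmax(axis=-1)  # per-pixel class with the highest accumulated score
print(labels.shape)              # (8, 8)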
Code Example #3
def sliding_window(IMAGE, patch_size, step):
    # IMAGE is 5-D here: (dates, batch, channels, height, width), so the spatial
    # dimensions are IMAGE.shape[3] and IMAGE.shape[4]
    prediction = np.zeros((IMAGE.shape[3], IMAGE.shape[4], 2))
    stride = step
    x = 0
    while (x != IMAGE.shape[3]):
        y = 0
        while (y != IMAGE.shape[4]):

            if (not y + patch_size > IMAGE.shape[4]) and (
                    not x + patch_size > IMAGE.shape[3]):
                patch = IMAGE[:, :, :, x:x + patch_size, y:y + patch_size]
                patch = tools.to_cuda(torch.from_numpy(patch).float())
                output = model(patch)
                output = output.cpu().data.numpy().squeeze()
                output = np.transpose(output, (1, 2, 0))  # CHW -> HWC
                for i in range(0, patch_size):
                    for j in range(0, patch_size):
                        prediction[x + i, y + j] += (output[i, j, :])  # accumulate the two class scores

            if y + patch_size == IMAGE.shape[4]:
                break

            if y + patch_size > IMAGE.shape[4]:
                y = IMAGE.shape[4] - patch_size
            else:
                y = y + stride

        if x + patch_size == IMAGE.shape[3]:
            break

        if x + patch_size > IMAGE.shape[3]:
            x = IMAGE.shape[3] - patch_size
        else:
            x = x + stride

    final_pred = np.zeros((IMAGE.shape[3], IMAGE.shape[4]))
    print('ok')
    for i in range(0, final_pred.shape[0]):
        for j in range(0, final_pred.shape[1]):
            final_pred[i, j] = np.argmax(prediction[i, j])

    # map class indices {0, 1} to the label values {1, 2}; the order of the two
    # assignments matters (see the small check after this example)
    final_pred[final_pred == 1] = 2
    final_pred[final_pred == 0] = 1

    return final_pred
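
The order of the two remapping assignments at the end of this example matters: mapping 0 to 1 first would let the second assignment overwrite the freshly written values. The small plain-NumPy check below (arbitrary data) illustrates the difference.

import numpy as np

pred = np.array([0, 1, 1, 0])

a = pred.copy()
a[a == 1] = 2  # correct order: 1 -> 2 first ...
a[a == 0] = 1  # ... then 0 -> 1
print(a)       # [1 2 2 1]

b = pred.copy()
b[b == 0] = 1  # swapped order: 0 -> 1 first ...
b[b == 1] = 2  # ... then 1 -> 2 also hits the values just written
print(b)       # [2 2 2 2]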
Code Example #4
import os

import torch
import torch.optim as optim
import torchnet as tnt
from torch.utils.data import DataLoader

import custom  # project-local dataset class
import tools   # project-local helpers (to_cuda, ...)
import u_rec   # project-local U-REC model (U-Net with a reconstruction branch)

csv_file = './xys/myxys_train.csv'
csv_file_val = './xys/myxys_val.csv'
image_ids = [11, 13, 1, 21, 23, 26, 28, 30, 32, 34, 37, 3, 5, 7]  #train areas
infer_ids = [15, 17]  #validation areas
img_folder = '/home/mariapap/DATA/top/'  #folder with tif images of Vaihingen city
lbl_folder = '/home/mariapap/DATA/GROUNDTRUTH/ISPRS_semantic_labeling_Vaihingen_ground_truth_COMPLETE_1D/'  #folder with 2D groundtruth images, category indices:0,1,2,3,4,5
patch_size = 256
train_dataset = custom.MyDataset(csv_file, image_ids, img_folder, lbl_folder,
                                 patch_size)
val_dataset = custom.MyDataset(csv_file_val, infer_ids, img_folder, lbl_folder,
                               patch_size)

batchsize = 14
model = tools.to_cuda(u_rec.UNet(3, 6))
base_lr = 0.0001
optimizer = optim.Adam(model.parameters(), lr=base_lr)
criterion1 = tools.to_cuda(torch.nn.CrossEntropyLoss())
criterion2 = tools.to_cuda(torch.nn.L1Loss())

iter_ = 0
epochs = 100
confusion_matrix = tnt.meter.ConfusionMeter(6)
save_folder = 'models'
os.mkdir(save_folder)
ff = open('./' + save_folder + '/progress.txt', 'w')

for epoch in range(1, epochs + 1):
    # a freshly shuffled DataLoader is built at the start of every epoch
    mydataset = DataLoader(train_dataset, batch_size=batchsize, shuffle=True)
    model.train()
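    # The original example is truncated here. What follows is a hedged sketch of one
    # possible training iteration, NOT the repository's actual loop: the batch
    # unpacking and the plain, unweighted sum of the segmentation and reconstruction
    # losses are assumptions, based only on criterion1/criterion2 above and on the
    # model returning (output, rec) as in Code Example #2.
    for img, lbl in mydataset:
        img = tools.to_cuda(img.float())
        lbl = tools.to_cuda(lbl.long())

        optimizer.zero_grad()
        output, rec = model(img)                                # segmentation logits + reconstruction
        loss = criterion1(output, lbl) + criterion2(rec, img)   # assumed unweighted sum of the two losses
        loss.backward()
        optimizer.step()
        iter_ += 1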
Code Example #5
File: infer.py  Project: mpapadomanolaki/U-REC
        else:
            x = x + stride

    final_pred = np.zeros((IMAGE.shape[0], IMAGE.shape[1], 3))
    print('ok')
    for i in range(0, final_pred.shape[0]):
        for j in range(0, final_pred.shape[1]):
            final_pred[i, j] = to_color(np.argmax(prediction[i, j]))

    return final_pred


model = u_rec.UNet(3, 6)
model.load_state_dict(
    torch.load('../models/model.pt'))  #load your saved weights
model = tools.to_cuda(model)
model = model.eval()
infer_ids = [10, 12, 14, 16, 20, 22, 24, 27, 29, 2, 31, 33, 35, 38, 4, 6, 8]
patch_size = 256
step = 128
tifs_folder = '../top/'  #folder with the testing Vaihingen images
save_folder = 'PREDICTIONS'
os.mkdir(save_folder)

for i in infer_ids:
    print('Processing area ', i)
    img = io.imread(tifs_folder + 'top_mosaic_09cm_area{}.tif'.format(i))
    pred = sliding_window(img, patch_size, step)
    io.imsave('./' + save_folder + '/top_mosaic_09cm_area{}.tif'.format(i),
              pred)
Code Example #6
IDSv = np.load('./trainvaltest/IDSv_train.npy')
LABSv = np.load('./trainvaltest/LABSv_train.npy')

val_IDSv = np.load('./trainvaltest/IDSv_val.npy')
val_LABSv = np.load('./trainvaltest/LABSv_val.npy')

weight_tensor = torch.FloatTensor(5)  # per-class weights for the 5-class cross-entropy loss below
weight_tensor[0] = 1.1
weight_tensor[1] = 0.8
weight_tensor[2] = 0.6
weight_tensor[3] = 0.9
weight_tensor[4] = 0.3

epochs = 15
model = tools.to_cuda(un.UNet())
optimizer = optim.Adam(model.parameters(), lr=0.0001)
#model=tools.to_cuda(new_u.UNet(epoch))
#model2=tools.to_cuda(new_u.U_Net2())
#optimizer = optim.Adam(model.parameters(), lr=0.0001)
criterion2 = tools.to_cuda(nn.CrossEntropyLoss(tools.to_cuda(weight_tensor)))
criterion1 = tools.to_cuda(nn.MSELoss())

confusion_matrix = tnt.meter.ConfusionMeter(5)

t_acc = []
t_Loss = []
v_acc = []
v_Loss = []

ff = open('./models/progress.txt', 'w')
Code Example #7
import numpy as np
import torch
import torch.nn as nn
from torch.nn import init
from tqdm import tqdm
import torch.optim.lr_scheduler
import itertools
import matplotlib.pyplot as plt
import torchnet as tnt
from skimage import io  # io.imread is used below; assumed to be skimage.io, as in the other examples
import ConvNet1
import new_u
import tools

test_IDSv = np.load('./trainvaltest/IDSv_test.npy')
test_LABSv = np.load('./trainvaltest/LABSv_test.npy')

#model=ConvNet1.MyConvNet()
#model=Myresnet1.resnet18(1,5)
model = tools.to_cuda(new_u.U_Net2())
model.load_state_dict(torch.load("./models/model_11.pt"))
model.cuda()

criterion = nn.CrossEntropyLoss()
confusion_matrix = tnt.meter.ConfusionMeter(5)

with torch.no_grad():  # run the whole evaluation loop without building autograd graphs
    model.eval()
    test_losses = []

    for i in range(0, len(test_IDSv)):
        input = io.imread(test_IDSv[i])
        input = input[:, :, 0]  # keep a single channel
        input = np.reshape(input, (1, input.shape[0], input.shape[1]))  # add a leading batch/channel axis
        input = input / 255.0  # normalize to [0, 1]
Code Example #8
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchnet as tnt
import unet1
import new_u
import ConvNet1
import tools
##################################################################################################
IDSv = np.load('./trainvaltest/IDSv_train.npy')
LABSv = np.load('./trainvaltest/LABSv_train.npy')

val_IDSv = np.load('./trainvaltest/IDSv_val.npy')
val_LABSv = np.load('./trainvaltest/LABSv_val.npy')

weight_tensor = torch.FloatTensor(5)
weight_tensor[0] = 1.1
weight_tensor[1] = 0.4
weight_tensor[2] = 0.4
weight_tensor[3] = 0.9
weight_tensor[4] = 0.3
criterion = tools.to_cuda(nn.CrossEntropyLoss(
    tools.to_cuda(weight_tensor)))  #with weights
#criterion=nn.CrossEntropyLoss().cuda() #without weights

#model=tools.to_cuda(Rs2.resnet18(1,2000,4))
model = tools.to_cuda(ConvNet1.MyConvNet())
#model=tools.to_cuda(unet.UNet(padding=True, up_mode='upsample'))
optimizer = optim.Adam(model.parameters(), lr=0.0001)

epochs = 15
ff = open('./models/progress.txt', 'w')
batchsize = 2
confusion_matrix = tnt.meter.ConfusionMeter(5)

t_acc = []
t_Loss = []
v_acc = []
Code Example #9
File: inference.py  Project: zjj-2015/UNetLSTM
    return final_pred


patch_size = args.patch_size
step = args.step
nb_dates = args.nb_dates

networks_folder_path = './networks/'
import sys
sys.path.insert(0, networks_folder_path)

model_type = args.model_type  # choose network type ('simple' or 'lstm');
# 'simple' is a plain U-Net, while 'lstm' is a U-Net with LSTM blocks
if model_type == 'simple':
    import network
    model = tools.to_cuda(network.U_Net(4, 2, nb_dates))
elif model_type == 'lstm':
    import networkL
    model = tools.to_cuda(networkL.U_Net(4, 2, patch_size))
else:
    print('invalid on_network_argument')


test_areas = ['brasilia', 'milano', 'norcia', 'chongqing', 'dubai', 'lasvegas', 'montpellier', 'rio', 'saclay_w', 'valencia']

save_folder = 'PREDICTIONS'
if os.path.exists(save_folder):
    shutil.rmtree(save_folder)
Code Example #10
nb_dates = [1, 5]  # specify which dates to use, e.g. [1, 2, 3, 4, 5] for all five dates
                   # or [1, 2, 5] to use just three of them

networks_folder_path = './networks'
import sys

sys.path.insert(0, networks_folder_path)

model_type = 'lstm'  # choose network type ('simple' or 'lstm');
# 'simple' is a plain U-Net, while 'lstm' is a U-Net with LSTM blocks
if model_type == 'simple':
    import network
    model = tools.to_cuda(network.U_Net(4, 2, nb_dates))
elif model_type == 'lstm':
    import networkL
    model = tools.to_cuda(networkL.U_Net(4, 2, patch_size))
else:
    print('invalid on_network_argument')

change_dataset_train = custom.MyDataset(csv_file_train, train_areas,
                                        img_folder, lbl_folder, nb_dates,
                                        patch_size)
change_dataset_val = custom.MyDataset(csv_file_val, train_areas, img_folder,
                                      lbl_folder, nb_dates, patch_size)
mydataset_val = DataLoader(change_dataset_val, batch_size=32)

# images_train, labels_train, images_val, labels_val = tools.make_data(size_len, portion, change_dataset)
base_lr = 0.0001
optimizer = optim.Adam(model.parameters(), lr=base_lr)
Code Example #11
    print('Using device:', device)

    model_type = 'simple'  # choose network type ('simple' or 'lstm');
    # 'simple' is a plain U-Net, while 'lstm' is a U-Net with LSTM blocks
    if model_type == 'simple':
        net = network.U_Net(4, 2, nb_dates)
    elif model_type == 'lstm':
        net = networkL.U_Net(4, 2, patch_size)
    else:
        net = None
        print('invalid on_network_argument')

    model = tools.to_cuda(net)

    change_dataset_train = custom.MyDataset(csv_file_train, train_areas,
                                            img_folder, lbl_folder, nb_dates,
                                            patch_size)
    change_dataset_val = custom.MyDataset(csv_file_val, train_areas,
                                          img_folder, lbl_folder, nb_dates,
                                          patch_size)
    mydataset_val = DataLoader(change_dataset_val, batch_size=32)

    # images_train, labels_train, images_val, labels_val = tools.make_data(size_len, portion, change_dataset)
    base_lr = 0.0001
    optimizer = optim.Adam(model.parameters(), lr=base_lr)
    weight_tensor = torch.FloatTensor(2)  # per-class weights for the 2-class (change / no-change) loss
    weight_tensor[0] = 0.20
    weight_tensor[1] = 0.80