Example #1
import os
import sys

sys.path.append(os.path.join(os.path.dirname(__file__), '../'))

from drcn.graph import Graph
import tensorflow as tf
from drcn import args
from utils.load_data import load_all_data
import pickle

p_c_index, h_c_index, p_w_index, h_w_index, p_w_vec, h_w_vec, same_word, label = load_all_data(
    '../input/train.csv', data_size=None)
p_c_index_evl, h_c_index_evl, p_w_index_evl, h_w_index_evl, p_w_vec_evl, h_w_vec_evl, same_word_evl, label_evl = load_all_data(
    '../input/dev.csv', data_size=2000)

p_c_index_holder = tf.placeholder(name='p_c_index',
                                  shape=(None, args.max_char_len),
                                  dtype=tf.int32)
h_c_index_holder = tf.placeholder(name='h_c_index',
                                  shape=(None, args.max_char_len),
                                  dtype=tf.int32)
p_w_index_holder = tf.placeholder(name='p_w_index',
                                  shape=(None, args.max_word_len),
                                  dtype=tf.int32)
h_w_index_holder = tf.placeholder(name='h_w_index',
                                  shape=(None, args.max_word_len),
                                  dtype=tf.int32)
p_w_vec_holder = tf.placeholder(name='p_w_vec',
                                shape=(None, args.max_word_len,
                                       args.word_embedding_len),
                                dtype=tf.float32)
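These `tf.placeholder` calls are TensorFlow 1.x graph inputs; they only receive values at session run time through a feed_dict. A minimal, self-contained sketch of that feed pattern (the dummy `max_char_len` stands in for `args.max_char_len`, which this snippet defines elsewhere):

import numpy as np
import tensorflow as tf  # TensorFlow 1.x, as in the snippet above

max_char_len = 10  # stand-in for args.max_char_len

p_c = tf.placeholder(name='p_c_index', shape=(None, max_char_len), dtype=tf.int32)
doubled = p_c * 2  # any op consuming the placeholder

with tf.Session() as sess:
    batch = np.zeros((4, max_char_len), dtype=np.int32)
    print(sess.run(doubled, feed_dict={p_c: batch}))  # (4, 10) array of zeros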
Example #2
import argparse

import cv2
import numpy as np
from utils import load_data

# Assumptions: the original snippet imports these elsewhere. `preprocess_input`
# matches keras.applications.vgg16; `vgg16` is presumed to be a project-local
# model builder (the module name below is a placeholder).
from keras.applications.vgg16 import preprocess_input
from model import vgg16

NUM_CLS = 2
parser = argparse.ArgumentParser()
parser.add_argument("--test_map_path", type=str)
parser.add_argument("--pred_path", type=str)
parser.add_argument("--model_path", type=str)
args = parser.parse_args()

TEST_MAP_PATH = args.test_map_path
PRED_PATH = args.pred_path
WIN_SIZE = 40  # assumed; the original snippet uses WIN_SIZE without defining it
STRIDE = 20

model = vgg16()
model.load_weights(args.model_path)

test_data, img_names = load_data.load_all_data(TEST_MAP_PATH,
                                               '',
                                               WIN_SIZE,
                                               STRIDE,
                                               flip=False)
test_data = preprocess_input(test_data.astype('float64'))

pred = model.predict(test_data)
map_img = cv2.imread(TEST_MAP_PATH)
res = np.zeros((map_img.shape[0], map_img.shape[1]))
for i in range(pred.shape[0]):
    if pred[i, 0] > 0.99:
        idx = img_names[i].split('_')
        x, y = int(idx[0]), int(idx[1])
        res[x:x + WIN_SIZE, y:y + WIN_SIZE] = 255
cv2.imwrite(PRED_PATH, res)
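The painting loop depends on each window's file name encoding its top-left map offset as `x_y_...`; windows whose class-0 score clears 0.99 are drawn as white WIN_SIZE squares. A toy, self-contained illustration of that scheme (the names and scores below are made up):

import numpy as np

WIN_SIZE = 40
img_names = ['0_0_a.png', '40_80_b.png']  # hypothetical 'x_y_...' names
scores = np.array([0.995, 0.50])          # hypothetical class-0 scores

res = np.zeros((200, 200))
for name, score in zip(img_names, scores):
    if score > 0.99:
        idx = name.split('_')
        x, y = int(idx[0]), int(idx[1])
        res[x:x + WIN_SIZE, y:y + WIN_SIZE] = 255
print(int(res.sum() // 255))  # 1600 painted pixels: one 40x40 window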
Example #3
import os
import sys

sys.path.append(os.path.join(os.path.dirname(__file__), '../'))

from drcn.graph import Graph
import tensorflow as tf
from drcn import args
from utils.load_data import load_all_data
import numpy as np
import pickle

os.environ['CUDA_VISIBLE_DEVICES'] = '1'

p_c_index, h_c_index, p_w_index, h_w_index, p_w_vec, h_w_vec, same_word, label = load_all_data('../input/test.csv',
                                                                                               data_size=None)
p_c_index_holder = tf.placeholder(name='p_c_index', shape=(None, args.max_char_len), dtype=tf.int32)
h_c_index_holder = tf.placeholder(name='h_c_index', shape=(None, args.max_char_len), dtype=tf.int32)
p_w_index_holder = tf.placeholder(name='p_w_index', shape=(None, args.max_word_len), dtype=tf.int32)
h_w_index_holder = tf.placeholder(name='h_w_index', shape=(None, args.max_word_len), dtype=tf.int32)
p_w_vec_holder = tf.placeholder(name='p_w_vec', shape=(None, args.max_word_len, args.word_embedding_len),
                                dtype=tf.float32)
h_w_vec_holder = tf.placeholder(name='h_w_vec', shape=(None, args.max_word_len, args.word_embedding_len),
                                dtype=tf.float32)
same_word_holder = tf.placeholder(name='same_word', shape=(None,), dtype=tf.int32)
label_holder = tf.placeholder(name='y', shape=(None,), dtype=tf.float32)

dataset = tf.data.Dataset.from_tensor_slices(
    (p_c_index_holder, h_c_index_holder, p_w_index_holder, h_w_index_holder, p_w_vec_holder, h_w_vec_holder,
     same_word_holder, label_holder))
dataset = dataset.batch(args.batch_size).repeat(args.epochs)
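Because the Dataset is built from placeholders, it must be initialized inside a session before batches can be drawn. The snippet stops here; in TensorFlow 1.x the usual continuation (a sketch, not necessarily the project's exact code) is an initializable iterator:

iterator = dataset.make_initializable_iterator()
next_batch = iterator.get_next()

with tf.Session() as sess:
    sess.run(iterator.initializer, feed_dict={
        p_c_index_holder: p_c_index, h_c_index_holder: h_c_index,
        p_w_index_holder: p_w_index, h_w_index_holder: h_w_index,
        p_w_vec_holder: p_w_vec, h_w_vec_holder: h_w_vec,
        same_word_holder: same_word, label_holder: label,
    })
    batch = sess.run(next_batch)  # one batch of the eight tensors above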
Example #4
        drcn_params['max_features'] = 1700
        drcn_params['word_max_features'] = 7300
        drcn_params['word_embed_size'] = 100
        drcn_params['embed_size'] = 100

        word_embedding_matrix = load_word_embed(drcn_params['word_max_features'], drcn_params['word_embed_size'])
        char_embedding_matrix = load_char_embed(drcn_params['max_features'], drcn_params['embed_size'])

        drcn_params['embedding_matrix'] = char_embedding_matrix
        drcn_params['word_embedding_matrix'] = word_embedding_matrix

        params = drcn_params
        backend = DRCN(params)

    if model_name == "drcn":
        p_c_index, h_c_index, p_w_index, h_w_index, same_word, y = load_all_data(
            './input/train.csv', maxlen=params['input_shapes'][0][0])
        x = [p_c_index, h_c_index, p_w_index, h_w_index]
        y = keras.utils.to_categorical(y, num_classes=params['num_classes'])
        p_c_index_evl, h_c_index_evl, p_w_index_evl, h_w_index_evl, same_word_evl, y_eval = load_all_data(
            './input/dev.csv', maxlen=params['input_shapes'][0][0])
        x_eval = [p_c_index_evl, h_c_index_evl, p_w_index_evl, h_w_index_evl]
        y_eval = keras.utils.to_categorical(y_eval, num_classes=params['num_classes'])
        p_c_index_test, h_c_index_test, p_w_index_test, h_w_index_test, same_word_test, y_test = load_all_data(
            './input/test.csv', maxlen=params['input_shapes'][0][0])
        x_test = [p_c_index_test, h_c_index_test, p_w_index_test, h_w_index_test]
        y_test = keras.utils.to_categorical(y_test, num_classes=params['num_classes'])
    else:
        p, h, y = load_char_data('input/train.csv', data_size=None, maxlen=params['input_shapes'][0][0])
        x = [p, h]
        y = keras.utils.to_categorical(y, num_classes=params['num_classes'])
        p_eval, h_eval, y_eval = load_char_data('input/dev.csv', data_size=None, maxlen=params['input_shapes'][0][0])
        x_eval = [p_eval, h_eval]
        y_eval = keras.utils.to_categorical(y_eval, num_classes=params['num_classes'])
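`keras.utils.to_categorical` one-hot encodes the integer labels so they match a softmax output of `num_classes` units. A quick check of what it produces for `num_classes=2`:

from keras.utils import to_categorical

print(to_categorical([0, 1, 1], num_classes=2))
# [[1. 0.]
#  [0. 1.]
#  [0. 1.]]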
Example #5
import numpy as np
from keras.optimizers import Adam
from utils import load_data

# LEARNING_RATE, MAP_PATH, MASK_PATH, TARGET_SAMPLE_DIR, IMG_SIZE and STRIDE
# are defined earlier in the original script and are not part of this excerpt.
MODEL_PATH = ''
batch_size = 81
wetlands = [145, 147, 149, 162, 169, 178, 372, 385]
others = [4, 29, 52, 96, 116, 297, 365, 376]
latent_dim = 32
intermediate_dim = 1024
num_cls = 2
optimizer = Adam(lr=LEARNING_RATE)
initializer = 'glorot_normal'  # alternative: 'random_uniform'

# optimizer = SGD(lr=LEARNING_RATE)
# optimizer = Adagrad(lr=LEARNING_RATE)

####################################################################################
# load data
x_u, _ = load_data.load_all_data(MAP_PATH, MASK_PATH, IMG_SIZE, STRIDE)
# x_u, _ = load_data.load_wetland_samples(SUBSET_PATH)
np.random.shuffle(x_u)
x_l, target_name = load_data.load_wetland_samples(TARGET_SAMPLE_DIR)
# x_l = x_u[39:40]
# x_l_aug = data_augmentation.data_aug(x_l, SHIFT_LIST, ROTATION_ANGLE)
x_l_aug = x_l
# x_u = data_augmentation.data_aug(x_l, SHIFT_LIST, rotation)
print(x_l_aug.shape)

np.random.shuffle(x_l_aug)

x_l = np.reshape(x_l, [-1, IMG_SIZE * IMG_SIZE * 3])
x_l_aug = np.reshape(x_l_aug, [-1, IMG_SIZE * IMG_SIZE * 3])
x_u = np.reshape(x_u, [-1, IMG_SIZE * IMG_SIZE * 3])
image_size = x_u.shape[1]
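The reshapes flatten each IMG_SIZE x IMG_SIZE x 3 window into a single row vector, so `image_size` comes out as IMG_SIZE * IMG_SIZE * 3. A self-contained check (IMG_SIZE = 80 is an assumption; the excerpt does not define it):

import numpy as np

IMG_SIZE = 80  # assumed; defined elsewhere in the original script
x = np.zeros((5, IMG_SIZE, IMG_SIZE, 3))
x = np.reshape(x, [-1, IMG_SIZE * IMG_SIZE * 3])
print(x.shape)  # (5, 19200), so image_size == 19200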
Example #6
# training data generation

import cv2
import numpy as np
from utils import load_data, data_augmentation, helper
import os


MAP_PATH = './data/orcutt_2018/CA_Orcutt_20180828_TM_geo.png'
MASK_PATH = './data/orcutt_2018/wetlands.png'
SUBSET_PATH = './data/orcutt_2018/neg_samples'

IMG_SIZE = 40
STRIDE = 20
num_imgs = 1000

helper.create_folder(SUBSET_PATH)

mask = cv2.imread(MASK_PATH)
print(mask.shape)
all_data, _ = load_data.load_all_data(MAP_PATH, MASK_PATH, IMG_SIZE, STRIDE, flip=True)
print(all_data.shape)
np.random.shuffle(all_data)
# helper.remove_files(SUBSET_PATH)
for i in range(num_imgs):
    cv2.imwrite(os.path.join(SUBSET_PATH, str(i) + '.png'), all_data[i].reshape((IMG_SIZE, IMG_SIZE, 3)))
print('done...')
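`load_data.load_all_data` itself is not shown in these examples; judging from the call sites, it slides an IMG_SIZE window over the map image at the given STRIDE, optionally adding flipped copies. A hypothetical, self-contained reimplementation of just that tiling idea (not the project's actual code):

import numpy as np

def sliding_windows(img, win, stride, flip=False):
    """Cut an (H, W, C) image into win x win patches at the given stride."""
    patches = []
    for x in range(0, img.shape[0] - win + 1, stride):
        for y in range(0, img.shape[1] - win + 1, stride):
            patch = img[x:x + win, y:y + win]
            patches.append(patch)
            if flip:
                patches.append(patch[:, ::-1])  # horizontally flipped copy
    return np.array(patches)

demo = np.zeros((100, 100, 3), dtype=np.uint8)
print(sliding_windows(demo, 40, 20, flip=True).shape)  # (32, 40, 40, 3)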
Example #7
from utils import load_data, data_augmentation
import numpy as np
import os
import cv2

os.environ["CUDA_VISIBLE_DEVICES"] = "0"

TARGET_SAMPLE_DIR = "./data/bray_2001/target_samples"
MAP_PATH = './data/bray_2001/CA_Bray_100414_2001_24000_geo.png'
MASK_PATH = './data/bray_2001/wetlands.png'
SHIFT_LIST = [-20, -15, -10, -5, 0, 5, 10, 15, 20]
ROTATION_ANGLE = list(range(0, 360, 90))  # [0, 90, 180, 270]
IMG_SIZE = 80
STRIDE = 30

x_u, img_names = load_data.load_all_data(MAP_PATH, MASK_PATH, IMG_SIZE, STRIDE)

x_l, target_name = load_data.load_wetland_samples(TARGET_SAMPLE_DIR)

x_l_aug = data_augmentation.data_aug(x_l, SHIFT_LIST, ROTATION_ANGLE)
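`data_augmentation.data_aug` is likewise not shown; given the SHIFT_LIST offsets and 90-degree ROTATION_ANGLE steps above, a plausible sketch of shift-and-rotate augmentation looks like this (an assumption, not the project's implementation):

import numpy as np

def data_aug(imgs, shift_list, rotation_angles):
    """Return shifted and rotated copies of each (H, W, C) image."""
    out = []
    for img in imgs:
        for s in shift_list:
            out.append(np.roll(img, s, axis=0))   # vertical shift (wraps around)
        for a in rotation_angles:
            out.append(np.rot90(img, k=a // 90))  # 0/90/180/270 degrees
    return np.array(out)

demo = np.zeros((2, 80, 80, 3))
print(data_aug(demo, [-20, 0, 20], [0, 90, 180, 270]).shape)  # (14, 80, 80, 3)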