Example #1
import numpy as np

import build_model
import prepare

# Constants mirrored from Example #2 so this snippet runs stand-alone.
mxlen = 32
train_json = 'nlvr\\train\\train.json'
train_img_folder = 'nlvr\\train\\images'


def main(dataset="babl"):
    if dataset == "nlvr":
        data = prepare.load_data(train_json)
        data = prepare.tokenize_data(data, mxlen)
        imgs, ws, labels = prepare.load_images(train_img_folder,
                                               data,
                                               debug=True)
        data.clear()
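        # Standardize pixel values to zero mean and unit variance
        # over the training images.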
        imgs_mean = np.mean(imgs)
        imgs_std = np.std(imgs - imgs_mean)
        imgs = (imgs - imgs_mean) / imgs_std

    else:
        (inputs_train, queries_train, answers_train,
         inputs_test, queries_test, answers_test) = prepare.get_babl_data()

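    # Note: the fit call below uses the bAbI tensors, so it only works when
    # dataset != "nlvr"; the NLVR branch prepares imgs/ws/labels but never
    # feeds them to the model.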
    epochs = 100
    model = build_model.model()
    model.fit([inputs_train, queries_train],
              answers_train,
              validation_split=0.1,
              epochs=epochs)
    model.save('model')
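
# Hypothetical entry point (not part of the original snippet): run with the
# default bAbI configuration.
if __name__ == '__main__':
    main()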
Example #2
import gc
import subprocess

import numpy as np
from keras.layers import Activation, BatchNormalization, Conv2D
from keras.optimizers import Adam

import prepare

mxlen = 32
embedding_dim = 50
lstm_unit = 128
MLP_unit = 128
epochs = 100

train_json = 'nlvr\\train\\train.json'
train_img_folder = 'nlvr\\train\\images'
data = prepare.load_data(train_json)
data = prepare.tokenize_data(data, mxlen)
imgs, ws, labels = prepare.load_images(train_img_folder, data, debug=True)
data.clear()
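# Standardize images to zero mean and unit variance over the training set.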
imgs_mean = np.mean(imgs)
imgs_std = np.std(imgs - imgs_mean)
imgs = (imgs - imgs_mean) / imgs_std

batch_size = 64


def bn_layer(x, conv_unit):
    """Return a Conv2D -> BatchNormalization -> ReLU block with x filters."""
    def f(inputs):
        md = Conv2D(x, (conv_unit, conv_unit), padding='same')(inputs)
        md = BatchNormalization()(md)
        return Activation('relu')(md)
    return f

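
# Minimal usage sketch for bn_layer (the input shape and filter count below
# are assumptions for illustration, not values taken from this snippet):
def _bn_layer_demo():
    from keras.layers import Input
    from keras.models import Model
    inp = Input(shape=(64, 64, 3))               # hypothetical image size
    out = bn_layer(24, 3)(bn_layer(24, 3)(inp))  # two conv-BN-ReLU stacks
    return Model(inputs=inp, outputs=out)
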
EPOCHS = 60
BATCH_SIZE = 256

train_json = 'data/nlvr/train/train.json'
train_img_folder = 'data/nlvr/train/images'
dev_json = 'data/nlvr/dev/dev.json'
dev_img_folder = 'data/nlvr/dev/images'

print('Processing input data...')

data = prepare.load_data(train_json)
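# Fit the tokenizer vocabulary on the training data before tokenizing.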
prepare.init_tokenizer(data)
data = prepare.tokenize_data(data, mxlen)
imgs, ws, labels = prepare.load_images(train_img_folder, data, section=True)
data.clear()

dev_data = prepare.load_data(dev_json)
dev_data = prepare.tokenize_data(dev_data, mxlen)
dev_imgs, dev_ws, dev_labels = prepare.load_images(dev_img_folder, dev_data, section=True)
dev_data.clear()

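# Standardize using training-set statistics only; the dev images reuse the
# same mean/std (the 1e-7 epsilon guards against division by zero).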
imgs_mean = np.mean(imgs)
imgs_std = np.std(imgs - imgs_mean)
imgs = (imgs - imgs_mean) / (imgs_std + 1e-7)

dev_imgs = (dev_imgs - imgs_mean) / (imgs_std + 1e-7)

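# Each NLVR example pairs a sentence with three sub-images; split them along
# axis 1 into three separate model inputs.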
imgs_1, imgs_2, imgs_3 = imgs[:,0,:,:], imgs[:,1,:,:], imgs[:,2,:,:]