Example #1
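# The "adding problem": channel 0 carries uniform random values, channel 1 a
# binary mask marking two positions, and the regression target is the sum of
# the two marked values. Imports assumed by this excerpt (cf is the script's
# hyperparameter config module, imported elsewhere):
import numpy as np
from sklearn.utils import shuffle
from tensorflow.keras.optimizers import Adam
import build_ResTCN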
X_num = np.random.uniform(0, 1, (cf.N_test, 1, cf.T))
X_mask = np.zeros([cf.N_test, 1, cf.T])
y_test = np.zeros([cf.N_test, 1])
for i in range(cf.N_test):
    positions = np.random.choice(cf.T, size=2, replace=False)
    X_mask[i, 0, positions[0]] = 1
    X_mask[i, 0, positions[1]] = 1
    y_test[i, 0] = X_num[i, 0, positions[0]] + X_num[i, 0, positions[1]]
X_test = np.concatenate((X_num, X_mask), axis=1)

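# X_train / y_train are assumed to be generated the same way as the test
# split above (not shown in this excerpt).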
X_train, y_train = shuffle(X_train, y_train)

# Build model
model = build_ResTCN.ResTCN(cf.n_classes,
                            cf.n_channels,
                            cf.k,
                            cf.dp,
                            variant='AddProb')

opt = Adam(learning_rate=cf.lr, clipnorm=1.0)

model.compile(loss='mean_squared_error', optimizer=opt)

# Train
hist = model.fit(x=X_train.reshape(-1, 1, X_train.shape[-1], 2),
                 y=y_train.reshape(y_train.shape[0], 1),
                 shuffle=True,
                 validation_data=(X_test.reshape(-1, 1, X_test.shape[-1], 2),
                                  y_test.reshape(y_test.shape[0], 1)),
                 verbose=1,
                 batch_size=cf.batch_size,
                 epochs=cf.epochs)
Example #2
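# Character-level PTB language modeling. The training split is batchified
# with cf.batch_size; note that eval_batch_size is defined but the
# validation and test splits below are batchified with batch size 1.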
eval_batch_size = 10
X_train = batchify(char_tensor(corpus, file), cf.batch_size)
X_valid = batchify(char_tensor(corpus, valfile), 1)
X_test = batchify(char_tensor(corpus, testfile), 1)

n_chars = len(corpus.dict)

n_classes = [n_chars, cf.emb_size]

num_chans = cf.n_channels[:-1] + [cf.emb_size]
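# The last level's channel width is replaced by the embedding size,
# presumably so the output can be projected back onto the vocabulary.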

model = build_ResTCN.ResTCN(n_classes,
                            num_chans,
                            cf.k,
                            cf.dp,
                            variant='Char_PTB')

opt = SGD(learning_rate=cf.lr, clipvalue=0.15)
#opt = Adam(learning_rate=1e-3, clipnorm=0.4)
# model.compile(
#         loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
#         optimizer=opt)

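# No compile/fit here: training appears to run in a custom loop that fills
# the loss lists below and adjusts lr manually.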
train_losses = list()
valid_losses = list()
test_losses = list()
epoch = 0

lr = cf.lr
Example #3
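# Sequential MNIST: flatten each 28x28 image into a single length-784
# sequence, giving inputs of shape (N, 1, 784).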
X_train = X_train.reshape((X_train.shape[0], 1, X_train.shape[1]*X_train.shape[2]))
X_test = X_test.reshape((X_test.shape[0], 1, X_test.shape[1]*X_test.shape[2]))

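# Optionally apply a fixed random pixel permutation (the harder "permuted
# MNIST" variant); the same permutation is applied to train and test.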
permute = True
if permute:
    perm = np.random.permutation(X_train.shape[-1])
    X_train = X_train[:, :, perm]
    X_test = X_test[:, :, perm]

X_train, y_train = shuffle(X_train, y_train)

n_batch = X_train.shape[0] // cf.batch_size

# Build model
#model = build_ResTCN.ResTCN_d1(cf.n_classes, cf.n_channels, cf.k, cf.dp, variant='SeqMNIST')
model = build_ResTCN.ResTCN(cf.n_classes, cf.n_channels, cf.k, cf.dp, variant='SeqMNIST')

opt = Adam(learning_rate=cf.lr)

model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              optimizer=opt,
              metrics=['accuracy'])

# Train
hist = model.fit(x=X_train.reshape(-1, 1, X_train.shape[-1], 1),
                 y=y_train.reshape(y_train.shape[0], 1),
                 shuffle=True,
                 validation_data=(X_test.reshape(-1, 1, X_test.shape[-1], 1),
                                  y_test.reshape(y_test.shape[0], 1)),
                 verbose=1,
                 batch_size=cf.batch_size,
                 epochs=cf.epochs)
Example #4

#
# load data
#

# JSB_Chorales or Nottingham
data = loadmat('./old/TCN/polymusic/JSB_Chorales.mat')

X_train = data['traindata'][0]
X_valid = data['validdata'][0]
X_test = data['testdata'][0]
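# Each split is assumed to be an array of variable-length piano-roll
# sequences, one entry per piece.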

model = build_ResTCN.ResTCN(cf.n_classes,
                            cf.n_channels,
                            cf.k,
                            cf.dp,
                            variant='Nottingham')

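# Note: the decay schedule below is never passed to the optimizer; Adam is
# built with the fixed cf.lr (use learning_rate=learning_rate_fn to enable it).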
learning_rate_fn = tf.keras.optimizers.schedules.InverseTimeDecay(
    cf.lr, 1e3, 9, staircase=True)
opt = Adam(learning_rate=cf.lr, clipnorm=0.4)
#opt = RMSprop(lr=lr, clipvalue=1.0)

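# NLL is assumed to be a custom negative log-likelihood loss defined
# elsewhere in the script.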
model.compile(
    #loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),
    loss=NLL,
    optimizer=opt)
#metrics=[mae])

train_losses = list()
Example #5
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.models import load_model

from sklearn.utils import shuffle

import build_ResTCN

from preprocessing_LAMBADA import data_generator, batchify, get_batch

#from train_ResTCN import train_LAMBADA

#
# load data
#

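# data_generator is assumed to return batchified train/valid/test tensors
# together with the corpus object holding the word-level dictionary.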
train_data, val_data, test_data, corpus = data_generator('LAMBADA', cf.seqlen)

n_words = len(corpus.dictionary)

n_classes = [n_words, cf.emb_size]

num_chans = cf.n_channels[:-1] + [cf.emb_size]

model = build_ResTCN.ResTCN(n_classes,
                            num_chans,
                            cf.k,
                            cf.dp,
                            variant='LAMBADA')