import numpy as np
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing import sequence

import sentiment_140_neg  # project-local dataset module (assumed importable)

# Convolution
kernel_size = 5  # width of the 1D convolution window
filters = 96     # number of convolution filters
pool_size = 2    # max-pooling window size

# RNN
rnn_output_size = 70  # dimensionality of the recurrent layer's output

# Sequences
maxlen = 100  # maximum sequence length; assumed value, not set in the original snippet

# Training
batch_size = 512
epochs = 5

print('Loading data...')
(x_train, y_train), (x_val, y_val), (x_test,
                                     y_test) = sentiment_140_neg.load_data()

print('Fitting tokenizer...')
tokenizer = Tokenizer()
# Build the vocabulary over all splits so every token maps to an index.
tokenizer.fit_on_texts(np.concatenate((x_train, x_val, x_test)))
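# The embedding layer further down needs the vocabulary size. Keras reserves
# index 0 for padding, hence the +1 (a standard convention; the original
# snippet does not compute this).
vocab_size = len(tokenizer.word_index) + 1
print('Vocabulary size:', vocab_size)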

print('Converting text to sequences...')
x_train = tokenizer.texts_to_sequences(x_train)
x_val = tokenizer.texts_to_sequences(x_val)
x_test = tokenizer.texts_to_sequences(x_test)

print('Padding sequences (samples x time)...')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_val = sequence.pad_sequences(x_val, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
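# Sanity-check the padded tensors; each should be (num_samples, maxlen).
print('x_train shape:', x_train.shape)
print('x_val shape:', x_val.shape)
print('x_test shape:', x_test.shape)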
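# A minimal model sketch wiring the hyperparameters above into a CNN+RNN
# stack (Embedding -> Conv1D -> MaxPooling1D -> LSTM -> Dense). The original
# snippet stops after preprocessing, so the architecture, embedding_size,
# and the sigmoid/binary-crossentropy head are assumptions for binary
# sentiment labels.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Embedding, Conv1D, MaxPooling1D, LSTM,
                                     Dense, Dropout)

embedding_size = 128  # assumed; not defined in the original snippet

model = Sequential([
    Embedding(vocab_size, embedding_size, input_length=maxlen),
    Dropout(0.25),
    Conv1D(filters, kernel_size, padding='valid', activation='relu'),
    MaxPooling1D(pool_size=pool_size),
    LSTM(rnn_output_size),
    Dense(1, activation='sigmoid'),
])
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

print('Training...')
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(x_val, y_val))

score, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print('Test accuracy:', acc)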