Example #1
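# `model` and the preprocessed MNIST arrays (`X_train_processed_norm`,
# `y_train_encoded`, etc.) are defined in earlier cells that are not included in
# this snippet. A minimal sketch of a dense classifier compatible with the
# compile/fit calls below might look like this (the layer sizes and input shape
# are assumptions, not the original architecture):
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten

model = Sequential()
model.add(Flatten(input_shape=(28, 28)))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
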
model.summary()

from keras.callbacks import ModelCheckpoint

model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
checkpointer = ModelCheckpoint(filepath='mnist.model.best.hdf5', verbose=1, save_best_only=True)

results = model.fit(X_train_processed_norm, y_train_encoded, batch_size=150, epochs=30,
          validation_split=0.33, callbacks=[checkpointer],
          verbose=0, shuffle=True)


# Test accuracy

# In[17]:


model.load_weights('mnist.model.best.hdf5')
score = model.evaluate(X_test_processed_norm, y_test_encoded, verbose=0)
print('Test accuracy: %f' % score[1])


# ## Convolutional Neural Network

# In[21]:


from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D


# In[22]:
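
# The body of this cell is not included in the snippet. A minimal sketch of a
# small CNN built from the layers imported above might look like this (the
# `cnn_model` name, filter counts, kernel sizes and input shape are assumptions,
# not the original values):
from keras.models import Sequential
from keras.layers import Dense

cnn_model = Sequential()
cnn_model.add(Conv2D(filters=16, kernel_size=2, padding='same', activation='relu',
                     input_shape=(28, 28, 1)))
cnn_model.add(MaxPooling2D(pool_size=2))
cnn_model.add(Conv2D(filters=32, kernel_size=2, padding='same', activation='relu'))
cnn_model.add(MaxPooling2D(pool_size=2))
cnn_model.add(GlobalAveragePooling2D())
cnn_model.add(Dense(10, activation='softmax'))
cnn_model.summary()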

Example #2
model.save_weights('model1.h5')
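# The architecture file 'model1.json' (loaded further below) is assumed to have
# been written from the same trained model; the original snippet does not show
# that step. A sketch of how it could be saved:
with open('model1.json', 'w') as arch_file:
    arch_file.write(model.to_json())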

import keras
from keras.models import model_from_json
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import numpy as np

twt = [
    'A lot of good things are happening. We are respected again throughout the world, and that\'s a great thing'
]
json_file = open('model1.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
model.load_weights('model1.h5')

max_features = 2000
tokenizer = Tokenizer(num_words=max_features, split=' ')
tokenizer.fit_on_texts(twt)
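# Note: the original snippet fits the tokenizer on the single tweet above. In
# practice the tokenizer should be fitted on (or restored from) the same training
# texts used for the saved model, otherwise the word indices will not match the
# ones the model was trained on.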

# Vectorizing the tweet with the pre-fitted tokenizer instance
twt = tokenizer.texts_to_sequences(twt)

# Padding the tweet to have exactly the same shape as the `embedding_2` input
twt = pad_sequences(twt, maxlen=28, dtype='int32', value=0)
print(twt)
sentiment = model.predict(twt, batch_size=1, verbose=2)[0]
if np.argmax(sentiment) == 0:
    print("negative")
elif np.argmax(sentiment) == 1:
    print("positive")