# Keras 1.x imports assumed throughout these snippets.
from keras.models import Sequential
from keras.layers import SimpleRNN, Dense, Activation
from keras.initializations import normal, identity
from keras.optimizers import RMSprop

def mnist_irnn_model(inputShape, nb_classes):
    # inputShape is 2-D: (timesteps, input_dim), e.g. (784, 1) for
    # pixel-by-pixel MNIST.
    model = Sequential()
    model.add(SimpleRNN(output_dim=100,
                        init=lambda shape, name: normal(shape, scale=0.001, name=name),
                        inner_init=lambda shape, name: identity(shape, scale=1.0, name=name),
                        activation='relu',
                        input_shape=inputShape))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    model.summary()
    return model
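A minimal usage sketch for the function above (assuming the standard Keras 1.x MNIST workflow; reshaping to (784, 1) feeds the network one pixel per timestep, the usual IRNN benchmark setup):

from keras.datasets import mnist
from keras.utils import np_utils

(X_train, y_train), _ = mnist.load_data()
# One pixel per timestep: each 28x28 image becomes a length-784 sequence.
X_train = X_train.reshape(X_train.shape[0], 784, 1).astype('float32') / 255
Y_train = np_utils.to_categorical(y_train, 10)

model = mnist_irnn_model((784, 1), nb_classes=10)
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=32, nb_epoch=1)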
Example #2
def irnn(shape_orig, nb_classes):
    # shape_orig: (timesteps, input_dim); nb_classes: number of output classes.
    hidden_units = 100
    learning_rate = 1e-6

    model = Sequential()
    model.add(
        SimpleRNN(
            output_dim=hidden_units,
            init=lambda shape, name: normal(shape, scale=0.001, name=name),
            inner_init=lambda shape, name: identity(
                shape, scale=1.0, name=name),
            activation='relu',
            input_shape=shape_orig))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    rmsprop = RMSprop(lr=learning_rate)
    model.compile(loss='categorical_crossentropy',
                  optimizer=rmsprop,
                  metrics=['accuracy'])

    return model
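The init/inner_init keywords above are Keras 1.x API. In Keras 2 / tf.keras the same IRNN layer is written with kernel_initializer and recurrent_initializer; a sketch of the equivalent model (Keras 2 API assumed):

from tensorflow.keras import Sequential, initializers
from tensorflow.keras.layers import Dense, SimpleRNN

def irnn_v2(input_shape, nb_classes, hidden_units=100):
    # Same recipe: small-normal input weights, identity recurrent weights, ReLU.
    return Sequential([
        SimpleRNN(hidden_units,
                  kernel_initializer=initializers.RandomNormal(stddev=0.001),
                  recurrent_initializer=initializers.Identity(),
                  activation='relu',
                  input_shape=input_shape),
        Dense(nb_classes, activation='softmax'),
    ])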
Example #3
def buildAttention(self):
    # Legacy TensorFlow (r0.x) API: batch_matmul, pack, reduction_indices.
    q_relu = self.tensors['q_relu']
    a_relu = self.tensors['a_relu']
    with tf.name_scope("attention"):
        W = identity([self.params['nb_filter'], self.params['nb_filter']], name='W')
        batch = tf.shape(q_relu)[0]
        # Tile W across the batch so it can be batch-multiplied with q_relu.
        q_matmul = tf.batch_matmul(
            q_relu,
            tf.tile(tf.expand_dims(W, 0),
                    tf.pack([batch, tf.constant(1), tf.constant(1)])))
        # shape = (batch, q_length, a_length)
        qa_attention = tf.batch_matmul(q_matmul, a_relu, adj_x=False, adj_y=True, name="attention")
        qa_attention = tf.tanh(qa_attention)
        # shape = (batch, q_length, 1)
        q_max = tf.reduce_max(qa_attention, reduction_indices=[2], keep_dims=True, name='q_max')
        # shape = (batch, 1, a_length)
        a_max = tf.reduce_max(qa_attention, reduction_indices=[1], keep_dims=True, name='a_max')
        # shape = (batch, q_length, 1)
        q_softmax = tf.expand_dims(tf.nn.softmax(tf.squeeze(q_max, [2])), -1)
        # shape = (batch, a_length, 1)
        a_softmax = tf.expand_dims(tf.nn.softmax(tf.squeeze(a_max, [1])), -1)
        # https://www.tensorflow.org/versions/r0.9/api_docs/python/math_ops.html#batch_matmul
        # shape = (batch, NUM_FILTERS, 1)
        q_feature = tf.batch_matmul(q_relu, q_softmax, adj_x=True, adj_y=False)
        a_feature = tf.batch_matmul(a_relu, a_softmax, adj_x=True, adj_y=False)
    self.tensors['q_feature'] = q_feature
    self.tensors['a_feature'] = a_feature
    self.tensors.setdefault('weights', []).append(W)
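tf.batch_matmul and tf.pack were removed in TensorFlow 1.0 (tf.matmul now handles batched operands, and tf.pack became tf.stack). A rough equivalent of the attention block in current TensorFlow, as a sketch with the same shapes as the comments above:

import tensorflow as tf

def attention_pool(q_relu, a_relu, nb_filter):
    # q_relu: (batch, q_length, nb_filter); a_relu: (batch, a_length, nb_filter)
    W = tf.Variable(tf.eye(nb_filter), name='W')
    # (batch, q_length, a_length): tanh(q W a^T), with W broadcast over the batch
    qa_attention = tf.tanh(tf.einsum('bqf,fg,bag->bqa', q_relu, W, a_relu))
    q_softmax = tf.nn.softmax(tf.reduce_max(qa_attention, axis=2))  # (batch, q_length)
    a_softmax = tf.nn.softmax(tf.reduce_max(qa_attention, axis=1))  # (batch, a_length)
    # (batch, nb_filter, 1): attention-weighted pooling of each side
    q_feature = tf.matmul(q_relu, q_softmax[..., None], transpose_a=True)
    a_feature = tf.matmul(a_relu, a_softmax[..., None], transpose_a=True)
    return q_feature, a_feature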
Example #4
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)

print('Evaluate IRNN...')
model = Sequential()
model.add(SimpleRNN(output_dim=hidden_units,
                    init=lambda shape, name: normal(shape, scale=0.001, name=name),
                    inner_init=lambda shape, name: identity(shape, scale=1.0, name=name),
                    activation='relu',
                    input_shape=X_train.shape[1:]))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
rmsprop = RMSprop(lr=learning_rate)
model.compile(loss='categorical_crossentropy',
              optimizer=rmsprop,
              metrics=['accuracy'])

model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epochs,
          verbose=1, validation_data=(X_test, Y_test))

scores = model.evaluate(X_test, Y_test, verbose=0)
print('IRNN test score:', scores[0])
print('IRNN test accuracy:', scores[1])
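The initialization recipe shared by these examples is the IRNN trick from Le, Jaitly & Hinton, "A Simple Way to Initialize Recurrent Networks of Rectified Linear Units" (2015): identity recurrent weights plus ReLU make the untrained step h_t = relu(W_x * x_t + h_{t-1}) act like an accumulator, so activations and gradients survive all 784 pixel timesteps. A small NumPy illustration (my own sketch, hypothetical sizes):

import numpy as np

rng = np.random.RandomState(0)
W_x = rng.normal(scale=0.001, size=100)  # input weights ~ N(0, 0.001), scalar input
h = np.zeros(100)
for x_t in rng.rand(784):                # one fake length-784 pixel sequence
    # identity recurrent matrix: the previous state is carried over unchanged
    h = np.maximum(0.0, W_x * x_t + h)
print(h.max())  # state drifts slowly instead of exploding or vanishing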
Example #5
y_train = to_categorical(y_train1)
y_test = to_categorical(y_test1)



# reshape input to be [samples, time steps, features]
X_train = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
X_test = np.reshape(testT, (testT.shape[0], 1, testT.shape[1]))


batch_size = 64
learning_rate = 0.1

# 1. define the network
model = Sequential()
model.add(SimpleRNN(64,
                    init=lambda shape, name: normal(shape, scale=0.001, name=name),
                    inner_init=lambda shape, name: identity(shape, scale=1.0, name=name),
                    input_dim=22,
                    activation='relu',
                    return_sequences=True))  # try using a GRU instead, for fun
model.add(SimpleRNN(64, activation='relu', return_sequences=True))
model.add(SimpleRNN(64, activation='relu', return_sequences=True))
model.add(SimpleRNN(64, activation='relu', return_sequences=True))
model.add(SimpleRNN(64, activation='relu', return_sequences=True))
model.add(SimpleRNN(64, activation='relu', return_sequences=False))
model.add(Dense(11))
model.add(Activation('softmax'))
rmsprop = RMSprop(lr=learning_rate)

# try using different optimizers and different optimizer configs
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
checkpointer = callbacks.ModelCheckpoint(filepath="kddresults/lstm6layer/checkpoint-{epoch:02d}.hdf5",
                                         verbose=1, save_best_only=True, monitor='val_acc', mode='max')
csv_logger = CSVLogger('training_set_iranalysis6.csv', separator=',', append=False)
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=1000,
          validation_data=(X_test, y_test), callbacks=[checkpointer, csv_logger])
model.save("kddresults/lstm6layer/lstm1layer_model.hdf5")
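Note the stacking pattern above: every SimpleRNN except the last sets return_sequences=True, so each layer passes the full (timesteps, units) sequence to the next one, while the final layer emits only its last hidden state for the Dense classifier. A minimal shape check (Keras 1.x, hypothetical 22-feature input as above):

from keras.models import Sequential
from keras.layers import SimpleRNN

stack = Sequential()
stack.add(SimpleRNN(64, input_shape=(1, 22), return_sequences=True))
stack.add(SimpleRNN(64, return_sequences=False))  # final state only
print(stack.layers[0].output_shape)  # (None, 1, 64)
print(stack.layers[1].output_shape)  # (None, 64)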
Example #6
class LossHistory(keras.callbacks.Callback):
    def on_train_begin(self, logs={}):
        self.losses = []
        self.times = []
        self.start_time = time.time()

    def on_epoch_end(self, epoch, logs={}):
        self.times.append(time.time() - self.start_time)
        # Despite the attribute name, this stores per-epoch validation accuracy.
        self.losses.append(logs.get("val_acc"))
history = LossHistory()

print('Evaluate %s...' % ClassModel.__name__)
model = Sequential()
# Keras 0.x layer signatures: init takes only `shape`, Dense takes
# (input_dim, output_dim), and truncate_gradient sets the BPTT window.
model.add(ClassModel(input_dim=1, output_dim=hidden_units,
                     init=lambda shape: normal(shape, scale=0.001),
                     inner_init=lambda shape: identity(shape, scale=1.0),
                     activation='relu', truncate_gradient=BPTT_truncate))
model.add(Dense(hidden_units, nb_classes))
model.add(Activation('sigmoid'))
rmsprop = RMSprop(lr=learning_rate)
model.compile(loss='categorical_crossentropy', optimizer=rmsprop)

model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epochs,
          show_accuracy=True, verbose=1, validation_data=(X_test, Y_test),  callbacks=[history])

scores = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)
print('%s test score:' % ClassModel.__name__, scores[0])
print('%s test accuracy:' % ClassModel.__name__, scores[1])

record_file = FileIO()
record_file.save_pickle(history.losses, "%s_record_titan_x_losses" % ClassModel.__name__.lower())
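FileIO is a helper class from the snippet's own project and is not shown on this page; a minimal stand-in using the standard library (the .pkl filename convention is an assumption) could look like:

import pickle

class FileIO:
    def save_pickle(self, obj, name):
        # Hypothetical stand-in: serialize obj to <name>.pkl in the working directory.
        with open('%s.pkl' % name, 'wb') as f:
            pickle.dump(obj, f)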
Example #7
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)

print('Evaluate IRNN...')
model = Sequential()
model.add(
    SimpleRNN(output_dim=hidden_units,
              init=lambda shape: normal(shape, scale=0.001),
              inner_init=lambda shape: identity(shape, scale=1.0),
              activation='relu',
              truncate_gradient=BPTT_truncate,
              input_shape=(None, 1)))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
rmsprop = RMSprop(lr=learning_rate)
model.compile(loss='categorical_crossentropy', optimizer=rmsprop)

model.fit(X_train,
          Y_train,
          batch_size=batch_size,
          nb_epoch=nb_epochs,
          show_accuracy=True,
          verbose=1,
          validation_data=(X_test, Y_test))
Example #9
T = [[valid_chars[y] for y in x] for x in T]


X_test = sequence.pad_sequences(T, maxlen=max_len)


y_train = np.array(trainlabel)
y_test = np.array(testlabel)


embedding_vector_length = 128

# 1. define the network
model = Sequential()
model.add(Embedding(max_features, embedding_vector_length, input_length=max_len))
model.add(SimpleRNN(128,
                    init=lambda shape, name: normal(shape, scale=0.001, name=name),
                    inner_init=lambda shape, name: identity(shape, scale=1.0, name=name),
                    activation='relu'))  # try using a GRU instead, for fun
#model.add(Dropout(0.1))
model.add(Dense(1))
model.add(Activation('sigmoid'))
rmsprop = RMSprop(lr=learning_rate)

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
checkpointer = callbacks.ModelCheckpoint(filepath="logs/irnn/checkpoint-{epoch:02d}.hdf5",
                                         verbose=1, save_best_only=True, monitor='val_acc', mode='max')
csv_logger = CSVLogger('logs/irnn/training_set_irnnanalysis.csv', separator=',', append=False)
model.fit(X_train, y_train, batch_size=32, nb_epoch=1000, validation_split=0.33,
          shuffle=True, callbacks=[checkpointer, csv_logger])
score, acc = model.evaluate(X_test, y_test, batch_size=32)
print('Test score:', score)
print('Test accuracy:', acc)
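Like the rest of this page, the snippet above is Keras 1.x. A Keras 2 / tf.keras sketch of the same character-level binary classifier, with the IRNN-style initializers spelled out (the vocabulary and sequence sizes are placeholders):

from tensorflow.keras import Sequential, initializers
from tensorflow.keras.layers import Dense, Embedding, SimpleRNN

def char_irnn_v2(max_features, max_len, embedding_dim=128):
    return Sequential([
        Embedding(max_features, embedding_dim, input_length=max_len),
        SimpleRNN(128,
                  kernel_initializer=initializers.RandomNormal(stddev=0.001),
                  recurrent_initializer=initializers.Identity(),
                  activation='relu'),
        Dense(1, activation='sigmoid'),
    ])

model = char_irnn_v2(max_features=40, max_len=75)  # placeholder sizes
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])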