def test_sequential_fit_generator():
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    def data_generator(train):
        if train:
            max_batch_index = len(x_train) // batch_size
        else:
            max_batch_index = len(x_test) // batch_size
        i = 0
        while 1:
            if train:
                yield (x_train[i * batch_size: (i + 1) * batch_size],
                       y_train[i * batch_size: (i + 1) * batch_size])
            else:
                yield (x_test[i * batch_size: (i + 1) * batch_size],
                       y_test[i * batch_size: (i + 1) * batch_size])
            i += 1
            i = i % max_batch_index

    model = Sequential()
    model.add(Dense(num_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(num_class))
    model.pop()
    model.add(Dense(num_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit_generator(data_generator(True), 5, epochs)
    model.fit_generator(data_generator(True), 5, epochs,
                        validation_data=(x_test, y_test))
    model.fit_generator(data_generator(True), 5, epochs,
                        validation_data=data_generator(False),
                        validation_steps=3)
    model.fit_generator(data_generator(True), 5, epochs, max_queue_size=2)
    model.evaluate(x_train, y_train)
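# Side note, as a hedged sketch: in tf.keras (>= 2.1) fit_generator is
# deprecated and Model.fit consumes Python generators directly, so the calls
# above have direct equivalents. This assumes the same model, data_generator,
# and epochs as in the test above.
def fit_with_generator_tf2(model, data_generator, epochs):
    # steps_per_epoch plays the role of the old second positional argument
    model.fit(data_generator(True), steps_per_epoch=5, epochs=epochs)
    model.fit(data_generator(True), steps_per_epoch=5, epochs=epochs,
              validation_data=data_generator(False), validation_steps=3)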
def build_model():
    model = Sequential()
    model.add(Embedding(input_dim=vocab_size, output_dim=emb_dim,
                        input_length=maxlen, weights=[embedding_weights]))
    model.add(Dropout(0.2))
    model.add(Lambda(lambda x: K.sum(x, axis=1), output_shape=(300,)))
    model.add(Dropout(0.5))
    model.add(Dense(512))
    model.add(BatchNormalization())
    model.add(PReLU())
    model.add(Dropout(0.5))
    model.add(RepeatVector(maxlen))
    model.add(TimeDistributed(Dense(300)))
    model.add(BatchNormalization())
    model.add(PReLU())
    model.add(Dropout(0.5))
    model.add(TimeDistributed(Dense(vocab_size, activation='softmax')))
    model.load_weights(weights_path)
    # Drop the six decoder layers and replace them with a classification head.
    for _ in range(6):
        model.pop()
    model.add(Dense(nb_labels, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model
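# Usage sketch with assumed values (vocab_size, maxlen, nb_labels, the
# checkpoint path, and the random embedding matrix are all hypothetical);
# emb_dim must be 300 to match the output shapes hard-coded in build_model.
import numpy as np

vocab_size, emb_dim, maxlen, nb_labels = 10000, 300, 40, 5
embedding_weights = np.random.normal(size=(vocab_size, emb_dim))
weights_path = 'pretrained_lm_weights.h5'  # hypothetical checkpoint file
model = build_model()  # requires the checkpoint file to exist on disk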
def buildModel(self, modelPath):
    # Assemble VGG-16 (channels-first, 224x224 input).
    layers = []
    layers.append(ZeroPadding2D((1, 1), input_shape=(3, 224, 224), name='input'))
    layers.append(Conv2D(64, kernel_size=(3, 3), activation='relu', name='conv1_1'))
    layers.append(ZeroPadding2D((1, 1)))
    layers.append(Conv2D(64, kernel_size=(3, 3), activation='relu', name='conv1_2'))
    layers.append(MaxPooling2D((2, 2), strides=(2, 2)))
    layers.append(ZeroPadding2D((1, 1)))
    layers.append(Conv2D(128, kernel_size=(3, 3), activation='relu', name='conv2_1'))
    layers.append(ZeroPadding2D((1, 1)))
    layers.append(Conv2D(128, kernel_size=(3, 3), activation='relu', name='conv2_2'))
    layers.append(MaxPooling2D((2, 2), strides=(2, 2)))
    layers.append(ZeroPadding2D((1, 1)))
    layers.append(Conv2D(256, kernel_size=(3, 3), activation='relu', name='conv3_1'))
    layers.append(ZeroPadding2D((1, 1)))
    layers.append(Conv2D(256, kernel_size=(3, 3), activation='relu', name='conv3_2'))
    layers.append(ZeroPadding2D((1, 1)))
    layers.append(Conv2D(256, kernel_size=(3, 3), activation='relu', name='conv3_3'))
    layers.append(MaxPooling2D((2, 2), strides=(2, 2)))
    layers.append(ZeroPadding2D((1, 1)))
    layers.append(Conv2D(512, kernel_size=(3, 3), activation='relu', name='conv4_1'))
    layers.append(ZeroPadding2D((1, 1)))
    layers.append(Conv2D(512, kernel_size=(3, 3), activation='relu', name='conv4_2'))
    layers.append(ZeroPadding2D((1, 1)))
    layers.append(Conv2D(512, kernel_size=(3, 3), activation='relu', name='conv4_3'))
    layers.append(MaxPooling2D((2, 2), strides=(2, 2)))
    layers.append(ZeroPadding2D((1, 1)))
    layers.append(Conv2D(512, kernel_size=(3, 3), activation='relu', name='conv5_1'))
    layers.append(ZeroPadding2D((1, 1)))
    layers.append(Conv2D(512, kernel_size=(3, 3), activation='relu', name='conv5_2'))
    layers.append(ZeroPadding2D((1, 1)))
    layers.append(Conv2D(512, kernel_size=(3, 3), activation='relu', name='conv5_3'))
    layers.append(MaxPooling2D((2, 2), strides=(2, 2)))
    layers.append(Flatten(name='flat'))
    layers.append(Dense(4096, activation='relu', name='fc6'))
    layers.append(Dropout(0.5, name='dropout0'))
    layers.append(Dense(4096, activation='relu', name='fc7'))
    layers.append(Dropout(0.5, name='dropout1'))
    layers.append(Dense(1000, activation='softmax', name='softmax'))

    model = Sequential()
    for layer in layers:
        model.add(layer)
    # Truncate the network at the requested layer, then load only the
    # weights whose layer names still match.
    while model.layers[-1].name != self.layerKey:
        model.pop()
    model.load_weights(modelPath, by_name=True)
    self.model = TimeDistributed(model)
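# Context sketch (assumption): buildModel reads self.layerKey, the name of the
# last layer to keep. A minimal hypothetical owner class that truncates the
# network at the fc7 features and wraps it for per-timestep application:
class VGGFeatureExtractor(object):
    buildModel = buildModel  # reuse the method defined above

    def __init__(self, modelPath, layerKey='fc7'):
        self.layerKey = layerKey
        self.buildModel(modelPath)

# extractor = VGGFeatureExtractor('vgg16_weights.h5')  # hypothetical weights file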
def test_sequential_pop():
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim))
    model.add(Dense(num_classes))
    model.compile(loss='mse', optimizer='sgd')
    x = np.random.random((batch_size, input_dim))
    y = np.random.random((batch_size, num_classes))
    model.fit(x, y, epochs=1)

    model.pop()
    assert len(model.layers) == 1
    assert model.output_shape == (None, num_hidden)
    model.compile(loss='mse', optimizer='sgd')
    y = np.random.random((batch_size, num_hidden))
    model.fit(x, y, epochs=1)
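# Quick sketch of pop() semantics (assumptions checked informally against
# Keras 2.x): pop() mutates the model in place and recomputes output_shape,
# and a popped model must be recompiled before training, as the test above does.
from keras.models import Sequential
from keras.layers import Dense

m = Sequential([Dense(4, input_dim=2), Dense(1)])
m.pop()                             # drops the Dense(1) head
assert m.output_shape == (None, 4)  # shape now reflects the remaining layer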
# Legacy Keras 1 variant of the fit_generator test above
# (samples_per_epoch / nb_epoch / nb_val_samples / max_q_size API).
def test_sequential_fit_generator():
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    def data_generator(train):
        if train:
            max_batch_index = len(X_train) // batch_size
        else:
            max_batch_index = len(X_test) // batch_size
        i = 0
        while 1:
            if train:
                yield (X_train[i * batch_size: (i + 1) * batch_size],
                       y_train[i * batch_size: (i + 1) * batch_size])
            else:
                yield (X_test[i * batch_size: (i + 1) * batch_size],
                       y_test[i * batch_size: (i + 1) * batch_size])
            i += 1
            i = i % max_batch_index

    model = Sequential()
    model.add(Dense(nb_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_class))
    model.pop()
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit_generator(data_generator(True), len(X_train), nb_epoch)
    model.fit_generator(data_generator(True), len(X_train), nb_epoch,
                        validation_data=(X_test, y_test))
    model.fit_generator(data_generator(True), len(X_train), nb_epoch,
                        validation_data=data_generator(False),
                        nb_val_samples=batch_size * 3)
    model.fit_generator(data_generator(True), len(X_train), nb_epoch,
                        max_q_size=2)
    loss = model.evaluate(X_train, y_train)