# Assumes `mnist` was loaded earlier, e.g. with the old
# tensorflow.examples.tutorials.mnist input_data helper.
from keras.layers import Input, Dense, Activation
from keras.models import Model
from keras.callbacks import ModelCheckpoint

X_train = mnist.train.images
Y_train = mnist.train.labels
X_test = mnist.test.images
Y_test = mnist.test.labels
print(X_train.shape)
input_data = Input((28 * 28, ))
temp_data = Dense(128)(input_data)
temp_data = Activation('relu')(temp_data)
temp_data = Dense(64)(temp_data)
temp_data = Activation('relu')(temp_data)
temp_data = Dense(10)(temp_data)
output_data = Activation('softmax')(temp_data)
model = Model(inputs=[input_data], outputs=[output_data])
modelcheck = ModelCheckpoint('model.hdf5',
                             monitor='loss',
                             verbose=1,
                             save_best_only=True)
# Unused SGD alternative kept from the original; compile() below uses adadelta.
# sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
              optimizer='adadelta',
              metrics=['accuracy'])
model.fit(X_train, Y_train,
          batch_size=256,
          epochs=5,
          callbacks=[modelcheck],
          validation_data=(X_test, Y_test))

#Y_pred = model.predict_proba(X_test, verbose=0)
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test set score (val_loss): %.4f' % score[0])
print('Test set accuracy: %.4f' % score[1])
model.save('./model_bp/model1.hdf5')
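
# The mnist.train / mnist.test feeds above come from the long-deprecated
# tensorflow.examples.tutorials helper. A minimal sketch of producing the
# same flattened, one-hot arrays with keras.datasets instead (an
# assumption -- the original snippet does not show its data loading):
from keras.datasets import mnist as keras_mnist
from keras.utils import to_categorical

(x_tr, y_tr), (x_te, y_te) = keras_mnist.load_data()
x_tr = x_tr.reshape(-1, 28 * 28).astype('float32') / 255.0  # flatten, scale to [0, 1]
x_te = x_te.reshape(-1, 28 * 28).astype('float32') / 255.0
y_tr = to_categorical(y_tr, 10)  # one-hot labels, like mnist.train.labels
y_te = to_categorical(y_te, 10)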
Example #2
# %%
from keras import Input, layers
from keras import Model

input_tensor = Input(shape=(64,))
x = layers.Dense(32, activation='relu')(input_tensor)
x = layers.Dense(32, activation='relu')(x)
output_tensor = layers.Dense(10, activation='softmax')(x)
model = Model(input_tensor, output_tensor)
model.summary()

# %%
import numpy as np
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')

x_train = np.random.random((1000, 64))
y_train = np.random.random((1000, 10))

model.fit(x_train, y_train, epochs=10, batch_size=128)
score = model.evaluate(x_train, y_train)
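
# For comparison, the same three-layer architecture in the Sequential API
# (a sketch; the functional version above is what this example is about):
from keras.models import Sequential
from keras.layers import Dense

seq_model = Sequential()
seq_model.add(Dense(32, activation='relu', input_shape=(64,)))
seq_model.add(Dense(32, activation='relu'))
seq_model.add(Dense(10, activation='softmax'))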
Example #3
# Assumes `model` and the tokenized word-index arrays were built earlier.
from keras.callbacks import EarlyStopping, ModelCheckpoint

model_weight_file = './model_multi_head_attention.h5'
model_file = './model_multi_head_attention.model'
early_stopping = EarlyStopping(monitor='val_loss', patience=5)
model_checkpoint = ModelCheckpoint(model_weight_file, save_best_only=True, save_weights_only=True)
model.fit(x_train_word_index,
          y_train_index,
          batch_size=8,
          epochs=1000,
          verbose=2,
          callbacks=[early_stopping, model_checkpoint],
          validation_data=(x_dev_word_index, y_dev_index),
          shuffle=True)

model.load_weights(model_weight_file)
model.save(model_file)
evaluate = model.evaluate(x_test_word_index, y_test_index, batch_size=8, verbose=2)
print('loss value=' + str(evaluate[0]))
print('metrics value=' + str(evaluate[1]))

# no position embedding
# loss value=0.8326842557816279
# metrics value=0.7539682530221485

# TrigPosEmbedding
# loss value=1.0734127722089253
# metrics value=0.6111111111111112

# PositionEmbedding
# loss value=0.9529068337546455
# metrics value=0.6587301596762642
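
# The three result blocks above differ only in the position layer placed
# before the multi-head attention. A minimal sketch of wiring one in,
# assuming the keras-pos-embd package (vocab size and embedding width are
# placeholders; the original model definition is not shown):
from keras.layers import Input, Embedding
from keras_pos_embd import TrigPosEmbedding

tokens = Input(shape=(None, ))                  # token index sequence
emb = Embedding(input_dim=10000, output_dim=64)(tokens)
emb = TrigPosEmbedding(mode=TrigPosEmbedding.MODE_ADD)(emb)  # add sinusoidal positions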
Example #4
import numpy as np
from keras.layers import Input, Masking, LSTM, Concatenate
from keras.models import Model

n_samples = 2
dx = 2
dy = 3
dout = 7
mask_value = -1

X = np.random.randint(5, size=(n_samples, dx, dy))
X[1, 0, :] = mask_value

inp = Input(shape=(dx, dy))
x = Masking(mask_value=mask_value)(inp)
lstm_fw = LSTM(dout, return_sequences=True, go_backwards=False)(x)
lstm_bw = LSTM(dout, return_sequences=True, go_backwards=True)(x)
concat = Concatenate(axis=-1)([lstm_fw, lstm_bw])
model_3 = Model(inputs=inp, outputs=concat)
model_3.summary()
model_3.set_weights(
    [np.ones(l.shape) * i for i, l in enumerate(model_3.get_weights(), 2)])
model_3.compile(optimizer="rmsprop", loss="mae")
y_true = np.ones((n_samples, dx, model_3.layers[-1].output_shape[-1]))
y_pred_3 = model_3.predict(X)
print(y_pred_3)
unmasked_loss = np.abs(1 - y_pred_3).mean()
masked_loss = np.abs(1 - y_pred_3[y_pred_3 != 0.0]).mean()
keras_loss = model_3.evaluate(X, y_true, verbose=0)
print(f"unmasked loss: {unmasked_loss}")
print(f"masked loss: {masked_loss}")
print(f"evaluate with Keras: {keras_loss}")
Example #5
# Assumed imports for this snippet; convert_one_hot is an external helper
# from the original project (a sketch of it follows the class).
import cv2
import numpy as np
from math import ceil
from keras import optimizers
from keras.applications.inception_v3 import InceptionV3
from keras.layers import Input, Dense, GlobalAveragePooling2D
from keras.models import Model
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import to_categorical


class Inceptionv3(object):
    def __init__(self, output_classes, batch_size, no_epochs,
                 input_image_size):
        self.input_image_size = input_image_size
        self.batch_size = batch_size
        self.no_epochs = no_epochs
        self.output_classes = output_classes

    def initialize_model(self):
        self.base_model = InceptionV3(
            weights='imagenet',
            include_top=False,
            input_tensor=Input(shape=self.input_image_size))
        x = self.base_model.output
        x = GlobalAveragePooling2D()(x)
        x = Dense(1024, activation='relu', name='last_layer')(x)
        self.predictions = Dense(self.output_classes,
                                 activation='softmax',
                                 name='final_output')(x)

        self.output_layer = self.predictions
        self.last_layer = x

        self.probs_layer_model = Model(inputs=self.base_model.input,
                                       outputs=self.predictions)
        self.last_layer_model = Model(inputs=self.base_model.input,
                                      outputs=self.last_layer)

        for layer in self.base_model.layers:
            layer.trainable = False
        self.optimizer = optimizers.RMSprop(lr=0.0001, decay=1e-6)
        self.probs_layer_model.compile(optimizer=self.optimizer,
                                       loss='categorical_crossentropy',
                                       metrics=['accuracy'])

        self.probs_layer_model.summary()

    def train_model(self, data_X, data_Y, cv_X, cv_Y):
        #data_X, data_Y = data_X.astype('float32'), data_Y.astype('float32')
        #cv_X, cv_Y = cv_X.astype('float32'), cv_Y.astype('float32')
        data_Y = convert_one_hot(data_Y, self.output_classes)
        cv_Y = convert_one_hot(cv_Y, self.output_classes)
        print(data_X.shape, cv_X.shape, data_Y.shape, cv_Y.shape)
        # Debug visualization from the original, shown one window per sample;
        # disabled here so training can run unattended.
        # for i in range(cv_X.shape[0]):
        #     print(data_Y[i])
        #     cv2.imshow('image', data_X[i])
        #     cv2.imshow('cropped_image', cv_X[i])
        #     cv2.waitKey(0)
        #     cv2.destroyAllWindows()
        print('Using real-time data augmentation.')
        datagen = ImageDataGenerator(featurewise_center=False,
                                     samplewise_center=False,
                                     featurewise_std_normalization=False,
                                     samplewise_std_normalization=False,
                                     zca_whitening=False,
                                     zca_epsilon=1e-06,
                                     rotation_range=30,
                                     width_shift_range=0.1,
                                     height_shift_range=0.1,
                                     shear_range=0.,
                                     zoom_range=0.,
                                     channel_shift_range=0.,
                                     fill_mode='nearest',
                                     cval=0.,
                                     horizontal_flip=True,
                                     vertical_flip=False,
                                     rescale=None,
                                     preprocessing_function=None,
                                     data_format=None,
                                     validation_split=0.0)
        datagen.fit(data_X)
        self.probs_layer_model.fit_generator(
            datagen.flow(data_X, data_Y, batch_size=self.batch_size),
            epochs=5,
            validation_data=(cv_X, cv_Y),
            steps_per_epoch=ceil(data_X.shape[0] / self.batch_size),
            workers=4)

        for i, layer in enumerate(self.base_model.layers):
            print(i, layer.name)

        # Fine-tune only the top two Inception blocks: freeze everything
        # below layer 249, unfreeze the rest (the split used in the Keras
        # applications docs).
        for layer in self.probs_layer_model.layers[:249]:
            layer.trainable = False
        for layer in self.probs_layer_model.layers[249:]:
            layer.trainable = True

        self.new_optimizer = optimizers.SGD(lr=0.0001, momentum=0.9)
        self.probs_layer_model.compile(optimizer=self.new_optimizer,
                                       loss='categorical_crossentropy',
                                       metrics=['accuracy'])
        self.probs_layer_model.fit_generator(
            datagen.flow(data_X, data_Y, batch_size=self.batch_size),
            epochs=self.no_epochs,
            validation_data=(cv_X, cv_Y),
            steps_per_epoch=ceil(data_X.shape[0] / self.batch_size),
            workers=4)

    def get_output(self, data_X, with_acc, data_Y):
        probs = self.probs_layer_model.predict(data_X)
        final_layer = self.last_layer_model.predict(data_X)
        preds = np.argmax(np.array(probs), axis=-1)
        if (with_acc):
            data_Y = to_categorical(data_Y)
            _, acc = self.probs_layer_model.evaluate(data_X, data_Y, verbose=0)
            return probs, preds, final_layer, acc
        else:
            return probs, preds, final_layer
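
# A minimal usage sketch (class count, image size, and the one-hot helper
# are assumptions; the original project defines convert_one_hot elsewhere):
def convert_one_hot(labels, num_classes):
    return to_categorical(labels, num_classes)

net = Inceptionv3(output_classes=5, batch_size=32, no_epochs=10,
                  input_image_size=(299, 299, 3))
net.initialize_model()
# net.train_model(train_X, train_Y, val_X, val_Y)
# probs, preds, features, acc = net.get_output(test_X, True, test_Y)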
Example #6
# Assumed imports for this snippet; convert_one_hot is the same external
# helper used in the previous example.
import numpy as np
from keras import backend as K
from keras import initializers
from keras.callbacks import EarlyStopping, LearningRateScheduler
from keras.layers import Input, Dense, Activation, Dropout
from keras.models import Model
from keras.preprocessing.image import ImageDataGenerator


class Neural_Network(object):
    def __init__(self,
                 epochs,
                 batch_size,
                 learning_rate,
                 input_size,
                 output_classes,
                 hidden_layers,
                 mc_dropout=False,
                 dropout_rate=None):
        self.epochs = epochs
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.input_size = input_size
        self.output_classes = output_classes
        self.hidden_layers = hidden_layers
        self.mc_dropout = mc_dropout
        self.train_dropout_rate = 0.0
        self.dropout_rate = dropout_rate

    def lr_schedule(self, epoch):
        lrate = self.learning_rate
        if epoch > 50:
            lrate = lrate / 10
        if epoch > 75:
            lrate = lrate / 5
        return lrate

    def create_tf_model(self, name):
        # self.model = Sequential()
        no_hidden_layers = len(self.hidden_layers)
        #
        # for i in range(no_hidden_layers):
        #    if(i == 0):
        #        self.model.add(Dense(self.hidden_layers[0], input_dim = self.input_size, activation = 'relu'))
        #    else:
        #        self.model.add(Dense(self.hidden_layers[i], activation = 'relu'))
        #
        # if(no_hidden_layers == 0):
        #    self.model.add(Dense(self.output_classes, input_dim = self.input_size, activation = 'sigmoid'))
        # else:
        #    self.model.add(Dense(self.output_classes, activation = 'sigmoid'))
        #
        self.inp = Input(shape=(self.input_size, ))
        outp = self.inp
        # dropout_rate is accepted in __init__ but was never used; honor it.
        drop_rate = self.dropout_rate if self.dropout_rate is not None else 0.5
        for i in range(no_hidden_layers):
            outp = Dense(self.hidden_layers[i],
                         activation='linear',
                         kernel_initializer=initializers.TruncatedNormal(
                             stddev=0.1),
                         bias_initializer=initializers.Constant(1))(outp)
            #kernel_regularizer = regularizers.l2(0.01)
            #, activity_regularizer = regularizers.l1(0.01)
            #outp = BatchNormalization()(outp)
            outp = Activation('relu')(outp)
            # training=True keeps dropout active at predict time (MC dropout);
            # None defers to the usual train/test learning phase.
            outp = Dropout(drop_rate)(outp,
                                      training=True if self.mc_dropout else None)

        outp = Dense(self.output_classes, activation='linear')(outp)
        self.predictions = Activation('softmax')(outp)
        #self.model = Model(self.inp, outp, name=name + '_keras')
        self.model = Model(self.inp, self.predictions, name=name + '_keras')

        print(self.model.layers[-3].output.shape)
        print(self.model.layers[-2].output.shape)
        self.get_final_layer_model_output = K.function(
            [self.model.layers[0].input], [self.model.layers[-3].output])
        #self.get_preds = K.function([self.model.layers[0].input], [self.predictions])
        self.model.compile(loss='categorical_crossentropy',
                           optimizer='adam',
                           metrics=['accuracy'])
        #loss='kullback_leibler_divergence'
        #self.model.summary()
        #for layer in self.model.layers:
        #    print (layer)

    def train_model(self, data_X, data_Y, cv_X, cv_Y):
        #data_X, data_Y = data_X.astype('float32'), data_Y.astype('float32')
        #cv_X, cv_Y = cv_X.astype('float32'), cv_Y.astype('float32')
        data_Y = convert_one_hot(data_Y, self.output_classes)
        cv_Y = convert_one_hot(cv_Y, self.output_classes)

        es_callback = EarlyStopping(monitor='val_loss', patience=3)
        print(data_X.shape, data_Y.shape, cv_X.shape, cv_Y.shape)
        #log_dir="logs/CNN/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        #tensorboard_callback = TensorBoard(log_dir=log_dir, histogram_freq=1)
        # TODO: add cross-validation here
        #self.model.fit(data_X, data_Y, epochs = self.no_epochs, batch_size = self.batch_size, callbacks=[tensorboard_callback])
        #self.model.fit(data_X, data_Y, epochs = self.no_epochs, batch_size = self.batch_size, validation_data = (cv_X, cv_Y), shuffle = True)

        #selector = SelectKBest(f_classif, k=1000)
        #selected_cv_features = selector.fit_transform(data_X, data_Y)
        #print(selected_cv_features.shape)
        print('Using real-time data augmentation.')
        # NOTE: this generator is configured but never used below; the model
        # is trained with model.fit on the raw arrays.
        datagen = ImageDataGenerator(
            featurewise_center=False,             # set input mean to 0 over the dataset
            samplewise_center=False,              # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,   # divide each input by its std
            zca_whitening=False,                  # apply ZCA whitening
            zca_epsilon=1e-06,                    # epsilon for ZCA whitening
            rotation_range=0,                     # randomly rotate images (degrees, 0 to 180)
            width_shift_range=0.1,                # random horizontal shift (fraction of width)
            height_shift_range=0.1,               # random vertical shift (fraction of height)
            shear_range=0.,                       # range for random shear
            zoom_range=0.,                        # range for random zoom
            channel_shift_range=0.,               # range for random channel shifts
            fill_mode='nearest',                  # fill for points outside the input boundaries
            cval=0.,                              # value used for fill_mode = "constant"
            horizontal_flip=True,                 # randomly flip images horizontally
            vertical_flip=False,                  # randomly flip images vertically
            rescale=None,                         # rescaling factor (applied before other transforms)
            preprocessing_function=None,          # function applied to each input
            data_format=None,                     # "channels_first" or "channels_last"
            validation_split=0.2)                 # fraction of images held out for validation
        #datagen.fit(data_X)

        #log_dir="logs/ANN/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        #tensorboard_callback = TensorBoard(log_dir=log_dir, histogram_freq=1)

        #self.model.fit_generator(datagen.flow(data_X, data_Y, batch_size = self.batch_size), epochs = self.no_epochs,
        #                        validation_data = (cv_X, cv_Y), steps_per_epoch = ceil(data_X.shape[0]/self.batch_size), workers = 4, callbacks = [tensorboard_callback])

        print(self.epochs, self.batch_size)
        history = self.model.fit(
            data_X,
            data_Y,
            epochs=self.epochs,
            batch_size=self.batch_size,
            validation_data=(cv_X, cv_Y),
            shuffle=True,
            verbose=0,
            callbacks=[LearningRateScheduler(self.lr_schedule)])
        #tensorboard_callback,
        #get_plot(history)

        scores = self.model.evaluate(data_X, data_Y)
        print("Accuracy obtained is : %.2f%%" % (scores[1] * 100))

    def get_predictions(self, data_X, with_acc, data_Y):
        #data_X = data_X.reshape(data_X.shape[0], self.input_size)
        # The original else-branch called self.get_preds, which is commented
        # out in create_tf_model; model.predict gives the same probabilities.
        probs = self.model.predict(data_X)
        final_layer = self.get_final_layer_model_output([data_X])[0]
        preds = np.argmax(np.array(probs), axis=-1)
        if with_acc:
            acc = self.model.evaluate(data_X, data_Y)[1] * 100
            return probs, preds, final_layer, acc
        return probs, preds, final_layer
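
# A minimal usage sketch of the MC-dropout path (layer sizes and data are
# assumptions): with mc_dropout=True the Dropout layers stay stochastic at
# predict time, so repeated forward passes sample the predictive
# distribution and their spread estimates uncertainty.
net = Neural_Network(epochs=100, batch_size=32, learning_rate=0.001,
                     input_size=64, output_classes=10,
                     hidden_layers=[128, 64], mc_dropout=True,
                     dropout_rate=0.5)
net.create_tf_model('mc_demo')
# After training:
# mc_probs = np.stack([net.model.predict(test_X) for _ in range(20)])
# mean_probs, std_probs = mc_probs.mean(axis=0), mc_probs.std(axis=0)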
Example #7
# Assumed imports and inputs for this snippet: three 1000-dim branches,
# matching the X_test slices passed to evaluate() below; nb_hidden and
# nb_classes are defined elsewhere in the original project.
from keras.layers import Input, Dense, Dropout, Average, Maximum, Concatenate
from keras.models import Model
from keras.utils import np_utils

input1 = Input(shape=(1000, ))
input2 = Input(shape=(1000, ))
input3 = Input(shape=(1000, ))

avgL = Average()([input1, input2, input3])
maxL = Maximum()([input1, input2, input3])
concatL = Concatenate()([avgL, maxL])
layer1 = Dense(nb_hidden, bias_initializer='zeros', activation='relu')(concatL)
layer1 = Dropout(0.75)(layer1)
out = Dense(nb_classes,
            bias_initializer='zeros',
            kernel_initializer='identity',
            activation='softmax')(layer1)
model = Model([input1, input2, input3], out)
#model = load_model('genFit_ens450.h5')

model.compile(loss='categorical_crossentropy',
              optimizer='adadelta',
              metrics=['accuracy'])
G = myGeneratorTrain()
X_i, Y_i = next(G)  # pull one batch to warm up / sanity-check shapes
model.fit(X_i, Y_i)
debugModel = Model(inputs=model.input, outputs=model.layers[0].output)
history = model.fit_generator(myGeneratorTrain(),
                              steps_per_epoch=400,
                              epochs=200,
                              verbose=2,
                              validation_data=myGeneratorVal(),
                              validation_steps=100)

model.evaluate([X_test[:, 0:1000], X_test[:, 1000:2000], X_test[:, 2000:3000]],
               np_utils.to_categorical(Y[testIdx] - 1, 1000))
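
# myGeneratorTrain / myGeneratorVal are not defined in this snippet. A
# minimal sketch of the interface they must satisfy (batch size, shapes,
# and the 1000-class label space are assumptions); myGeneratorVal would
# be analogous over the validation split:
import numpy as np

def myGeneratorTrain(batch_size=32):
    while True:
        X = np.random.random((batch_size, 3000))
        y = np_utils.to_categorical(
            np.random.randint(1000, size=batch_size), 1000)
        # three parallel 1000-dim views, matching the model's three inputs
        yield [X[:, 0:1000], X[:, 1000:2000], X[:, 2000:3000]], y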