示例#1
0
def construct_model():
    """Build and compile the LSTM model for the globally-selected label.

    Relies on module-level globals: ``user_id_max``, ``max_len``,
    ``embedding_size``, ``label_name``, ``model_type``, ``RMSProp_lr``.

    :return: the compiled Sequential model.
    """
    output_parameters()
    model = Sequential()
    model.add(Input(shape = (user_id_max, max_len)))
    model.add(LSTM(embedding_size))

    # Output head depends on the target: "age" is a 10-way
    # classification, "gender" a binary one.
    # (Fix: removed the dead `pass` statements after the if/elif chains.)
    if label_name == "age":
        model.add(Dense(10, activation = 'softmax'))
    elif label_name == 'gender':
        model.add(Dense(1, activation = 'sigmoid'))
    print("%s——模型构建完成!" % model_type)

    print("* 编译模型")
    # NOTE(review): for any other label_name the model is returned
    # uncompiled — confirm callers only pass "age" or "gender".
    if label_name == "age":
        model.compile(optimizer = optimizers.RMSprop(lr = RMSProp_lr),
                      loss = losses.sparse_categorical_crossentropy,
                      metrics = [metrics.sparse_categorical_accuracy])
    elif label_name == 'gender':
        model.compile(optimizer = optimizers.RMSprop(lr = RMSProp_lr),
                      loss = losses.binary_crossentropy,
                      metrics = [metrics.binary_accuracy])
    print("%s——模型编译完成!" % model_type)
    return model
    def createModel( self, inputs, outputs, hiddenLayers, activationType, learningRate ):
        """Build and compile an MLP Q-network with a linear output layer.

        :param inputs: unused; input size comes from ``self.input_size``
        :param outputs: unused; output size comes from ``self.output_size``
        :param hiddenLayers: list of hidden-layer widths (may be empty)
        :param activationType: activation name, or 'LeakyReLU'
        :param learningRate: RMSprop learning rate
        :return: the compiled Sequential model
        """
        model   = Sequential()
        if len(hiddenLayers) == 0:
            # No hidden layers: single linear input->output mapping.
            # Fix: use the Keras-2 kwarg 'kernel_initializer' like the rest
            # of this method — the old Keras-1 'init=' kwarg is rejected by
            # Keras 2.
            model.add( Dense(self.output_size,
                                input_shape=(self.input_size,),
                                kernel_initializer='lecun_uniform') )
            model.add( Activation('linear') )
        else:
            model.add( Dense(hiddenLayers[0],
                                input_shape=(self.input_size,),
                                kernel_initializer='lecun_uniform') ) 
            if activationType == 'LeakyReLU':
                model.add( LeakyReLU(alpha=0.01) )
            else:
                model.add( Activation(activationType) )

            # Remaining hidden layers.
            for index in range(1,len(hiddenLayers)):
                layerSize   = hiddenLayers[index]
                model.add( Dense(layerSize,kernel_initializer='lecun_uniform') )
                if activationType == 'LeakyReLU':
                    model.add( LeakyReLU(alpha=0.01) )
                else:
                    model.add( Activation(activationType) )
            # Linear output layer (Q-values).
            model.add( Dense(self.output_size,kernel_initializer='lecun_uniform') )
            model.add( Activation('linear') )
        optimizer   = optimizers.RMSprop( lr = learningRate, rho = 0.9, epsilon = 1e-6 )
        model.compile( loss = "mse", optimizer = optimizer )
        model.summary()
        return model
示例#3
0
 def get_opt():
     """Build the optimizer selected by the global ``opt`` string.

     Supports 'adam' and 'rmsprop' (both with global ``lr`` and
     gradient-norm clipping at 1.0); anything else raises.
     """
     if opt == 'rmsprop':
         return optimizers.RMSprop(lr=lr, clipnorm=1.)
     if opt == 'adam':
         return optimizers.Adam(lr=lr, clipnorm=1.)
     raise Exception('Only Adam and RMSProp are available here')
示例#4
0
    def contruModelo(self):
        """Build and compile the CNN: three conv+pool stages, then a
        dense head with a softmax over ``self.cantidad_acciones``."""
        cnn = Sequential()
        # (filters, kernel size, is-first-layer) for the three conv stages;
        # only the first layer declares the input shape.
        conv_specs = [
            (self.filtrosConv1, self.tamano_filtro1, True),
            (self.filtrosConv2, self.tamano_filtro2, False),
            (self.filtrosConv3, self.tamano_filtro3, False),
        ]
        for filtros, tamano, primera in conv_specs:
            kwargs = {'padding': "same", 'activation': 'relu'}
            if primera:
                kwargs['input_shape'] = (self.altura, self.longitud, 3)
            cnn.add(Convolution2D(filtros, tamano, **kwargs))
            cnn.add(MaxPooling2D(pool_size=self.tamano_pool))

        cnn.add(Flatten())
        cnn.add(Dense(256, activation='relu'))
        cnn.add(Dense(self.cantidad_acciones, activation='softmax'))

        cnn.compile(loss='mse',
                    optimizer=optimizers.RMSprop(lr=self.aprendizaje))
        return cnn
示例#5
0
    def build_model(self):
        """Assemble and compile the CNN policy network.

        Three conv layers feed a flatten + softmax head sized to
        ``self.action_size``; compiled with categorical cross-entropy and
        RMSprop at ``self.LEARNING_RATE``. Also creates the TensorBoard
        callback stored on ``self.tensorBoard``.
        """
        net = Sequential()
        # First conv layer fixes the 24x10 single-channel input and adds
        # L2 weight regularization.
        net.add(Conv2D(32, (4, 4), strides=(2, 2), input_shape=(24, 10, 1),
                       kernel_initializer=initializers.glorot_uniform(),
                       activation=activations.relu,
                       kernel_regularizer=regularizers.l2(0.01)))
        net.add(Conv2D(64, (3, 3), strides=(1, 1), activation=activations.relu))
        net.add(Conv2D(128, (2, 2), strides=(1, 1), activation=activations.relu))
        net.add(Flatten())
        net.add(Dense(self.action_size, activation=activations.softmax))

        net.compile(loss=losses.categorical_crossentropy,
                    optimizer=optimizers.RMSprop(lr=self.LEARNING_RATE))
        self.tensorBoard = TensorBoard('./logs/RLAgent', histogram_freq=0,
                                       write_graph=True, write_images=True)
        net.summary()
        return net
示例#6
0
def create_model(summary):
    """Build and compile a binary-classification CNN.

    Five conv/pool stages feed a 512-unit dense layer and a sigmoid
    output; compiled with binary cross-entropy and RMSprop(1e-4).

    :param summary: if truthy, print the model summary.
    :return: the compiled model.
    """
    model = Sequential()
    model.add(
        Conv2D(
            32,
            (7, 7),
            # number of image channels (VERIFY!)
            input_shape=IMAGE_SIZE,
            activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (5, 5), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (5, 5), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dropout(0.5))
    model.add(Dense(512, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer=optimizers.RMSprop(lr=1e-4),
                  metrics=['acc'])

    # Idiom fix: truthiness test instead of `summary == True`.
    if summary:
        print(model.summary())

    return model
示例#7
0
def main():
    '''Train a small binary classifier and plot loss/accuracy curves.'''
    (x_train, y_train), (x_test, y_test) = import_data()

    # Hold out the first 10k samples for validation.
    n_val = 10000
    x_val, partial_x_train = x_train[:n_val], x_train[n_val:]
    y_val, partial_y_train = y_train[:n_val], y_train[n_val:]

    # Build and compile the model.
    model = build_model(n_layers=5, layer_size=32)
    model.compile(optimizer=optimizers.RMSprop(lr=0.001),
                  loss='binary_crossentropy',
                  metrics=[metrics.binary_accuracy])

    history = model.fit(partial_x_train, partial_y_train,
                        epochs=20, batch_size=512,
                        validation_data=(x_val, y_val))

    # Plot training curves.
    plot_loss_results(history)
    plot_acc_results(history)
示例#8
0
    def instantiate_model(self):
        """Create the resnet model, resolve the optimizer, and set up the
        (optional) augmenting data generator."""
        self.model = create_model_resnet(
            self.input_shape,
            n_output=self.n_output,
            normalize=self.normalize,
            kernel_shape=self.kernel_shape,
            size_blocks=self.size_blocks,
            resnet=self.resnet,
            dropout=self.dropout,
            n_channels_by_block=self.n_channels_by_block,
            size_dense=self.size_dense,
            average_pooling=self.average_pooling,
            separable_conv=self.separable_conv)
        print(self.model.summary())

        # Replace the optimizer *name* with a constructed optimizer
        # instance; unknown names leave it as None.
        chosen = self.optimizer
        if chosen == 'adamax':
            self.optimizer = optimizers.Adamax(lr=self.lr)
        elif chosen == 'rmsprop':
            self.optimizer = optimizers.RMSprop(lr=self.lr)
        elif chosen == 'sgd':
            self.optimizer = optimizers.SGD(lr=self.lr, momentum=.9)
        elif chosen == 'adam':
            self.optimizer = optimizers.Adam(lr=self.lr)
        else:
            self.optimizer = None

        # Augmentation options, strongest configuration first.
        if self.zoom:
            self.datagen = ImageDataGenerator(rotation_range=10,
                                              width_shift_range=0.1,
                                              height_shift_range=0.1,
                                              zoom_range=0.1,
                                              horizontal_flip=True,
                                              fill_mode='nearest')
        elif self.shift:
            self.datagen = ImageDataGenerator(width_shift_range=0.1,
                                              height_shift_range=0.1,
                                              fill_mode='nearest')
        elif self.flip:
            self.datagen = ImageDataGenerator(horizontal_flip=bool(self.flip))
        else:
            self.datagen = None
 def __init__(self, model):
     """Wrap *model* and compile it with RMSprop and two-headed losses.

     :param model: a Keras model whose outputs are [policy logits, value].
     """
     # hyperparameters for loss terms, gamma is the discount coefficient
     self.params = {'gamma': 0.99, 'value': 0.5, 'entropy': 0.0001}
     self.model = model
     self.model.compile(
         optimizer=optimizers.RMSprop(lr=0.0007),
         # define separate losses for policy logits and value estimate
         loss=[self._logits_loss, self._value_loss])
示例#10
0
    def createRegularizedModel( self, 
                                inputs, 
                                outputs, 
                                hiddenLayers,       # List of nodes at each hidden layer
                                activationType, 
                                learningRate ):
        """Build and compile an L2-regularized MLP Q-network.

        :param inputs: unused; input size comes from ``self.input_size``
        :param outputs: unused; output size comes from ``self.output_size``
        :param hiddenLayers: list of hidden-layer widths (may be empty)
        :param activationType: activation name, or 'LeakyReLU'
        :param learningRate: RMSprop learning rate
        :return: the compiled Sequential model
        """
        bias    = True
        dropout = 0
        regularizationFactor    = 0.01
        model   = Sequential()
        if len(hiddenLayers) == 0:
            # No hidden layers: single linear input->output mapping.
            model.add( Dense(self.output_size,
                                input_shape=(self.input_size,),
                                init='lecun_uniform',bias=bias) )
            model.add( Activation("linear") )
        else:
            if regularizationFactor > 0:
                model.add( Dense(hiddenLayers[0],
                                    input_shape=(self.input_size,),
                                    init='lecun_uniform',
                                    W_regularizer=l2(regularizationFactor),
                                    bias=bias) )
            else:
                model.add( Dense(hiddenLayers[0],
                                    input_shape=(self.input_size,),
                                    init='lecun_uniform',
                                    bias=bias) )
            if activationType == 'LeakyReLU':
                model.add( LeakyReLU(alpha=0.01) )
            else:
                model.add( Activation(activationType) )

            for index in range(1,len(hiddenLayers)):
                # Bug fix: the original assigned `layerSize = hiddenSize[index]`
                # here — `hiddenSize` is undefined (NameError whenever more
                # than one hidden layer is requested) and `layerSize` was
                # never used, so the dead assignment is removed.
                if regularizationFactor > 0.0:
                    model.add( Dense(hiddenLayers[index],
                                        init='lecun_uniform',
                                        W_regularizer=l2(regularizationFactor),
                                        bias=bias) )
                else:
                    model.add( Dense(hiddenLayers[index],
                                    init='lecun_uniform',
                                    bias=bias) )
                if activationType == "LeakyReLU":
                    model.add( LeakyReLU(alpha=0.01) )
                else:
                    model.add( Activation(activationType) )
                if dropout > 0:
                    model.add( Dropout(dropout) )
            # Linear output layer (Q-values).
            model.add( Dense(self.output_size,
                                init='lecun_uniform',
                                bias=bias) )
            model.add( Activation("linear") )
        optimizer   = optimizers.RMSprop( lr = learningRate, rho = 0.9, epsilon = 1e-6 )
        model.compile( loss = "mse", optimizer = optimizer )
        model.summary()
        return model
示例#11
0
    def load_fcnn():
        """Load the dense model from disk and compile it.

        Reads ``dense1.json`` (architecture) and ``dense1.h5`` (weights).

        :return: the compiled model.
        """
        # Context manager guarantees the handle is closed even on error
        # (the original opened/closed the file manually).
        with open("dense1.json", "r") as file_json:
            load_json = file_json.read()
        load = model_from_json(load_json)
        load.load_weights("dense1.h5")

        load.compile(loss=losses.binary_crossentropy,
                     optimizer=optimizers.RMSprop(lr=0.001),
                     metrics=[metrics.binary_accuracy])
        return load
示例#12
0
    def load_cnn():
        """Load the embedding model from disk and compile it.

        Reads ``emb1.json`` (architecture) and ``emb1.h5`` (weights).

        :return: the compiled model.
        """
        # Context manager guarantees the handle is closed even on error
        # (the original opened/closed the file manually).
        with open("emb1.json", "r") as file_json:
            load_json = file_json.read()
        load = model_from_json(load_json)
        load.load_weights("emb1.h5")

        load.compile(loss='binary_crossentropy',
                     optimizer=optimizers.RMSprop(lr=1e-4),
                     metrics=['acc'])
        return load
示例#13
0
def load_model():
    """Load and compile the saved review model, if present on disk.

    :return: the compiled model, or None when either the architecture
        JSON or the weights file is missing.
    """
    # Use the previously saved model when one exists.
    model = None
    if os.path.isfile('./review_model.json') and os.path.isfile('./review_model_weight.h5'):
        # Context manager guarantees the file is closed even on error
        # (the original opened/closed it manually).
        with open("./review_model.json", "r") as json_file:
            model_json = json_file.read()
        model = models.model_from_json(model_json)

        model.load_weights("./review_model_weight.h5")

        model.compile(optimizer=optimizers.RMSprop(lr=0.001),
             loss=losses.binary_crossentropy,
             metrics=[metrics.binary_accuracy])
    return model
示例#14
0
def create_model(summary):
    '''
    Build the convolutional network architecture and create the model.

    :param summary:
    True - print the network description
    False - do not print it

    :return:
    model - the network object

    The architecture is tuned for images of size [38, 390, 1].
    '''
    model = Sequential()
    model.add(
        Conv2D(
            32,
            (3, 3),  # (3, 3) instead of (7, 7)
            # number of image channels (VERIFY!)
            input_shape=IMAGE_SIZE,
            activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3), activation='relu'))  # (3, 3) instead of (5, 5)
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3), activation='relu'))  # (3, 3) instead of (5, 5)
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dropout(0.5))
    model.add(Dense(512, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))

    model.compile(loss=LOSS_FUNCTION,
                  optimizer=optimizers.RMSprop(lr=LR),
                  metrics=['acc'])

    # Idiom fix: truthiness test instead of `summary == True`
    # (the original docstring also had the typo "Felse").
    if summary:
        print(model.summary())

    return model
示例#15
0
def create_gru(weights, input_shape, dropout_rate, learning_rate):
    """
    Build and compile a single-layer GRU regression network.

    :param weights:         The amount of output weights for GRU layer
    :param input_shape:     The shape of the inputs
    :param dropout_rate:    The dropout rate after GRU layer
    :param learning_rate:   The learning rate of the optimizer
    :return:                The compiled model
    """
    from tensorflow.python.keras import Sequential, optimizers
    from tensorflow.python.keras.layers import GRU, Dropout, Dense

    # GRU -> dropout -> single linear output, trained with MSE.
    net = Sequential()
    net.add(GRU(weights, input_shape=input_shape))
    net.add(Dropout(dropout_rate))
    net.add(Dense(1))
    net.compile(optimizers.RMSprop(lr=learning_rate), loss='mse')
    return net
示例#16
0
    def optimizer(self):
        """Resolve ``self._optimizer`` to a Keras optimizer instance.

        ``self._optimizer`` may arrive as a string ('rms', 'adam', 'sgd')
        or as an already-constructed optimizer. Strings are replaced with
        a configured instance, using ``self._optim_config`` when set and
        per-optimizer defaults otherwise. Unrecognized values are logged
        as errors and returned unchanged.

        :return: the resolved optimizer (or the unrecognized original).
        """
        if self._optimizer == 'rms':
            if not self._optim_config:
                self._optim_config = {
                    'lr': 1e-5,
                    'decay': 0.9,
                    'rho': 0.9,
                    'epsilon': 1e-10
                }
            self._optimizer = optimizers.RMSprop(**self._optim_config)
        elif self._optimizer == 'adam':
            if not self._optim_config:
                self._optim_config = {
                    'lr': 1e-5,
                    'beta_1': 0.9,
                    'beta_2': 0.999,
                    'epsilon': 1e-08,
                    'decay': 0.0,
                    'amsgrad': False
                }
            self._optimizer = optimizers.Adam(**self._optim_config)
        elif self._optimizer == 'sgd':
            if not self._optim_config:
                self._optim_config = {
                    'lr': 1e-5,
                    'momentum': 0.0,
                    'decay': 0.8,
                    'nesterov': False
                }
            self._optimizer = optimizers.SGD(**self._optim_config)
        # Idiom fix: isinstance() instead of `type(...) not in [...]`, so
        # subclasses of the supported optimizers are also accepted.
        elif not isinstance(self._optimizer,
                            (optimizers.Adam, optimizers.SGD,
                             optimizers.RMSprop)):
            logging.error('Unrecognized optimizer type')

        return self._optimizer
示例#17
0
    def instantiate_model(self):
        """Create the resnet model and resolve the optimizer name into an
        optimizer instance; data augmentation is not set up yet (TODO)."""
        self.model = create_model_resnet(
            self.input_shape,
            n_output=self.n_output,
            normalize=self.normalize,
            kernel_shape=self.kernel_shape,
            size_blocks=self.size_blocks,
            resnet=self.resnet,
            dropout=self.dropout,
            n_channels_by_block=self.n_channels_by_block,
            size_dense=self.size_dense,
            average_pooling=self.average_pooling,
            separable_conv=self.separable_conv)
        print(self.model.summary())

        # Replace the optimizer *name* with a constructed optimizer
        # instance; unknown names leave it as None.
        chosen = self.optimizer
        if chosen == 'adamax':
            self.optimizer = optimizers.Adamax(lr=self.lr)
        elif chosen == 'rmsprop':
            self.optimizer = optimizers.RMSprop(lr=self.lr)
        elif chosen == 'sgd':
            self.optimizer = optimizers.SGD(lr=self.lr, momentum=.9)
        elif chosen == 'adam':
            self.optimizer = optimizers.Adam(lr=self.lr)
        else:
            self.optimizer = None

        # TODO: data augmentation not configured for this variant.
        self.datagen = None
示例#18
0
 def testRMSpropCompatibility(self):
     """Check that v1 ``optimizers.RMSprop`` and v2 ``rmsprop.RMSprop``
     produce compatible results via the shared compatibility harness."""
     opt_v1 = optimizers.RMSprop()
     opt_v2 = rmsprop.RMSprop()
     self._testOptimizersCompatibility(opt_v1, opt_v2)
示例#19
0
                  padding="same"))
# Second conv block: 64 2x2 filters, stride 1, ReLU.
cifar_model.add(
    layers.Conv2D(filters=64,
                  kernel_size=[2, 2],
                  strides=1,
                  activation=activations.relu))
cifar_model.add(layers.MaxPool2D(pool_size=(2, 2)))
cifar_model.add(layers.Dropout(0.25))

# Classifier head: dense 512 + softmax over num_classes.
cifar_model.add(layers.Flatten())
cifar_model.add(layers.Dense(units=512, activation=activations.relu))
cifar_model.add(layers.Dropout(0.5))
cifar_model.add(layers.Dense(units=num_classes,
                             activation=activations.softmax))

cifar_model.compile(optimizer=optimizers.RMSprop(lr=0.001, decay=1e-6),
                    loss=losses.categorical_crossentropy,
                    metrics=['accuracy'])

# # Run the model

# In[ ]:

if not data_augmentation:
    print('Not using data augmentation.')
    cifar_model.fit(x=x_train,
                    y=y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    validation_data=(x_test, y_test))
示例#20
0
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True,
                                   fill_mode='nearest')
# Validation images are only rescaled — no augmentation.
validation_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(train_dir,
                                                    target_size=(150, 150),
                                                    batch_size=20,
                                                    class_mode='binary')
validation_generator = validation_datagen.flow_from_directory(
    validation_dir, target_size=(150, 150), batch_size=20, class_mode='binary')
# Binary classification, small LR (2e-5) suitable for fine-tuning.
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=2e-5),
              metrics=['acc'])
history = model.fit_generator(train_generator,
                              steps_per_epoch=100,
                              epochs=30,
                              validation_data=validation_generator,
                              validation_steps=50)
# Save the trained model with a timestamped filename.
import time
now = time.strftime('%Y-%m-%d %H-%M-%S')
file_path = "E:\\1- data\\models\\" + now + " cats_and_dogs VGG16-数据增强.h5"
model.save(file_path)
# Plot the training curves below.
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
示例#21
0
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dropout(0.5))  # Dropout to reduce overfitting
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))

# print(model.summary())

model.compile(optimizer=optimizers.RMSprop(lr=1e-4),
              loss='binary_crossentropy',
              metrics=['acc'])

# Read images from directories; heavy augmentation on training data only.
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   rotation_range=40,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(train_dir,
示例#22
0
                return_sequences=True)(lang_gru2,
                                       initial_state=model_map_layer)

lang_out = Dense(num_words, activation='linear', name='lang_out')(lang_gru3)
language_model = Model(inputs=[image_activation_input, lang_model_input],
                       outputs=[lang_out])


def sparse_cross_entropy(y_true, y_pred):
    """Mean sparse softmax cross-entropy between integer labels and logits."""
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_true,
                                                          logits=y_pred)
    loss_mean = tf.reduce_mean(loss)
    return loss_mean


optimizer = optimizers.RMSprop()
# Targets are fed as int32 token ids via a placeholder (no one-hot arrays).
decoder_target = tf.placeholder(dtype='int32', shape=(None, None))
language_model.compile(optimizer=optimizer,
                       loss=sparse_cross_entropy,
                       target_tensors=[decoder_target])

# Checkpoint weights each epoch; log to TensorBoard.
path_checkpoint = 'model_weights.keras'
callback_checkpoint = ModelCheckpoint(filepath=path_checkpoint,
                                      verbose=1,
                                      save_weights_only=True)
callback_tensorboard = TensorBoard(log_dir='./train_logs/',
                                   histogram_freq=0,
                                   write_graph=False)
callbacks = [callback_checkpoint, callback_tensorboard]
for i in range(epoch_start, epoch_end, 1):
示例#23
0
# Four conv/pool stages, then dropout + dense head for binary output.
model = models.Sequential()
model.add(
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D(2, 2))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D(2, 2))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D(2, 2))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D(2, 2))
model.add(layers.Flatten())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-4),
              metrics=['acc'])

# Train the CNN with an augmenting data generator.
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   rotation_range=40,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
validation_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(train_dir,
                                                    target_size=(150, 150),
示例#24
0
# Second conv block: two 64-filter 3x3 convs, pool, dropout.
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

# Classifier head: dense 512 + softmax over num_classes.
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))

# initiate RMSprop optimizer
opt = optimizers.RMSprop(lr=0.0001, decay=1e-6)

# Let's train the model using RMSprop
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])

# Scale pixel values into [0, 1].
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
if not data_augmentation:
    print('Not using data augmentation.')
    model.fit(x_train,
              y_train,
示例#25
0
# Extract conv-base features for each split, then flatten them to
# (n_samples, 4*4*512) vectors for the dense classifier below.
train_features, train_labels = extract_features(train_dir, 2000)
val_features, val_labels = extract_features(val_dir, 1000)
test_features, test_labels = extract_features(test_dir, 1000)

train_features = np.reshape(train_features, (2000, 4 * 4 * 512))
val_features = np.reshape(val_features, (1000, 4 * 4 * 512))
test_features = np.reshape(test_features, (1000, 4 * 4 * 512))

# Defining and training dense layer
from tensorflow.python.keras import models, layers, optimizers
model = models.Sequential()
model.add(layers.Dense(256, activation='relu', input_dim=4 * 4 * 512))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1, activation='sigmoid'))

model.compile(optimizer=optimizers.RMSprop(lr=2e-5),
              loss='binary_crossentropy',
              metrics=['acc'])

history = model.fit(train_features,
                    train_labels,
                    epochs=num_epochs,
                    batch_size=batch_size,
                    validation_data=(val_features, val_labels))

# Plotting results
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
test_data_dir = 'NYU/test/haze/'
test_label_dir = 'NYU/test/gt/'


batch_size = 1

# Channel ordering depends on the Keras backend configuration.
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_height, img_width)
else:
    input_shape = (img_height, img_width, 3)

# U-Net for dehazing: 480x640 RGB input, single-channel output.
model = get_unet(1,480,640,3, True)

######## Optimizers ########
opt = optimizers.RMSprop(lr=0.001, decay=0.0001, rho=0.9)
#opt = optimizers.Adam(lr=0.001,decay=0.0001)

model.compile(loss='mean_squared_error',optimizer=opt,metrics=['accuracy'])
model.summary()  


##################	MODEL SPECIFICATIONS #########################

epochs = 25
# Collect train/test image and ground-truth paths.
train_path = glob.glob(train_data_dir+'/'+'*.jpg')
label_path = glob.glob(train_label_dir+'/'+'*.jpg')

test_X = glob.glob(test_data_dir+'/'+'*.jpg')
test_Y = glob.glob(test_label_dir+'/'+'*.jpg')
示例#27
0
# Minimal MLP: one 512-unit ReLU layer into a 10-way softmax.
from tensorflow.python.keras import models,layers,optimizers
network = models.Sequential()
network.add(layers.Dense(512,activation='relu',input_shape=(28*28,)))
network.add(layers.Dense(10,activation='softmax'))
# NOTE(review): 'mse' loss with a softmax output is unusual —
# categorical_crossentropy is the conventional choice; confirm intent.
network.compile(optimizer = optimizers.RMSprop(lr=0.001),
               loss = 'mse',
               metrics = ['accuracy'])
示例#28
0
def construct_model(creative_id_num, embedding_size, max_len, RMSProp_lr,
                    model_type = "MLP", label_name = "gender"):
    '''
        Build and compile the model.
    :param creative_id_num: vocabulary size for the embedding layer
    :param embedding_size: embedding dimension
    :param max_len: input sequence length
    :param RMSProp_lr: learning rate for the RMSprop optimizer
    :param model_type: network architecture, one of
        MLP: multi-layer perceptron
        Conv1D: 1-D convolutional network
        GlobalMaxPooling1D: 1-D global max pooling
        GlobalMaxPooling1D+MLP: 1-D global max pooling + MLP
        LSTM: plain LSTM
        Conv1D+LSTM: 1-D convolution + LSTM
        Bidirectional-LSTM: bidirectional LSTM
            (note: spelled with a hyphen — the original docstring said
            "Bidirectional+LSTM", which the code below does not accept)
    :param label_name: prediction target
        age : 10-class classification by age
        gender : binary classification by gender
    :return: the compiled model
    '''
    print("* 构建网络")
    model = Sequential()
    model.add(Embedding(creative_id_num, embedding_size, input_length = max_len))
    if model_type == 'MLP':
        model.add(Flatten())
        model.add(Dense(8, activation = 'relu', kernel_regularizer = l2(0.001)))
        model.add(Dropout(0.5))
        model.add(Dense(4, activation = 'relu', kernel_regularizer = l2(0.001)))
        model.add(Dropout(0.5))
    elif model_type == 'Conv1D':
        model.add(Conv1D(32, 7, activation = 'relu', kernel_regularizer = l2(0.001)))
        model.add(Conv1D(32, 7, activation = 'relu', kernel_regularizer = l2(0.001)))
        model.add(GlobalMaxPooling1D())
    elif model_type == 'GlobalMaxPooling1D':
        model.add(GlobalMaxPooling1D())
    elif model_type == 'GlobalMaxPooling1D+MLP':
        model.add(GlobalMaxPooling1D())
        model.add(Dense(64, activation = 'relu', kernel_regularizer = l2(0.001)))
        model.add(Dense(32, activation = 'relu', kernel_regularizer = l2(0.001)))
    elif model_type == 'LSTM':
        # model.add(LSTM(128, dropout = 0.5, recurrent_dropout = 0.5))
        model.add(LSTM(128))
    elif model_type == 'Conv1D+LSTM':
        model.add(Conv1D(32, 5, activation = 'relu', kernel_regularizer = l2(0.001)))
        model.add(Conv1D(32, 5, activation = 'relu', kernel_regularizer = l2(0.001)))
        model.add(LSTM(16, dropout = 0.5, recurrent_dropout = 0.5))
    elif model_type == 'Bidirectional-LSTM':
        model.add(Bidirectional(LSTM(embedding_size, dropout = 0.2, recurrent_dropout = 0.2)))
    else:
        raise Exception("错误的网络模型类型")

    # Output head + loss depend on the prediction target.
    if label_name == "age":
        model.add(Dense(10, activation = 'softmax'))
        print("%s——模型构建完成!" % model_type)
        print("* 编译模型")
        model.compile(optimizer = optimizers.RMSprop(lr = RMSProp_lr),
                      loss = losses.sparse_categorical_crossentropy,
                      metrics = [metrics.sparse_categorical_accuracy])
    elif label_name == 'gender':
        model.add(Dense(1, activation = 'sigmoid'))
        print("%s——模型构建完成!" % model_type)
        print("* 编译模型")
        model.compile(optimizer = optimizers.RMSprop(lr = RMSProp_lr),
                      loss = losses.binary_crossentropy,
                      metrics = [metrics.binary_accuracy])
    else:
        raise Exception("错误的标签类型!")

    print(model.summary())
    return model
示例#29
0
# Small CNN: two conv/pool stages, dropout, dense, 8-way softmax head.
conv_model.add(
    layers.Conv2D(32, (4, 4), activation='relu', input_shape=(100, 100, 3)))
conv_model.add(layers.MaxPooling2D((12, 12)))
conv_model.add(layers.Conv2D(48, (4, 4), activation='relu'))  # 48
conv_model.add(layers.MaxPooling2D((4, 4)))
conv_model.add(layers.Flatten())
conv_model.add(layers.Dropout(0.40))  # are also ok:0.60, 0.48
conv_model.add(layers.Dense(128, activation='relu'))  # 100
conv_model.add(layers.Dense(8, activation='softmax'))

print(conv_model.summary())

LEARNING_RATE = 1e-4
conv_model.compile(loss='categorical_crossentropy',
                   optimizer=optimizers.RMSprop(lr=LEARNING_RATE),
                   metrics=['acc'])

# ./stall_NN.py

history_conv = conv_model.fit(master_data_set,
                              master_label_set,
                              validation_split=VALIDATION_SPLIT,
                              epochs=50,
                              batch_size=24)

# Plot the training/validation loss curves.
plt.plot(history_conv.history['loss'])
plt.plot(history_conv.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
示例#30
0
def train_and_save(model_name, X, Y, validation_rate=0.2, need_augment=False):
    """
    Train a model over given training set, then
    save the trained model as given name.

    :param model_name: the name to save the model as; expected format is
        '<prefix>-<dataset>-<architect>-<trans_type>' (split on '-').
    :param X: training examples, shape (nb_examples, rows, cols, channels).
    :param Y: corresponding desired labels, one-hot encoded.
    :param validation_rate: fraction of examples held out for validation.
    :param need_augment: a flag whether to perform data augmentation before training.
    """
    warnings.warn(
        'This method is deprecated, it will be removed soon. '
        'Please use functions train() or train_model() to train a model '
        'then save_model() to save the model.', DeprecationWarning)

    prefix, dataset, architect, trans_type = model_name.split('-')
    nb_examples, img_rows, img_cols, nb_channels = X.shape

    # Simple tail split (no shuffling): the last `validation_rate` fraction
    # of X/Y becomes the validation set.
    nb_validation = int(nb_examples * validation_rate)
    nb_training = nb_examples - nb_validation
    train_samples = X[:nb_training]
    train_labels = Y[:nb_training]
    val_sample = X[nb_training:]
    val_labels = Y[nb_training:]
    input_shape = (img_rows, img_cols, nb_channels)
    nb_classes = int(Y.shape[1])

    print('input_shape: {}; nb_classes: {}'.format(input_shape, nb_classes))
    print('{} training sample; {} validation samples.'.format(
        nb_training, nb_validation))

    # get corresponding model (architecture chosen by dataset name)
    model = create_model(dataset,
                         input_shape=input_shape,
                         nb_classes=nb_classes)
    if need_augment:
        # normalize samples before augmentation
        # NOTE(review): the non-augmented branch does NOT normalize —
        # presumably intentional, but confirm against create_model's inputs.
        train_samples = data.normalize(train_samples)
        val_sample = data.normalize(val_sample)
        # data augmentation
        datagen = ImageDataGenerator(
            rotation_range=15,
            width_shift_range=0.1,
            height_shift_range=0.1,
            horizontal_flip=True,
        )
        datagen.fit(train_samples)
        # define an optimizer with a small decay; lr further tuned per epoch
        # by the LearningRateScheduler callback below
        opt_rms = optimizers.RMSprop(lr=0.001, decay=1e-6)
        model.compile(loss='categorical_crossentropy',
                      optimizer=opt_rms,
                      metrics=['accuracy'])
        # perform training
        with tf.device('/device:GPU:0'):  # to run in google colab
            print("Found GPU:0")
            # train on augmented batches
            history = model.fit_generator(
                datagen.flow(train_samples,
                             train_labels,
                             batch_size=MODEL.BATCH_SIZE),
                steps_per_epoch=nb_training // MODEL.BATCH_SIZE,
                epochs=MODEL.EPOCHS,
                verbose=2,
                validation_data=(val_sample, val_labels),
                callbacks=[LearningRateScheduler(lr_schedule)])
            # evaluate on the validation split silently
            # verbose: integer. 0 = silent; 1 = progress bar; 2 = one line per epoch
            scores = model.evaluate(val_sample,
                                    val_labels,
                                    batch_size=128,
                                    verbose=0)
    else:
        # compile model with default Adam settings
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])

        with tf.device('/device:GPU:0'):
            # train directly on the raw (un-normalized) samples
            history = model.fit(train_samples,
                                train_labels,
                                epochs=MODEL.EPOCHS,
                                batch_size=MODEL.BATCH_SIZE,
                                shuffle=True,
                                verbose=1,
                                validation_data=(val_sample, val_labels))
            # evaluate on the validation split silently
            # verbose: integer. 0 = silent; 1 = progress bar; 2 = one line per epoch
            scores = model.evaluate(val_sample,
                                    val_labels,
                                    batch_size=128,
                                    verbose=0)

    # save the trained model (twice: plain .h5 and a '_2.h5' copy)
    model.save('{}/{}.h5'.format(PATH.MODEL, model_name))
    keras.models.save_model(model, '{}/{}_2.h5'.format(PATH.MODEL, model_name))
    # report
    print('Trained model has been saved to data/{}'.format(model_name))
    print('Test accuracy: {:.4f}; loss: {:.4f}'.format(scores[1], scores[0]))
    file_name = 'CheckPoint-{}-{}-{}.csv'.format(dataset, architect,
                                                 trans_type)
    file.dict2csv(history.history, '{}/{}'.format(PATH.RESULTS, file_name))
    plotTrainingResult(history, model_name)
    # delete the model after it's been saved.
    del model