Example #1
def model_main(result_sds, project_id, result_dir, train_data_dir,
               validation_data_dir, nb_train_samples, nb_validation_samples,
               input_shape, img_width, img_height, epochs, batch_size):
    # derive the number of classes from the folders under train_data_dir
    class_names = [d for d in os.listdir(train_data_dir)
                   if not d.startswith('.')]  # skip hidden entries like .DS_Store
    num_classes = len(class_names)
    if num_classes < 2:
        raise Exception('at least 2 classes are required; put the images of '
                        'each class into its own subfolder of train_data_dir')
    # load the Xception network
    base_model = applications.Xception(weights='imagenet',
                                       include_top=False,
                                       input_shape=input_shape)

    # build the classifier head on top of the base network
    top_model = Sequential()
    top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
    # top_model.add(Dense(256, activation='relu'))
    top_model.add(Dropout(0.5))

    if num_classes == 2:
        top_model.add(Dense(1, activation='sigmoid'))
        model = Model(inputs=base_model.input,
                      outputs=top_model(base_model.output))
        # freeze every layer except the last two (the final base layer and
        # the top classifier); their weights will not be updated
        for layer in model.layers[:-2]:
            layer.trainable = False

        model.compile(loss='binary_crossentropy',
                      optimizer='rmsprop',
                      metrics=[
                          'accuracy', custom_metrcis.matthews_correlation,
                          custom_metrcis.precision, custom_metrcis.recall,
                          custom_metrcis.fmeasure
                      ])
    else:
        top_model.add(Dense(num_classes, activation='softmax'))
        model = Model(inputs=base_model.input,
                      outputs=top_model(base_model.output))
        for layer in model.layers[:-2]:
            layer.trainable = False
        model.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['accuracy'])

    # this is the augmentation configuration we will use for training
    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True)
    # this is the augmentation configuration we will use for testing:
    # only rescaling
    test_datagen = ImageDataGenerator(rescale=1. / 255)
    if num_classes == 2:
        class_mode = 'binary'
    else:
        class_mode = 'categorical'

    train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode=class_mode)
    validation_generator = test_datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode=class_mode)
    # callback to save metrics
    batch_print_callback = LambdaCallback(
        on_epoch_begin=lambda epoch, logs: logger_service.log_epoch_begin(
            epoch, logs, result_sds, project_id),
        on_epoch_end=lambda epoch, logs: logger_service.log_epoch_end(
            epoch, logs, result_sds, project_id),
        on_batch_end=lambda batch, logs: logger_service.log_batch_end(
            batch, logs, result_sds, project_id))
    # checkpoint to save best weight
    best_checkpoint = MyModelCheckpoint(os.path.abspath(
        os.path.join(result_dir, 'best.hdf5')),
                                        save_weights_only=True,
                                        verbose=1,
                                        save_best_only=True)
    # checkpoint to save latest weight
    general_checkpoint = MyModelCheckpoint(os.path.abspath(
        os.path.join(result_dir, 'latest.hdf5')),
                                           save_weights_only=True,
                                           verbose=1)
    history = model.fit_generator(
        train_generator,
        steps_per_epoch=nb_train_samples // batch_size,
        epochs=epochs,
        validation_data=validation_generator,
        validation_steps=nb_validation_samples // batch_size,
        callbacks=[batch_print_callback, best_checkpoint, general_checkpoint],
    )
    # model.save_weights('first_try.h5')
    config = model.get_config()
    logger_service.log_train_end(
        result_sds,
        model_config=config,
        # score=score,
        history=history.history)
    keras_saved_model.save_model(result_dir, model)

    return {'history': history.history}
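A minimal, self-contained sketch of the same freeze-and-retrain pattern, assuming the standalone keras package (as the example's names suggest) and a hypothetical 150x150 RGB input; the logging and checkpoint helpers above are omitted:

from keras import applications
from keras.layers import Dense, Dropout, Flatten
from keras.models import Model, Sequential

# Xception as a fixed feature extractor (ImageNet weights, no classifier head)
base = applications.Xception(weights='imagenet', include_top=False,
                             input_shape=(150, 150, 3))

# small trainable head, mirroring the top_model above (binary case)
top = Sequential()
top.add(Flatten(input_shape=base.output_shape[1:]))
top.add(Dropout(0.5))
top.add(Dense(1, activation='sigmoid'))

model = Model(inputs=base.input, outputs=top(base.output))
for layer in model.layers[:-2]:  # freeze everything but the new head
    layer.trainable = False
model.compile(loss='binary_crossentropy', optimizer='rmsprop',
              metrics=['accuracy'])
model.summary()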
Example #2
def mnist_mlp(conf, input, **kw):
    result_sds = kw.pop('result_sds', None)
    project_id = kw.pop('project_id', None)
    f = conf['fit']
    e = conf['evaluate']
    x_train = input['x_tr']
    y_train = input['y_tr']
    x_val = input['x_te']
    y_val = input['y_te']
    x_test = input['x_te']
    y_test = input['y_te']
    num_classes = y_train.shape[1]

    # determine the image data format (see the numpy check after this function)
    x_train_shape = x_train.shape
    if x_train_shape[1] > 3:
        # channels_last: (None, img_rows, img_cols, 1)
        x_train = x_train.reshape(-1, x_train_shape[1] * x_train_shape[2])
        x_test = x_test.reshape(-1, x_train_shape[1] * x_train_shape[2])
        x_val = x_test
    else:
        # channels_first: (None, 1, img_rows, img_cols)
        x_train = x_train.reshape(-1, x_train_shape[2] * x_train_shape[3])
        x_test = x_test.reshape(-1, x_train_shape[2] * x_train_shape[3])
        x_val = x_test
    # print(x_train.shape)
    with graph.as_default():
        model = Sequential()
        model.add(
            Dense(512, activation='relu', input_shape=(x_train.shape[1], )))
        model.add(Dropout(0.2))
        model.add(Dense(512, activation='relu'))
        model.add(Dropout(0.2))
        model.add(Dense(num_classes, activation='softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer=RMSprop(),
                      metrics=['accuracy'])

        # callback to save metrics
        batch_print_callback = LambdaCallback(
            on_epoch_end=lambda epoch, logs: logger_service.log_epoch_end(
                epoch, logs, result_sds, project_id))

        # checkpoint to save best weight
        best_checkpoint = MongoModelCheckpoint(result_sds=result_sds,
                                               verbose=0,
                                               save_best_only=True)
        # checkpoint to save latest weight
        general_checkpoint = MongoModelCheckpoint(result_sds=result_sds,
                                                  verbose=0)

        # training
        history = model.fit(x_train,
                            y_train,
                            validation_data=(x_val, y_val),
                            callbacks=[
                                batch_print_callback, best_checkpoint,
                                general_checkpoint
                            ],
                            verbose=0,
                            **f['args'])

        score = model.evaluate(x_test, y_test, **e['args'])
        # weights = model.get_weights()
        config = model.get_config()
        logger_service.log_train_end(result_sds,
                                     model_config=config,
                                     score=score,
                                     history=history.history)

        return {'score': score, 'history': history.history}
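The two branches above differ only in which axes hold the image dimensions; both flatten each image into a single feature vector for the MLP. A quick numpy check of both reshapes, assuming 28x28 MNIST images:

import numpy as np

# channels_last batch: (None, img_rows, img_cols, 1)
x_last = np.zeros((10, 28, 28, 1))
print(x_last.reshape(-1, 28 * 28).shape)   # (10, 784)

# channels_first batch: (None, 1, img_rows, img_cols)
x_first = np.zeros((10, 1, 28, 28))
print(x_first.reshape(-1, 28 * 28).shape)  # (10, 784)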
Example #3
def mnist_irnn(conf, input, **kw):
    result_sds = kw.pop('result_sds', None)
    project_id = kw.pop('project_id', None)
    f = conf['fit']
    e = conf['evaluate']
    x_train = input['x_tr']
    y_train = input['y_tr']
    x_val = input['x_te']
    y_val = input['y_te']
    x_test = input['x_te']
    y_test = input['y_te']

    x_train = x_train.reshape(x_train.shape[0], -1, 1)
    x_test = x_test.reshape(x_test.shape[0], -1, 1)
    x_val = x_test
    x_train_shape = x_train.shape
    input_shape = x_train_shape[1:]
    num_classes = y_train.shape[1]
    hidden_units = 100
    learning_rate = 1e-6

    with graph.as_default():
        model = Sequential()
        model.add(
            SimpleRNN(
                hidden_units,
                kernel_initializer=initializers.RandomNormal(stddev=0.001),
                recurrent_initializer=initializers.Identity(gain=1.0),
                activation='relu',
                input_shape=input_shape))
        model.add(Dense(num_classes))
        model.add(Activation('softmax'))
        rmsprop = RMSprop(lr=learning_rate)
        model.compile(loss='categorical_crossentropy',
                      optimizer=rmsprop,
                      metrics=['accuracy'])

        # callback to save metrics
        batch_print_callback = LambdaCallback(
            on_epoch_end=lambda epoch, logs: logger_service.log_epoch_end(
                epoch, logs, result_sds, project_id))

        # checkpoint to save best weight
        best_checkpoint = MongoModelCheckpoint(result_sds=result_sds,
                                               verbose=0,
                                               save_best_only=True)
        # checkpoint to save latest weight
        general_checkpoint = MongoModelCheckpoint(result_sds=result_sds,
                                                  verbose=0)

        # training
        history = model.fit(x_train,
                            y_train,
                            validation_data=(x_val, y_val),
                            callbacks=[
                                batch_print_callback, best_checkpoint,
                                general_checkpoint
                            ],
                            verbose=0,
                            **f['args'])

        score = model.evaluate(x_test, y_test, **e['args'])
        # weights = model.get_weights()
        config = model.get_config()
        logger_service.log_train_end(result_sds,
                                     model_config=config,
                                     score=score,
                                     history=history.history)

        return {'score': score, 'history': history.history}
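The reshape above feeds each image to the SimpleRNN one pixel per timestep, and the identity recurrent initializer combined with a ReLU activation is the IRNN recipe from Le et al. (2015). A quick numpy check of the reshape, assuming 28x28 MNIST images:

import numpy as np

# each 28x28 image becomes a 784-step sequence of single-pixel inputs
x = np.zeros((10, 28, 28))
x_seq = x.reshape(x.shape[0], -1, 1)
print(x_seq.shape)  # (10, 784, 1) -> input_shape for the RNN is (784, 1)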
Example #4
def convnet(conf, input, **kw):
    result_sds = kw.pop('result_sds', None)
    project_id = kw.pop('project_id', None)
    f = conf['fit']
    e = conf['evaluate']
    x_train = input['x_tr']
    y_train = input['y_tr']
    x_val = input['x_te']
    y_val = input['y_te']
    x_test = input['x_te']
    y_test = input['y_te']
    input_shape = x_train.shape[1:]
    output_units = y_train.shape[-1]

    with graph.as_default():
        model = Sequential()
        model.add(Conv2D(32, (3, 3), activation='relu', input_shape=input_shape))
        model.add(Conv2D(32, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(256, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(output_units, activation='softmax'))

        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

        model.compile(loss='categorical_crossentropy',
                      optimizer=sgd,
                      metrics=['accuracy'])

        # callback to save metrics
        batch_print_callback = LambdaCallback(
            on_epoch_end=lambda epoch, logs: logger_service.log_epoch_end(
                epoch, logs, result_sds, project_id))

        # checkpoint to save best weight
        best_checkpoint = MongoModelCheckpoint(result_sds=result_sds, verbose=0,
                                               save_best_only=True)
        # checkpoint to save latest weight
        general_checkpoint = MongoModelCheckpoint(result_sds=result_sds,
                                                  verbose=0)

        # training
        history = model.fit(x_train, y_train,
                            validation_data=(x_val, y_val),
                            callbacks=[batch_print_callback, best_checkpoint,
                                       general_checkpoint],
                            verbose=0,
                            **f['args'])

        score = model.evaluate(x_test, y_test, **e['args'])

        config = model.get_config()
        logger_service.log_train_end(result_sds,
                                     model_config=config,
                                     score=score,
                                     history=history.history)

        return {'score': score, 'history': history.history}
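To see how the spatial dimensions shrink through this stack, the same layers can be built with a hypothetical 32x32 RGB input and 10 classes (dropout layers omitted for brevity):

from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.summary()  # 32 -> 30 -> 28 -> 14 -> 12 -> 10 -> 5, Flatten gives 5*5*64 = 1600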
Example #5
def imdb_lstm(conf, input, **kw):
    result_sds = kw.pop('result_sds', None)
    project_id = kw.pop('project_id', None)
    f = conf['fit']
    e = conf['evaluate']
    x_train = input['x_tr']
    y_train = input['y_tr']
    x_val = input['x_te']
    y_val = input['y_te']
    x_test = input['x_te']
    y_test = input['y_te']
    max_features = input['max_features']
    maxlen = input['maxlen']
    x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
    x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
    x_val = x_test

    with graph.as_default():
        model = Sequential()
        model.add(Embedding(max_features, 128))
        model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
        model.add(Dense(1, activation='sigmoid'))

        # try using different optimizers and different optimizer configs
        model.compile(loss='binary_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])

        # callback to save metrics
        batch_print_callback = LambdaCallback(
            on_epoch_end=lambda epoch, logs: logger_service.log_epoch_end(
                epoch, logs, result_sds, project_id))

        # checkpoint to save best weight
        best_checkpoint = MongoModelCheckpoint(result_sds=result_sds,
                                               verbose=0,
                                               save_best_only=True)
        # checkpoint to save latest weight
        general_checkpoint = MongoModelCheckpoint(result_sds=result_sds,
                                                  verbose=0)

        # training
        history = model.fit(x_train,
                            y_train,
                            validation_data=(x_val, y_val),
                            callbacks=[
                                batch_print_callback, best_checkpoint,
                                general_checkpoint
                            ],
                            verbose=0,
                            **f['args'])

        score = model.evaluate(x_test, y_test, **e['args'])
        # weights = model.get_weights()
        config = model.get_config()
        logger_service.log_train_end(result_sds,
                                     model_config=config,
                                     score=score,
                                     history=history.history)

        return {'score': score, 'history': history.history}
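pad_sequences left-pads short reviews with zeros and left-truncates long ones, so every input has exactly maxlen timesteps; a quick check:

from keras.preprocessing import sequence

seqs = [[1, 2, 3], [4, 5, 6, 7, 8, 9]]
print(sequence.pad_sequences(seqs, maxlen=5))
# [[0 0 1 2 3]
#  [5 6 7 8 9]]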
Example #6
def imdb_fasttext(conf, input, **kw):
    result_sds = kw.pop('result_sds', None)
    project_id = kw.pop('project_id', None)
    f = conf['fit']
    e = conf['evaluate']
    x_train = input['x_tr']
    y_train = input['y_tr']
    x_val = input['x_te']
    y_val = input['y_te']
    x_test = input['x_te']
    y_test = input['y_te']
    # Set parameters:
    # ngram_range = 2 will add bi-gram features
    ngram_range = input['ngram_range']
    max_features = input['max_features']
    maxlen = input['maxlen']
    embedding_dims = 50

    def create_ngram_set(input_list, ngram_value=2):
        """
        Extract a set of n-grams from a list of integers.
         create_ngram_set([1, 4, 9, 4, 1, 4], ngram_value=2)
        {(4, 9), (4, 1), (1, 4), (9, 4)}
        create_ngram_set([1, 4, 9, 4, 1, 4], ngram_value=3)
        [(1, 4, 9), (4, 9, 4), (9, 4, 1), (4, 1, 4)]
        """
        return set(zip(*[input_list[i:] for i in range(ngram_value)]))

    def add_ngram(sequences, token_indice, ngram_range=2):
        """
        Augment the input list of list (sequences) by appending n-grams values.
        Example: adding bi-gram
            sequences = [[1, 3, 4, 5], [1, 3, 7, 9, 2]]
            token_indice = {(1, 3): 1337, (9, 2): 42, (4, 5): 2017}
            add_ngram(sequences, token_indice, ngram_range=2)
            [[1, 3, 4, 5, 1337, 2017], [1, 3, 7, 9, 2, 1337, 42]]
        Example: adding tri-gram
            sequences = [[1, 3, 4, 5], [1, 3, 7, 9, 2]]
            token_indice = {(1, 3): 1337, (9, 2): 42, (4, 5): 2017, (7, 9,
            2): 2018}
            add_ngram(sequences, token_indice, ngram_range=3)
            [[1, 3, 4, 5, 1337], [1, 3, 7, 9, 2, 1337, 2018]]
        """
        new_sequences = []
        for input_list in sequences:
            new_list = input_list[:]
            for i in range(len(new_list) - ngram_range + 1):
                for ngram_value in range(2, ngram_range + 1):
                    ngram = tuple(new_list[i:i + ngram_value])
                    if ngram in token_indice:
                        new_list.append(token_indice[ngram])
            new_sequences.append(new_list)

        return new_sequences

    if ngram_range > 1:
        print('Adding {}-gram features'.format(ngram_range))
        # Create the set of unique n-grams from the training set.
        ngram_set = set()
        for input_list in x_train:
            for i in range(2, ngram_range + 1):
                set_of_ngram = create_ngram_set(input_list, ngram_value=i)
                ngram_set.update(set_of_ngram)

        # Dictionary mapping n-gram token to a unique integer.
        # Integer values are greater than max_features in order
        # to avoid collision with existing features.
        start_index = max_features + 1
        token_indice = {v: k + start_index for k, v in enumerate(ngram_set)}
        indice_token = {token_indice[k]: k for k in token_indice}

        # max_features is the highest integer that could be found in the
        # dataset.
        max_features = np.max(list(indice_token.keys())) + 1

        # Augment x_train and x_test with the n-gram features
        x_train = add_ngram(x_train, token_indice, ngram_range)
        x_test = add_ngram(x_test, token_indice, ngram_range)
        # print('Average train sequence length: {}'.format(
        #     np.mean(list(map(len, x_train)), dtype=int)))
        # print('Average test sequence length: {}'.format(
        #     np.mean(list(map(len, x_test)), dtype=int)))

    print('Pad sequences (samples x time)')
    x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
    x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
    x_val = x_test
    print('x_train shape:', x_train.shape)
    print('x_test shape:', x_test.shape)

    with graph.as_default():
        model = Sequential()
        # we start off with an efficient embedding layer which maps
        # our vocab indices into embedding_dims dimensions
        model.add(Embedding(max_features,
                            embedding_dims,
                            input_length=maxlen))

        # we add a GlobalAveragePooling1D, which will average the embeddings
        # of all words in the document
        model.add(GlobalAveragePooling1D())

        # We project onto a single unit output layer, and squash it with a
        # sigmoid:
        model.add(Dense(1, activation='sigmoid'))

        model.compile(loss='binary_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])

        # callback to save metrics
        batch_print_callback = LambdaCallback(
            on_epoch_end=lambda epoch, logs: logger_service.log_epoch_end(
                epoch, logs, result_sds, project_id))

        # checkpoint to save best weight
        best_checkpoint = MongoModelCheckpoint(result_sds=result_sds,
                                               verbose=0,
                                               save_best_only=True)
        # checkpoint to save latest weight
        general_checkpoint = MongoModelCheckpoint(result_sds=result_sds,
                                                  verbose=0)

        # training
        history = model.fit(x_train, y_train,
                            validation_data=(x_val, y_val),
                            callbacks=[batch_print_callback, best_checkpoint,
                                       general_checkpoint],
                            verbose=0,
                            **f['args'])

        score = model.evaluate(x_test, y_test, **e['args'])
        # weights = model.get_weights()
        config = model.get_config()
        logger_service.log_train_end(result_sds,
                                     model_config=config,
                                     score=score,
                                     history=history.history)

        return {
            'score': score,
            'history': history.history}
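The n-gram helpers are nested inside imdb_fasttext; a standalone copy of the extraction one-liner, to verify the bigram case against its docstring:

def create_ngram_set(input_list, ngram_value=2):
    # n consecutive shifted views of the list, zipped into n-gram tuples
    return set(zip(*[input_list[i:] for i in range(ngram_value)]))

print(create_ngram_set([1, 4, 9, 4, 1, 4], ngram_value=2))
# {(1, 4), (4, 9), (9, 4), (4, 1)}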
Example #7
def imdb_cnn_lstm(conf, input, **kw):
    result_sds = kw.pop('result_sds', None)
    project_id = kw.pop('project_id', None)
    f = conf['fit']
    e = conf['evaluate']
    x_train = input['x_tr']
    y_train = input['y_tr']
    x_val = input['x_te']
    y_val = input['y_te']
    x_test = input['x_te']
    y_test = input['y_te']

    # Embedding
    embedding_size = 128
    max_features = input['max_features']
    maxlen = input['maxlen']
    x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
    x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
    x_val = x_test

    # Convolution
    kernel_size = 5
    filters = 64
    pool_size = 4

    # LSTM
    lstm_output_size = 70

    with graph.as_default():
        model = Sequential()
        model.add(Embedding(max_features, embedding_size, input_length=maxlen))
        model.add(Dropout(0.25))
        model.add(Conv1D(filters,
                         kernel_size,
                         padding='valid',
                         activation='relu',
                         strides=1))
        model.add(MaxPooling1D(pool_size=pool_size))
        model.add(LSTM(lstm_output_size))
        model.add(Dense(1))
        model.add(Activation('sigmoid'))

        model.compile(loss='binary_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])

        # callback to save metrics
        batch_print_callback = LambdaCallback(
            on_epoch_end=lambda epoch, logs: logger_service.log_epoch_end(
                epoch, logs, result_sds, project_id))

        # checkpoint to save best weight
        best_checkpoint = MongoModelCheckpoint(result_sds=result_sds,
                                               verbose=0,
                                               save_best_only=True)
        # checkpoint to save latest weight
        general_checkpoint = MongoModelCheckpoint(result_sds=result_sds,
                                                  verbose=0)

        # training
        history = model.fit(x_train, y_train,
                            validation_data=(x_val, y_val),
                            callbacks=[batch_print_callback, best_checkpoint,
                                       general_checkpoint],
                            verbose=0,
                            **f['args'])

        score = model.evaluate(x_test, y_test, **e['args'])
        # weights = model.get_weights()
        config = model.get_config()
        logger_service.log_train_end(result_sds,
                                     model_config=config,
                                     score=score,
                                     history=history.history)

        return {
            'score': score,
            'history': history.history}
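With 'valid' padding and stride 1, Conv1D shortens the sequence to maxlen - kernel_size + 1, and MaxPooling1D divides that by pool_size; the LSTM then runs over the pooled steps. For a hypothetical maxlen of 100:

maxlen, kernel_size, pool_size = 100, 5, 4   # maxlen here is hypothetical
conv_steps = maxlen - kernel_size + 1        # 'valid' padding, stride 1 -> 96
lstm_steps = conv_steps // pool_size         # pooled sequence length -> 24
print(conv_steps, lstm_steps)                # 96 24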
Example #8
def imdb_cnn(conf, input, **kw):
    result_sds = kw.pop('result_sds', None)
    project_id = kw.pop('project_id', None)
    f = conf['fit']
    e = conf['evaluate']
    x_train = input['x_tr']
    y_train = input['y_tr']
    x_val = input['x_te']
    y_val = input['y_te']
    x_test = input['x_te']
    y_test = input['y_te']
    max_features = input['max_features']
    maxlen = input['maxlen']
    x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
    x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
    x_val = x_test
    # set parameters:
    embedding_dims = 50
    filters = 250
    kernel_size = 3
    hidden_dims = 250

    with graph.as_default():
        model = Sequential()
        model.add(Embedding(max_features, embedding_dims, input_length=maxlen))
        model.add(Dropout(0.2))

        # we add a Convolution1D, which will learn filters
        # word group filters of size filter_length:
        model.add(
            Conv1D(filters,
                   kernel_size,
                   padding='valid',
                   activation='relu',
                   strides=1))
        # we use max pooling:
        model.add(GlobalMaxPooling1D())

        # We add a vanilla hidden layer:
        model.add(Dense(hidden_dims))
        model.add(Dropout(0.2))
        model.add(Activation('relu'))

        # We project onto a single unit output layer, and squash it with a
        # sigmoid:
        model.add(Dense(1))
        model.add(Activation('sigmoid'))

        model.compile(loss='binary_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])

        # callback to save metrics
        batch_print_callback = LambdaCallback(
            on_epoch_end=lambda epoch, logs: logger_service.log_epoch_end(
                epoch, logs, result_sds, project_id))

        # checkpoint to save best weight
        best_checkpoint = MongoModelCheckpoint(result_sds=result_sds,
                                               verbose=0,
                                               save_best_only=True)
        # checkpoint to save latest weight
        general_checkpoint = MongoModelCheckpoint(result_sds=result_sds,
                                                  verbose=0)

        # training
        history = model.fit(x_train,
                            y_train,
                            validation_data=(x_val, y_val),
                            callbacks=[
                                batch_print_callback, best_checkpoint,
                                general_checkpoint
                            ],
                            verbose=0,
                            **f['args'])

        score = model.evaluate(x_test, y_test, **e['args'])
        # weights = model.get_weights()
        config = model.get_config()
        logger_service.log_train_end(result_sds,
                                     model_config=config,
                                     score=score,
                                     history=history.history)

        return {'score': score, 'history': history.history}
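GlobalMaxPooling1D keeps the single largest activation of each filter across all timesteps, which is why no Flatten is needed before the dense layers; it is equivalent to a max over the time axis:

import numpy as np

x = np.random.rand(2, 7, 250)   # (batch, timesteps, filters); shapes hypothetical
pooled = x.max(axis=1)          # what GlobalMaxPooling1D computes
print(pooled.shape)             # (2, 250): one value per filter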