Example #1
def keras_seq(conf, input, **kw):
    """
    a general implementation of sequential model of keras
    :param conf: config dict
    :return:
    """
    result_sds = kw.pop('result_sds', None)
    project_id = kw.pop('project_id', None)
    job_id = kw.pop('job_id', None)
    result_dir = kw.pop('result_dir', None)
    # validate the required ids before touching the database
    if result_sds is None:
        raise RuntimeError('no result sds id passed to model')
    if project_id is None:
        raise RuntimeError('no project id passed to model')
    project = project_business.get_by_id(project_id)
    ow = ownership_business.get_ownership_by_owned_item(project, 'project')
    user_ID = ow.user.user_ID
    print('conf:', conf)

    with graph.as_default():
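        # NOTE: 'graph' is assumed to be a module-level tf.Graph shared
        # across threads; it is not defined in this snippet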
        model = Sequential()

        ls = conf['layers']
        comp = conf['compile']
        f = conf['fit']
        e = conf['evaluate']
        x_train = input['x_tr']
        y_train = input['y_tr']
        x_val = input['x_te']
        y_val = input['y_te']
        x_test = input['x_te']
        y_test = input['y_te']
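        # note: validation and test currently share the same
        # 'x_te'/'y_te' split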

        training_logger = logger_service.TrainingLogger(f['args']['epochs'],
                                                        project_id,
                                                        job_id,
                                                        user_ID,
                                                        result_sds)

        # TODO add validator
        # op = comp['optimizer']

        # loop to add layers
        for layer_conf in ls:
            # get the layer class from keras.layers by its name
            layer_class = getattr(layers, layer_conf['name'])
            # instantiate the layer from its args and add it to the model
            model.add(layer_class(**layer_conf['args']))

        # optimiser
        # sgd_class = getattr(optimizers, op['name'])
        # sgd = sgd_class(**op['args'])

        # define the metrics
        # compile
        model.compile(**comp['args'])

        # callback to save metrics
        batch_print_callback = LambdaCallback(
            on_epoch_begin=lambda epoch, logs: training_logger.log_epoch_begin(
                epoch, logs),
            on_epoch_end=lambda epoch, logs: training_logger.log_epoch_end(
                epoch, logs),
            on_batch_end=lambda batch, logs: training_logger.log_batch_end(
                batch, logs))

        # checkpoint to save best weight
        best_checkpoint = MyModelCheckpoint(
            os.path.abspath(os.path.join(result_dir, 'best.hdf5')),
            save_weights_only=True,
            verbose=1, save_best_only=True)
        # checkpoint to save latest weight
        general_checkpoint = MyModelCheckpoint(
            os.path.abspath(os.path.join(result_dir, 'latest.hdf5')),
            save_weights_only=True,
            verbose=1)

        # training
        history = model.fit(x_train, y_train,
                            validation_data=(x_val, y_val),
                            callbacks=[batch_print_callback, best_checkpoint,
                                       general_checkpoint],
                            verbose=0,
                            **f['args'])

        # testing
        score = model.evaluate(x_test, y_test, **e['args'])
        # weights = model.get_weights()
        config = model.get_config()
        logger_service.log_train_end(result_sds,
                                     model_config=config,
                                     score=score,
                                     history=history.history)
        keras_saved_model.save_model(result_dir, model)
        return {'score': score, 'history': history.history}
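keras_seq builds the model from conf['layers'] (each entry naming a class in keras.layers), compiles with conf['compile']['args'], trains with conf['fit']['args'] (the 'epochs' key is also read by the TrainingLogger) and evaluates with conf['evaluate']['args']. Below is a minimal sketch of matching conf and input dicts; every concrete value is an illustrative assumption, not part of the original project:

import numpy as np

conf = {
    'layers': [
        {'name': 'Dense', 'args': {'units': 64, 'activation': 'relu',
                                   'input_dim': 20}},
        {'name': 'Dropout', 'args': {'rate': 0.5}},
        {'name': 'Dense', 'args': {'units': 2, 'activation': 'softmax'}},
    ],
    'compile': {'args': {'loss': 'categorical_crossentropy',
                         'optimizer': 'rmsprop',
                         'metrics': ['accuracy']}},
    'fit': {'args': {'epochs': 10, 'batch_size': 32}},
    'evaluate': {'args': {'batch_size': 32}},
}
# input holds the train/test split under the keys keras_seq reads
input_data = {
    'x_tr': np.random.random((100, 20)),
    'y_tr': np.eye(2)[np.random.randint(0, 2, 100)],
    'x_te': np.random.random((20, 20)),
    'y_te': np.eye(2)[np.random.randint(0, 2, 20)],
}
# result_sds / project_id / job_id are app-level objects, placeholders here
# result = keras_seq(conf, input_data, result_sds=sds, project_id=pid,
#                    job_id=jid, result_dir='./results')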
Example #2
def model_main(result_sds, project_id, result_dir, train_data_dir,
               validation_data_dir, nb_train_samples, nb_validation_samples,
               input_shape, img_width, img_height, epochs, batch_size):
    # determine the number of classes from the folders under train_data_dir
    l = os.listdir(train_data_dir)
    # macOS creates .DS_Store entries; drop it only if present
    if '.DS_Store' in l:
        l.remove('.DS_Store')
    num_classes = len(l)
    if num_classes < 2:
        raise Exception('at least 2 classes are required; put the images '
                        'for each class into its own folder under '
                        'train_data_dir')
    # load the Xception network
    base_model = applications.Xception(weights='imagenet',
                                       include_top=False,
                                       input_shape=input_shape)
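    # include_top=False drops Xception's ImageNet classifier head so a
    # custom top can be attached below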

    # build the top of cnn network
    top_model = Sequential()
    top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
    # top_model.add(Dense(256, activation='relu'))
    top_model.add(Dropout(0.5))

    if num_classes == 2:
        top_model.add(Dense(1, activation='sigmoid'))
        model = Model(inputs=base_model.input,
                      outputs=top_model(base_model.output))
        # freeze every layer except the last two (the newly added top),
        # so the pre-trained Xception weights are not updated
        for layer in model.layers[:-2]:
            layer.trainable = False

        model.compile(loss='binary_crossentropy',
                      optimizer='rmsprop',
                      metrics=[
                          'accuracy', custom_metrcis.matthews_correlation,
                          custom_metrcis.precision, custom_metrcis.recall,
                          custom_metrcis.fmeasure
                      ])
    else:
        top_model.add(Dense(num_classes, activation='softmax'))
        model = Model(inputs=base_model.input,
                      outputs=top_model(base_model.output))
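        # likewise freeze everything except the new top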
        for layer in model.layers[:-2]:
            layer.trainable = False
        model.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['accuracy'])

    # this is the augmentation configuration we will use for training
    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True)
    # this is the augmentation configuration we will use for testing:
    # only rescaling
    test_datagen = ImageDataGenerator(rescale=1. / 255)
    if num_classes == 2:
        class_mode = 'binary'
    else:
        class_mode = 'categorical'

    train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode=class_mode)
    validation_generator = test_datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode=class_mode)
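    # flow_from_directory infers the class labels from the sub-folder
    # names, so the directory layout defines the classes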
    # callback to save metrics
    batch_print_callback = LambdaCallback(
        on_epoch_begin=lambda epoch, logs: logger_service.log_epoch_begin(
            epoch, logs, result_sds, project_id),
        on_epoch_end=lambda epoch, logs: logger_service.log_epoch_end(
            epoch, logs, result_sds, project_id),
        on_batch_end=lambda batch, logs: logger_service.log_batch_end(
            batch, logs, result_sds, project_id))
    # checkpoint to save best weight
    best_checkpoint = MyModelCheckpoint(os.path.abspath(
        os.path.join(result_dir, 'best.hdf5')),
                                        save_weights_only=True,
                                        verbose=1,
                                        save_best_only=True)
    # checkpoint to save latest weight
    general_checkpoint = MyModelCheckpoint(os.path.abspath(
        os.path.join(result_dir, 'latest.hdf5')),
                                           save_weights_only=True,
                                           verbose=1)
    history = model.fit_generator(
        train_generator,
        steps_per_epoch=nb_train_samples // batch_size,
        epochs=epochs,
        validation_data=validation_generator,
        validation_steps=nb_validation_samples // batch_size,
        callbacks=[batch_print_callback, best_checkpoint, general_checkpoint],
    )
    # model.save_weights('first_try.h5')
    config = model.get_config()
    logger_service.log_train_end(
        result_sds,
        model_config=config,
        # score=score,
        history=history.history)
    keras_saved_model.save_model(result_dir, model)

    return {'history': history.history}
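A hypothetical invocation, assuming train_data_dir and validation_data_dir each contain one sub-folder per class; the paths and sizes below are illustrative only (Xception needs inputs of at least 71x71 with 3 channels):

# hypothetical call; `sds` would be a real result descriptor in the app
# result = model_main(
#     result_sds=sds,
#     project_id='my_project_id',
#     result_dir='./results',
#     train_data_dir='./data/train',           # one sub-folder per class
#     validation_data_dir='./data/validation',
#     nb_train_samples=2000,
#     nb_validation_samples=800,
#     input_shape=(150, 150, 3),
#     img_width=150,
#     img_height=150,
#     epochs=10,
#     batch_size=16)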
Example #3
def mlp_main(result_sds,
             project_id,
             job_id,
             user_ID,
             result_dir,
             x_train,
             y_train,
             x_val,
             y_val,
             x_test,
             y_test,
             f=None,
             e=None):
    # f (fit) and e (evaluate) configs are required despite the None defaults
    if f is None or e is None:
        raise RuntimeError('fit (f) and evaluate (e) config dicts are '
                           'required')

    training_logger = logger_service.TrainingLogger(f['args']['epochs'],
                                                    project_id, job_id,
                                                    user_ID, result_sds)
    input_len = x_train.shape[1]
    output_len = y_train.shape[1]

    model = Sequential()

    # Dense(64) is a fully-connected layer with 64 hidden units.
    # In the first layer you must specify the expected input shape:
    # here, input_len-dimensional vectors.
    model.add(Dense(64, activation='relu', input_dim=input_len))
    model.add(Dropout(0.5))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(output_len, activation='softmax'))

    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])

    # callback to save metrics
    # TODO custom to make database writing async
    batch_print_callback = LambdaCallback(
        on_epoch_begin=lambda epoch, logs: training_logger.log_epoch_begin(
            epoch, logs),
        on_epoch_end=lambda epoch, logs: training_logger.log_epoch_end(
            epoch, logs),
        on_batch_end=lambda batch, logs: training_logger.log_batch_end(
            batch, logs))

    # checkpoint to save best weight
    best_checkpoint = MyModelCheckpoint(os.path.abspath(
        os.path.join(result_dir, 'best.hdf5')),
                                        save_weights_only=True,
                                        verbose=1,
                                        save_best_only=True)
    # checkpoint to save latest weight
    general_checkpoint = MyModelCheckpoint(os.path.abspath(
        os.path.join(result_dir, 'latest.hdf5')),
                                           save_weights_only=True,
                                           verbose=1)

    # training
    history = model.fit(
        x_train,
        y_train,
        validation_data=(x_val, y_val),
        callbacks=[batch_print_callback, best_checkpoint, general_checkpoint],
        verbose=1,
        **f['args'])

    score = model.evaluate(x_test, y_test, **e['args'])
    # weights = model.get_weights()
    config = model.get_config()
    logger_service.log_train_end(result_sds,
                                 model_config=config,
                                 score=score,
                                 history=history.history)

    keras_saved_model.save_model(result_dir, model)
    return {'score': score, 'history': history.history}
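For reference, a minimal sketch of the f and e dicts that mlp_main unpacks into model.fit and model.evaluate; the keys shown are assumptions inferred from the **f['args'] and **e['args'] calls above:

f = {'args': {'epochs': 20, 'batch_size': 32}}  # forwarded to model.fit
e = {'args': {'batch_size': 32}}                # forwarded to model.evaluate
# x_* are 2-D float arrays and y_* are one-hot label matrices, e.g. from
# keras.utils.to_categorical; the ids below are placeholders
# result = mlp_main(result_sds, project_id, job_id, user_ID, './results',
#                   x_train, y_train, x_val, y_val, x_test, y_test, f=f, e=e)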