Example #1
def image_classifier_xception(conf, input, **kw):
    # extract conf
    f = conf['fit']['args']
    e = conf['evaluate']['args']
    epochs = f['epochs']
    batch_size = f['batch_size']
    # extract kw
    result_sds = kw.pop('result_sds', None)
    project_id = kw.pop('project_id', None)
    result_dir = kw.pop('result_dir', None)
    # extract input
    train_data_dir = input['train_data_dir']
    validation_data_dir = input['validation_data_dir']
    nb_train_samples = input['nb_train_samples']
    nb_validation_samples = input['nb_validation_samples']

    # dimensions of our images.
    # use 150, 150 as default
    img_width, img_height = 150, 150

    if K.image_data_format() == 'channels_first':
        input_shape = (3, img_width, img_height)
    else:
        input_shape = (img_width, img_height, 3)

    with graph.as_default():
        return model_main(result_sds, project_id, result_dir, train_data_dir,
                          validation_data_dir, nb_train_samples,
                          nb_validation_samples, input_shape, img_width,
                          img_height, epochs, batch_size)
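The conf and input dicts are supplied by the calling job and are not shown here. A minimal illustrative sketch of the shape this function expects (paths and values are hypothetical):

# Hypothetical conf/input for image_classifier_xception; the real values come
# from the job configuration.
conf = {
    'fit': {'args': {'epochs': 10, 'batch_size': 32}},
    'evaluate': {'args': {'batch_size': 32}},
}
input = {
    'train_data_dir': '/data/train',
    'validation_data_dir': '/data/validation',
    'nb_train_samples': 2000,
    'nb_validation_samples': 800,
}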
Example #2
def keras_fmin_fnct(space):

    with graph.as_default():
        model = Sequential()
        model.add(Dense(units=space['units'], activation=space['activation'], input_shape=[4]))
        model.add(Dropout(rate=space['rate']))
        model.add(Dense(units=2, activation='softmax'))
        model.compile(optimizer=SGD(lr=space['lr']), loss=space['loss'], metrics=['acc'])
        model.fit(x_train, y_train,  validation_data=(x_test, y_test), batch_size=128, epochs=10, verbose=0)
        score, acc = model.evaluate(x_test, y_test, batch_size=128)
        return {'loss': -acc, 'status': STATUS_OK, 'model': model}
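keras_fmin_fnct is an objective function for a hyperparameter search: it is called once per sampled point space and returns the negated accuracy for the optimizer to minimize. A hedged sketch of a search space that supplies the keys the function reads, assuming hyperopt's hp module and the x_train/y_train/x_test/y_test globals used above:

# Hypothetical hyperopt search space matching the keys read by keras_fmin_fnct.
from hyperopt import fmin, hp, tpe

space = {
    'units': hp.choice('units', [32, 64, 128]),
    'activation': hp.choice('activation', ['relu', 'tanh']),
    'rate': hp.uniform('rate', 0.0, 0.5),
    'lr': hp.loguniform('lr', -6, -2),  # samples lr between ~0.0025 and ~0.135
    'loss': hp.choice('loss', ['categorical_crossentropy']),
}
best = fmin(keras_fmin_fnct, space, algo=tpe.suggest, max_evals=10)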
Example #3
def mlp(conf, input, **kw):
    result_sds = kw.pop('result_sds', None)
    project_id = kw.pop('project_id', None)
    result_dir = kw.pop('result_dir', None)
    job_id = kw.pop('job_id', None)
    project = project_business.get_by_id(project_id)
    ow = ownership_business.get_ownership_by_owned_item(project, 'project')
    user_ID = ow.user.user_ID
    f = conf['fit']
    e = conf['evaluate']
    x_train = input['x_tr']
    y_train = input['y_tr']
    x_val = input['x_te']
    y_val = input['y_te']
    x_test = input['x_te']
    y_test = input['y_te']

    with graph.as_default():
        return mlp_main(result_sds, project_id, job_id, user_ID, result_dir,
                        x_train, y_train, x_val, y_val, x_test, y_test, f, e)
Example #4
def export(job_id, user_ID):
    """
    export model for tf serving
    :param job_id: str/ObjectId
    :return:
    """
    result_dir, h5_filename = get_results_dir_by_job_id(job_id, user_ID)
    # result_sds = staging_data_set_business.get_by_job_id(job_id)
    model_dir = os.path.join(result_dir, 'model.json')
    weights_dir = os.path.join(result_dir, h5_filename)
    with open(model_dir, 'r') as f:
        data = json.load(f)
        json_string = json.dumps(data)
        # new_g = tf.Graph()
        with graph.as_default():
            model = model_from_json(json_string)
            # model.load_weights(weights_dir)
            # working_dir = MODEL_EXPORT_BASE
            # export_base_path = os.path.join(working_dir, str(result_sds.id))
            version = keras_saved_model.export(model, result_dir, weights_dir)
            return result_dir, version
Example #5
def mnist_irnn(conf, input, **kw):
    result_sds = kw.pop('result_sds', None)
    project_id = kw.pop('project_id', None)
    f = conf['fit']
    e = conf['evaluate']
    x_train = input['x_tr']
    y_train = input['y_tr']
    x_val = input['x_te']
    y_val = input['y_te']
    x_test = input['x_te']
    y_test = input['y_te']

    x_train = x_train.reshape(x_train.shape[0], -1, 1)
    x_test = x_test.reshape(x_test.shape[0], -1, 1)
    x_val = x_test
    x_train_shape = x_train.shape
    input_shape = x_train_shape[1:]
    num_classes = y_train.shape[1]
    hidden_units = 100
    learning_rate = 1e-6

    with graph.as_default():
        model = Sequential()
        model.add(
            SimpleRNN(
                hidden_units,
                kernel_initializer=initializers.RandomNormal(stddev=0.001),
                recurrent_initializer=initializers.Identity(gain=1.0),
                activation='relu',
                input_shape=input_shape))
        model.add(Dense(num_classes))
        model.add(Activation('softmax'))
        rmsprop = RMSprop(lr=learning_rate)
        model.compile(loss='categorical_crossentropy',
                      optimizer=rmsprop,
                      metrics=['accuracy'])

        # callback to save metrics
        batch_print_callback = LambdaCallback(
            on_epoch_end=lambda epoch, logs: logger_service.log_epoch_end(
                epoch, logs, result_sds, project_id))

        # checkpoint to save best weight
        best_checkpoint = MongoModelCheckpoint(result_sds=result_sds,
                                               verbose=0,
                                               save_best_only=True)
        # checkpoint to save latest weight
        general_checkpoint = MongoModelCheckpoint(result_sds=result_sds,
                                                  verbose=0)

        # training
        history = model.fit(x_train,
                            y_train,
                            validation_data=(x_val, y_val),
                            callbacks=[
                                batch_print_callback, best_checkpoint,
                                general_checkpoint
                            ],
                            verbose=0,
                            **f['args'])

        score = model.evaluate(x_test, y_test, **e['args'])
        # weights = model.get_weights()
        config = model.get_config()
        logger_service.log_train_end(result_sds,
                                     model_config=config,
                                     score=score,
                                     history=history.history)

        return {'score': score, 'history': history.history}
Example #6
def mnist_mlp(conf, input, **kw):
    result_sds = kw.pop('result_sds', None)
    project_id = kw.pop('project_id', None)
    f = conf['fit']
    e = conf['evaluate']
    x_train = input['x_tr']
    y_train = input['y_tr']
    x_val = input['x_te']
    y_val = input['y_te']
    x_test = input['x_te']
    y_test = input['y_te']
    num_classes = y_train.shape[1]

    # determine the format of the img data
    x_train_shape = x_train.shape
    if x_train_shape[1] > 3:
        # format is (None, img_rows, img_cols, 1)
        x_train = x_train.reshape(-1, x_train_shape[1] * x_train_shape[2])
        x_test = x_test.reshape(-1, x_train_shape[1] * x_train_shape[2])
        x_val = x_test
    else:
        # format is (None, 1, img_rows, img_cols)
        x_train = x_train.reshape(-1, x_train_shape[2] * x_train_shape[3])
        x_test = x_test.reshape(-1, x_train_shape[2] * x_train_shape[3])
        x_val = x_test
    # print(x_train.shape)
    with graph.as_default():
        model = Sequential()
        model.add(
            Dense(512, activation='relu', input_shape=(x_train.shape[1], )))
        model.add(Dropout(0.2))
        model.add(Dense(512, activation='relu'))
        model.add(Dropout(0.2))
        model.add(Dense(num_classes, activation='softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer=RMSprop(),
                      metrics=['accuracy'])

        # callback to save metrics
        batch_print_callback = LambdaCallback(
            on_epoch_end=lambda epoch, logs: logger_service.log_epoch_end(
                epoch, logs, result_sds, project_id))

        # checkpoint to save best weight
        best_checkpoint = MongoModelCheckpoint(result_sds=result_sds,
                                               verbose=0,
                                               save_best_only=True)
        # checkpoint to save latest weight
        general_checkpoint = MongoModelCheckpoint(result_sds=result_sds,
                                                  verbose=0)

        # training
        history = model.fit(x_train,
                            y_train,
                            validation_data=(x_val, y_val),
                            callbacks=[
                                batch_print_callback, best_checkpoint,
                                general_checkpoint
                            ],
                            verbose=0,
                            **f['args'])

        score = model.evaluate(x_test, y_test, **e['args'])
        # weights = model.get_weights()
        config = model.get_config()
        logger_service.log_train_end(result_sds,
                                     model_config=config,
                                     score=score,
                                     history=history.history)

        return {'score': score, 'history': history.history}
Example #7
def convnet(conf, input, **kw):
    result_sds = kw.pop('result_sds', None)
    project_id = kw.pop('project_id', None)
    f = conf['fit']
    e = conf['evaluate']
    x_train = input['x_tr']
    y_train = input['y_tr']
    x_val = input['x_te']
    y_val = input['y_te']
    x_test = input['x_te']
    y_test = input['y_te']
    input_shape = x_train.shape[1:]
    output_units = y_train.shape[-1]

    with graph.as_default():
        model = Sequential()
        model.add(Conv2D(32, (3, 3), activation='relu', input_shape=input_shape))
        model.add(Conv2D(32, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(256, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(output_units, activation='softmax'))

        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

        model.compile(loss='categorical_crossentropy',
                      optimizer=sgd,
                      metrics=['accuracy'])

        # callback to save metrics
        batch_print_callback = LambdaCallback(
            on_epoch_end=lambda epoch, logs: logger_service.log_epoch_end(
                epoch, logs, result_sds, project_id))

        # checkpoint to save best weight
        best_checkpoint = MongoModelCheckpoint(result_sds=result_sds, verbose=0,
                                               save_best_only=True)
        # checkpoint to save latest weight
        general_checkpoint = MongoModelCheckpoint(result_sds=result_sds,
                                                  verbose=0)

        # training
        history = model.fit(x_train, y_train,
                            validation_data=(x_val, y_val),
                            callbacks=[batch_print_callback, best_checkpoint,
                                       general_checkpoint],
                            verbose=0,
                            **f['args'])

        score = model.evaluate(x_test, y_test, **e['args'])

        config = model.get_config()
        logger_service.log_train_end(result_sds,
                                     model_config=config,
                                     score=score,
                                     history=history.history)

        return {'score': score, 'history': history.history}
Example #8
def keras_seq(conf, input, **kw):
    """
    a general implementation of sequential model of keras
    :param conf: config dict
    :return:
    """
    result_sds = kw.pop('result_sds', None)
    project_id = kw.pop('project_id', None)
    job_id = kw.pop('job_id', None)
    project = project_business.get_by_id(project_id)
    ow = ownership_business.get_ownership_by_owned_item(project, 'project')
    user_ID = ow.user.user_ID
    print('conf')
    print(conf)
    result_dir = kw.pop('result_dir', None)
    if result_sds is None:
        raise RuntimeError('no result sds id passed to model')
    if project_id is None:
        raise RuntimeError('no project id passed to model')

    with graph.as_default():
        model = Sequential()

        ls = conf['layers']
        comp = conf['compile']
        f = conf['fit']
        e = conf['evaluate']
        x_train = input['x_tr']
        y_train = input['y_tr']
        x_val = input['x_te']
        y_val = input['y_te']
        x_test = input['x_te']
        y_test = input['y_te']

        training_logger = logger_service.TrainingLogger(f['args']['epochs'],
                                                        project_id,
                                                        job_id,
                                                        user_ID,
                                                        result_sds)

        # TODO add validator
        # op = comp['optimizer']

        # loop to add layers
        for l in ls:
            # get layer class from keras
            layer_class = getattr(layers, l['name'])
            # add layer
            model.add(layer_class(**l['args']))

        # optimiser
        # sgd_class = getattr(optimizers, op['name'])
        # sgd = sgd_class(**op['args'])

        # define the metrics
        # compile
        model.compile(**comp['args'])

        # callback to save metrics
        batch_print_callback = LambdaCallback(on_epoch_begin=
                                              lambda epoch, logs:
                                              training_logger.log_epoch_begin(
                                                  epoch, logs),
                                              on_epoch_end=
                                              lambda epoch, logs:
                                              training_logger.log_epoch_end(
                                                  epoch, logs),
                                              on_batch_end=
                                              lambda batch, logs:
                                              training_logger.log_batch_end(
                                                  batch, logs)
                                              )

        # checkpoint to save best weight
        best_checkpoint = MyModelCheckpoint(
            os.path.abspath(os.path.join(result_dir, 'best.hdf5')),
            save_weights_only=True,
            verbose=1, save_best_only=True)
        # checkpoint to save latest weight
        general_checkpoint = MyModelCheckpoint(
            os.path.abspath(os.path.join(result_dir, 'latest.hdf5')),
            save_weights_only=True,
            verbose=1)

        # training
        history = model.fit(x_train, y_train,
                            validation_data=(x_val, y_val),
                            callbacks=[batch_print_callback, best_checkpoint,
                                       general_checkpoint],
                            verbose=0,
                            **f['args'])

        # testing
        score = model.evaluate(x_test, y_test, **e['args'])
        # weights = model.get_weights()
        config = model.get_config()
        logger_service.log_train_end(result_sds,
                                     model_config=config,
                                     score=score,
                                     history=history.history)
        keras_saved_model.save_model(result_dir, model)
        return {'score': score, 'history': history.history}
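Because keras_seq builds the whole model from conf, the config also carries the layer list and compile arguments. A hedged sketch of one possible conf (illustrative values only; layer names must match classes in keras.layers and args must match their constructors):

conf = {
    'layers': [
        {'name': 'Dense', 'args': {'units': 64, 'activation': 'relu',
                                   'input_shape': (20,)}},
        {'name': 'Dropout', 'args': {'rate': 0.5}},
        {'name': 'Dense', 'args': {'units': 2, 'activation': 'softmax'}},
    ],
    'compile': {'args': {'loss': 'categorical_crossentropy',
                         'optimizer': 'rmsprop',
                         'metrics': ['accuracy']}},
    'fit': {'args': {'epochs': 10, 'batch_size': 32}},
    'evaluate': {'args': {'batch_size': 32}},
}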
Example #9
def neural_style_transfer(args, project_id, file_url):
    # Path to the image to transform.
    base_image_path = args.get('base_image_path')
    # Path to the style reference image.
    style_reference_image_path = args.get('style_reference_image_path')
    # Prefix for the saved results.
    result_prefix = args.get('result_prefix')
    # Number of iterations to run.
    iterations = args.get('iter', 3)
    # these are the weights of the different loss components
    # content_weight
    content_weight = args.get('content_weight', 0.025)
    # Style weight.
    style_weight = args.get('style_weight', 2.0)
    # Total Variation weight.
    total_variation_weight = args.get('tv_weight', 1.0)

    # dimensions of the generated picture.
    width, height = load_img(base_image_path).size
    img_nrows = 400
    img_ncols = int(width * img_nrows / height)

    with graph.as_default():
        # this Evaluator class makes it possible
        # to compute loss and gradients in one pass
        # while retrieving them via two separate functions,
        # "loss" and "grads". This is done because scipy.optimize
        # requires separate functions for loss and gradients,
        # but computing them separately would be inefficient.
        class Evaluator(object):
            def __init__(self):
                self.loss_value = None
                self.grad_values = None

            def loss(self, x):
                assert self.loss_value is None
                loss_value, grad_values = eval_loss_and_grads(x)
                self.loss_value = loss_value
                self.grad_values = grad_values
                return self.loss_value

            def grads(self, x):
                assert self.loss_value is not None
                grad_values = np.copy(self.grad_values)
                self.loss_value = None
                self.grad_values = None
                return grad_values

        # util function to open, resize and format pictures into appropriate
        # tensors
        # this step converts the image channel order from RGB to BGR,
        # mainly because the VGG model was originally trained with Caffe,
        # which used OpenCV to load images, and OpenCV loads in BGR order,
        # so input images for VGG need to be converted to BGR
        def preprocess_image(image_path):
            img = load_img(image_path, target_size=(img_nrows, img_ncols))
            img = img_to_array(img)
            img = np.expand_dims(img, axis=0)
            img = vgg19.preprocess_input(img)
            return img

        # util function to convert a tensor into a valid image
        # this step converts back from BGR to RGB
        def deprocess_image(x):
            if K.image_data_format() == 'channels_first':
                x = x.reshape((3, img_nrows, img_ncols))
                x = x.transpose((1, 2, 0))
            else:
                x = x.reshape((img_nrows, img_ncols, 3))
            # Remove zero-center by mean pixel
            x[:, :, 0] += 103.939
            x[:, :, 1] += 116.779
            x[:, :, 2] += 123.68
            # 'BGR'->'RGB'
            x = x[:, :, ::-1]
            x = np.clip(x, 0, 255).astype('uint8')
            return x

        # compute the neural style loss
        # first we need to define 4 util functions

        # the gram matrix of an image tensor (feature-wise outer product)
        def gram_matrix(x):
            assert K.ndim(x) == 3
            if K.image_data_format() == 'channels_first':
                features = K.batch_flatten(x)
            else:
                features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
            gram = K.dot(features, K.transpose(features))
            return gram

        # the "style loss" is designed to maintain
        # the style of the reference image in the generated image.
        # It is based on the gram matrices (which capture style) of
        # feature maps from the style reference image
        # and from the generated image
        def style_loss(style, combination):
            assert K.ndim(style) == 3
            assert K.ndim(combination) == 3
            S = gram_matrix(style)
            C = gram_matrix(combination)
            channels = 3
            size = img_nrows * img_ncols
            return K.sum(K.square(S - C)) / (4. * (channels**2) * (size**2))

        # an auxiliary loss function
        # designed to maintain the "content" of the
        # base image in the generated image
        def content_loss(base, combination):
            return K.sum(K.square(combination - base))

        # the 3rd loss function, total variation loss,
        # designed to keep the generated image locally coherent
        def total_variation_loss(x):
            assert K.ndim(x) == 4
            if K.image_data_format() == 'channels_first':
                a = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] -
                             x[:, :, 1:, :img_ncols - 1])
                b = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] -
                             x[:, :, :img_nrows - 1, 1:])
            else:
                a = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] -
                             x[:, 1:, :img_ncols - 1, :])
                b = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] -
                             x[:, :img_nrows - 1, 1:, :])
            return K.sum(K.pow(a + b, 1.25))

        def eval_loss_and_grads(x):
            if K.image_data_format() == 'channels_first':
                x = x.reshape((1, 3, img_nrows, img_ncols))
            else:
                x = x.reshape((1, img_nrows, img_ncols, 3))
            outs = f_outputs([x])
            loss_value = outs[0]
            if len(outs[1:]) == 1:
                grad_values = outs[1].flatten().astype('float64')
            else:
                grad_values = np.array(outs[1:]).flatten().astype('float64')
            return loss_value, grad_values

        # get tensor representations of our images
        base_image = K.variable(preprocess_image(base_image_path))
        style_reference_image = K.variable(
            preprocess_image(style_reference_image_path))

        # this will contain our generated image
        if K.image_data_format() == 'channels_first':
            combination_image = K.placeholder((1, 3, img_nrows, img_ncols))
        else:
            combination_image = K.placeholder((1, img_nrows, img_ncols, 3))

        # combine the 3 images into a single Keras tensor
        input_tensor = K.concatenate(
            [base_image, style_reference_image, combination_image], axis=0)

        # build the VGG19 network with our 3 images as input
        # the model will be loaded with pre-trained ImageNet weights
        model = vgg19.VGG19(input_tensor=input_tensor,
                            weights='imagenet',
                            include_top=False)
        print('Model loaded.')

        # get the symbolic outputs of each "key" layer (we gave them unique names).
        outputs_dict = dict([(layer.name, layer.output)
                             for layer in model.layers])

        # combine these loss functions into a single scalar
        loss = K.variable(0.)
        layer_features = outputs_dict['block5_conv2']
        base_image_features = layer_features[0, :, :, :]
        combination_features = layer_features[2, :, :, :]
        loss += content_weight * content_loss(base_image_features,
                                              combination_features)

        feature_layers = [
            'block1_conv1', 'block2_conv1', 'block3_conv1', 'block4_conv1',
            'block5_conv1'
        ]
        for layer_name in feature_layers:
            layer_features = outputs_dict[layer_name]
            style_reference_features = layer_features[1, :, :, :]
            combination_features = layer_features[2, :, :, :]
            sl = style_loss(style_reference_features, combination_features)
            loss += (style_weight / len(feature_layers)) * sl
        loss += total_variation_weight * total_variation_loss(
            combination_image)

        # get the gradients of the generated image wrt the loss
        grads = K.gradients(loss, combination_image)

        outputs = [loss]
        if isinstance(grads, (list, tuple)):
            outputs += grads
        else:
            outputs.append(grads)

        f_outputs = K.function([combination_image], outputs)

        evaluator = Evaluator()

        # run scipy-based optimization (L-BFGS) over the pixels of the generated
        #  image
        # so as to minimize the neural style loss
        x = preprocess_image(base_image_path)
        url = ''
        for i in range(iterations):
            print('Start of iteration', i)
            start_time = time.time()
            x, min_val, info = fmin_l_bfgs_b(evaluator.loss,
                                             x.flatten(),
                                             fprime=evaluator.grads,
                                             maxfun=20)
            print('Current loss value:', min_val)
            # save current generated image
            img = deprocess_image(x.copy())
            fname = result_prefix + '_at_iteration_%d.png' % i
            imsave(fname, img)
            end_time = time.time()
            url = file_url + 'result_at_iteration_{}.png?predict=true'.format(
                i)
            logger_service.emit_message_url({'url': url, 'n': i}, project_id)
            print('Image saved as', fname)
            print('Iteration %d completed in %ds' % (i, end_time - start_time))
        return {'url': url, 'n': iterations}
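A hypothetical invocation with placeholder paths and URL; the args keys mirror the ones read at the top of the function:

args = {
    'base_image_path': '/data/base.jpg',
    'style_reference_image_path': '/data/style.jpg',
    'result_prefix': 'result',
    'iter': 3,
    'content_weight': 0.025,
    'style_weight': 2.0,
    'tv_weight': 1.0,
}
result = neural_style_transfer(args, 'my_project_id', 'http://example.com/files/')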
Example #10
def imdb_lstm(conf, input, **kw):
    result_sds = kw.pop('result_sds', None)
    project_id = kw.pop('project_id', None)
    f = conf['fit']
    e = conf['evaluate']
    x_train = input['x_tr']
    y_train = input['y_tr']
    x_val = input['x_te']
    y_val = input['y_te']
    x_test = input['x_te']
    y_test = input['y_te']
    max_features = input['max_features']
    maxlen = input['maxlen']
    x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
    x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
    x_val = x_test

    with graph.as_default():
        model = Sequential()
        model.add(Embedding(max_features, 128))
        model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
        model.add(Dense(1, activation='sigmoid'))

        # try using different optimizers and different optimizer configs
        model.compile(loss='binary_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])

        # callback to save metrics
        batch_print_callback = LambdaCallback(
            on_epoch_end=lambda epoch, logs: logger_service.log_epoch_end(
                epoch, logs, result_sds, project_id))

        # checkpoint to save best weight
        best_checkpoint = MongoModelCheckpoint(result_sds=result_sds,
                                               verbose=0,
                                               save_best_only=True)
        # checkpoint to save latest weight
        general_checkpoint = MongoModelCheckpoint(result_sds=result_sds,
                                                  verbose=0)

        # training
        history = model.fit(x_train,
                            y_train,
                            validation_data=(x_val, y_val),
                            callbacks=[
                                batch_print_callback, best_checkpoint,
                                general_checkpoint
                            ],
                            verbose=0,
                            **f['args'])

        score = model.evaluate(x_test, y_test, **e['args'])
        # weights = model.get_weights()
        config = model.get_config()
        logger_service.log_train_end(result_sds,
                                     model_config=config,
                                     score=score,
                                     history=history.history)

        return {'score': score, 'history': history.history}
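The imdb_* examples all read the same input keys. A minimal sketch of how conf and input could be assembled for imdb_lstm, assuming the standard Keras IMDB loader (the result_sds/project_id kwargs are omitted here):

from keras.datasets import imdb

max_features, maxlen = 20000, 80
(x_tr, y_tr), (x_te, y_te) = imdb.load_data(num_words=max_features)
input = {'x_tr': x_tr, 'y_tr': y_tr, 'x_te': x_te, 'y_te': y_te,
         'max_features': max_features, 'maxlen': maxlen}
conf = {'fit': {'args': {'epochs': 2, 'batch_size': 32}},
        'evaluate': {'args': {'batch_size': 32}}}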
Example #11
def imdb_fasttext(conf, input, **kw):
    result_sds = kw.pop('result_sds', None)
    project_id = kw.pop('project_id', None)
    f = conf['fit']
    e = conf['evaluate']
    x_train = input['x_tr']
    y_train = input['y_tr']
    x_val = input['x_te']
    y_val = input['y_te']
    x_test = input['x_te']
    y_test = input['y_te']
    # Set parameters:
    # ngram_range = 2 will add bi-grams features
    ngram_range = input['ngram_range']
    max_features = input['max_features']
    maxlen = input['maxlen']
    embedding_dims = 50

    def create_ngram_set(input_list, ngram_value=2):
        """
        Extract a set of n-grams from a list of integers.
        >>> create_ngram_set([1, 4, 9, 4, 1, 4], ngram_value=2)
        {(4, 9), (4, 1), (1, 4), (9, 4)}
        >>> create_ngram_set([1, 4, 9, 4, 1, 4], ngram_value=3)
        {(1, 4, 9), (4, 9, 4), (9, 4, 1), (4, 1, 4)}
        """
        return set(zip(*[input_list[i:] for i in range(ngram_value)]))

    def add_ngram(sequences, token_indice, ngram_range=2):
        """
        Augment the input list of list (sequences) by appending n-grams values.
        Example: adding bi-gram
            sequences = [[1, 3, 4, 5], [1, 3, 7, 9, 2]]
            token_indice = {(1, 3): 1337, (9, 2): 42, (4, 5): 2017}
            add_ngram(sequences, token_indice, ngram_range=2)
            [[1, 3, 4, 5, 1337, 2017], [1, 3, 7, 9, 2, 1337, 42]]
        Example: adding tri-gram
            sequences = [[1, 3, 4, 5], [1, 3, 7, 9, 2]]
            token_indice = {(1, 3): 1337, (9, 2): 42, (4, 5): 2017, (7, 9, 2): 2018}
            add_ngram(sequences, token_indice, ngram_range=3)
            [[1, 3, 4, 5, 1337], [1, 3, 7, 9, 2, 1337, 2018]]
        """
        new_sequences = []
        for input_list in sequences:
            new_list = input_list[:]
            for i in range(len(new_list) - ngram_range + 1):
                for ngram_value in range(2, ngram_range + 1):
                    ngram = tuple(new_list[i:i + ngram_value])
                    if ngram in token_indice:
                        new_list.append(token_indice[ngram])
            new_sequences.append(new_list)

        return new_sequences

    if ngram_range > 1:
        print('Adding {}-gram features'.format(ngram_range))
        # Create set of unique n-gram from the training set.
        ngram_set = set()
        for input_list in x_train:
            for i in range(2, ngram_range + 1):
                set_of_ngram = create_ngram_set(input_list, ngram_value=i)
                ngram_set.update(set_of_ngram)

        # Dictionary mapping n-gram token to a unique integer.
        # Integer values are greater than max_features in order
        # to avoid collision with existing features.
        start_index = max_features + 1
        token_indice = {v: k + start_index for k, v in enumerate(ngram_set)}
        indice_token = {token_indice[k]: k for k in token_indice}

        # max_features is the highest integer that could be found in the
        # dataset.
        max_features = np.max(list(indice_token.keys())) + 1

        # Augmenting x_train and x_test with n-grams features
        x_train = add_ngram(x_train, token_indice, ngram_range)
        x_test = add_ngram(x_test, token_indice, ngram_range)
        # print('Average train sequence length: {}'.format(
        #     np.mean(list(map(len, x_train)), dtype=int)))
        # print('Average test sequence length: {}'.format(
        #     np.mean(list(map(len, x_test)), dtype=int)))

    print('Pad sequences (samples x time)')
    x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
    x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
    x_val = x_test
    print('x_train shape:', x_train.shape)
    print('x_test shape:', x_test.shape)

    with graph.as_default():
        model = Sequential()
        # we start off with an efficient embedding layer which maps
        # our vocab indices into embedding_dims dimensions
        model.add(Embedding(max_features,
                            embedding_dims,
                            input_length=maxlen))

        # we add a GlobalAveragePooling1D, which will average the embeddings
        # of all words in the document
        model.add(GlobalAveragePooling1D())

        # We project onto a single unit output layer, and squash it with a
        # sigmoid:
        model.add(Dense(1, activation='sigmoid'))

        model.compile(loss='binary_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])

        # callback to save metrics
        batch_print_callback = LambdaCallback(on_epoch_end=
                                              lambda epoch, logs:
                                              logger_service.log_epoch_end(
                                                  epoch, logs,
                                                  result_sds,
                                                  project_id))

        # checkpoint to save best weight
        best_checkpoint = MongoModelCheckpoint(result_sds=result_sds,
                                               verbose=0,
                                               save_best_only=True)
        # checkpoint to save latest weight
        general_checkpoint = MongoModelCheckpoint(result_sds=result_sds,
                                                  verbose=0)

        # training
        history = model.fit(x_train, y_train,
                            validation_data=(x_val, y_val),
                            callbacks=[batch_print_callback, best_checkpoint,
                                       general_checkpoint],
                            verbose=0,
                            **f['args'])

        score = model.evaluate(x_test, y_test, **e['args'])
        # weights = model.get_weights()
        config = model.get_config()
        logger_service.log_train_end(result_sds,
                                     model_config=config,
                                     score=score,
                                     history=history.history)

        return {
            'score': score,
            'history': history.history}
Example #12
def imdb_cnn_lstm(conf, input, **kw):
    result_sds = kw.pop('result_sds', None)
    project_id = kw.pop('project_id', None)
    f = conf['fit']
    e = conf['evaluate']
    x_train = input['x_tr']
    y_train = input['y_tr']
    x_val = input['x_te']
    y_val = input['y_te']
    x_test = input['x_te']
    y_test = input['y_te']

    # Embedding
    embedding_size = 128
    max_features = input['max_features']
    maxlen = input['maxlen']
    x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
    x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
    x_val = x_test
    # set parameters:

    # Convolution
    kernel_size = 5
    filters = 64
    pool_size = 4

    # LSTM
    lstm_output_size = 70

    with graph.as_default():
        model = Sequential()
        model.add(Embedding(max_features, embedding_size, input_length=maxlen))
        model.add(Dropout(0.25))
        model.add(Conv1D(filters,
                         kernel_size,
                         padding='valid',
                         activation='relu',
                         strides=1))
        model.add(MaxPooling1D(pool_size=pool_size))
        model.add(LSTM(lstm_output_size))
        model.add(Dense(1))
        model.add(Activation('sigmoid'))

        model.compile(loss='binary_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])

        # callback to save metrics
        batch_print_callback = LambdaCallback(on_epoch_end=
                                              lambda epoch, logs:
                                              logger_service.log_epoch_end(
                                                  epoch, logs,
                                                  result_sds,
                                                  project_id))

        # checkpoint to save best weight
        best_checkpoint = MongoModelCheckpoint(result_sds=result_sds,
                                               verbose=0,
                                               save_best_only=True)
        # checkpoint to save latest weight
        general_checkpoint = MongoModelCheckpoint(result_sds=result_sds,
                                                  verbose=0)

        # training
        history = model.fit(x_train, y_train,
                            validation_data=(x_val, y_val),
                            callbacks=[batch_print_callback, best_checkpoint,
                                       general_checkpoint],
                            verbose=0,
                            **f['args'])

        score = model.evaluate(x_test, y_test, **e['args'])
        # weights = model.get_weights()
        config = model.get_config()
        logger_service.log_train_end(result_sds,
                                     model_config=config,
                                     score=score,
                                     history=history.history)

        return {
            'score': score,
            'history': history.history}
Example #13
def imdb_cnn(conf, input, **kw):
    result_sds = kw.pop('result_sds', None)
    project_id = kw.pop('project_id', None)
    f = conf['fit']
    e = conf['evaluate']
    x_train = input['x_tr']
    y_train = input['y_tr']
    x_val = input['x_te']
    y_val = input['y_te']
    x_test = input['x_te']
    y_test = input['y_te']
    max_features = input['max_features']
    maxlen = input['maxlen']
    x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
    x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
    x_val = x_test
    # set parameters:
    embedding_dims = 50
    filters = 250
    kernel_size = 3
    hidden_dims = 250

    with graph.as_default():
        model = Sequential()
        model.add(Embedding(max_features, embedding_dims, input_length=maxlen))
        model.add(Dropout(0.2))

        # we add a Convolution1D, which will learn filters
        # word group filters of size filter_length:
        model.add(
            Conv1D(filters,
                   kernel_size,
                   padding='valid',
                   activation='relu',
                   strides=1))
        # we use max pooling:
        model.add(GlobalMaxPooling1D())

        # We add a vanilla hidden layer:
        model.add(Dense(hidden_dims))
        model.add(Dropout(0.2))
        model.add(Activation('relu'))

        # We project onto a single unit output layer, and squash it with a
        # sigmoid:
        model.add(Dense(1))
        model.add(Activation('sigmoid'))

        model.compile(loss='binary_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])

        # callback to save metrics
        batch_print_callback = LambdaCallback(
            on_epoch_end=lambda epoch, logs: logger_service.log_epoch_end(
                epoch, logs, result_sds, project_id))

        # checkpoint to save best weight
        best_checkpoint = MongoModelCheckpoint(result_sds=result_sds,
                                               verbose=0,
                                               save_best_only=True)
        # checkpoint to save latest weight
        general_checkpoint = MongoModelCheckpoint(result_sds=result_sds,
                                                  verbose=0)

        # training
        history = model.fit(x_train,
                            y_train,
                            validation_data=(x_val, y_val),
                            callbacks=[
                                batch_print_callback, best_checkpoint,
                                general_checkpoint
                            ],
                            verbose=0,
                            **f['args'])

        score = model.evaluate(x_test, y_test, **e['args'])
        # weights = model.get_weights()
        config = model.get_config()
        logger_service.log_train_end(result_sds,
                                     model_config=config,
                                     score=score,
                                     history=history.history)

        return {'score': score, 'history': history.history}