Example #1
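Throughout these examples, KL, K, regularizers, and TFModel are assumed to come from tensorflow.keras, and helpers such as dense_unit, residual_dense_unit, PreprocessingLayer, and ConditionPass are defined elsewhere. A minimal sketch of the shared imports and of dense_unit follows; the dense_unit body is inferred from its call sites below, not from the original source:

import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras import layers as KL
from tensorflow.keras import regularizers
from tensorflow.keras.models import Model as TFModel

def dense_unit(x, units, dropout=0.45, batch_norm=False,
               leak_relu=True, relu_alpha=0.1, weight_decay=0.0):
    # Assumed block: Dense -> (BatchNorm) -> (Leaky)ReLU -> (Dropout).
    x = KL.Dense(units,
                 kernel_regularizer=regularizers.l2(weight_decay))(x)
    if batch_norm:
        x = KL.BatchNormalization()(x)
    x = KL.LeakyReLU(alpha=relu_alpha)(x) if leak_relu else KL.ReLU()(x)
    if dropout > 0:
        x = KL.Dropout(dropout)(x)
    return x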
def rate_size_fusion(time_step=60, dropout=0.45):
    # two branches
    def mlp_branch(x):
        nb_filters = [256, 256]
        for nb_filter in nb_filters:
            x = dense_unit(x, nb_filter, dropout)
        return x

    # rate branch
    rates = KL.Input(shape=(time_step, 30))
    rates_x = KL.Flatten()(rates)
    rates_x = mlp_branch(rates_x)

    # size branch
    sizes = KL.Input(shape=(time_step, 30))
    sizes_x = KL.Flatten()(sizes)
    sizes_x = mlp_branch(sizes_x)

    # fusion
    x = KL.concatenate([rates_x, sizes_x])

    # outputs
    x = dense_unit(x, 1024, dropout=dropout)
    x = dense_unit(x, 2048, dropout=0)
    x = KL.Dense(1, activation='linear')(x)

    return TFModel(inputs=[rates, sizes], outputs=x)
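A quick smoke test of the fusion model above (the optimizer and loss choices are illustrative, assuming the imports sketched under Example #1):

model = rate_size_fusion(time_step=60)
model.compile(optimizer='adam', loss='mse')
model.summary()  # two (60, 30) inputs, one linear regression output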
Example #2
def simple_cnn():
    # inputs : [features, timestep]
    inputs = KL.Input(shape=(62, 60))

    scale = 1

    def conv_unit(x, units, with_pooling=True):
        x = KL.Conv1D(units * scale, 3)(x)
        x = KL.ReLU()(x)
        if with_pooling:
            x = KL.MaxPooling1D()(x)
        return x

    x = inputs
    x = conv_unit(x, 64)
    x = conv_unit(x, 64)
    x = conv_unit(x, 32)

    x = KL.Flatten()(x)

    x = KL.Dense(128, activation='relu')(x)
    x = KL.Dense(11, activation='softmax')(x)

    outputs = x

    return TFModel(inputs=inputs, outputs=outputs)
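For reference, the sequence length through the stack above (valid convolutions with kernel 3, pooling by 2): 62 -> 60 -> 30 -> 28 -> 14 -> 12 -> 6, so Flatten sees a (6, 32) map, i.e. 192 units. A quick check:

model = simple_cnn()
assert model.output_shape == (None, 11)  # 11-way softmax head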
Example #3
def simple_model(time_step, regression=False):
    # inputs: [time_step, features]
    inputs = KL.Input(shape=(time_step, 60))
    x = inputs

    #
    def dense_unit(x, units, dropout=0.45):
        regularizer = regularizers.l2(0.001)  # keras.regularizers, not keras.layers
        x = KL.TimeDistributed(
            KL.Dense(units, activation='relu',
                     kernel_regularizer=regularizer))(x)
        if dropout > 0:
            x = KL.TimeDistributed(KL.Dropout(dropout))(x)

        return x

    x = dense_unit(x, 256)
    x = dense_unit(x, 512)
    x = dense_unit(x, 1024, dropout=0)

    # x = KL.LSTM(128, return_sequences=True)(x)
    x = KL.LSTM(256)(x)

    if regression:
        x = KL.Dense(1, activation='linear')(x)
    else:
        x = KL.Dense(2, activation='softmax')(x)

    model = TFModel(inputs=inputs, outputs=x)

    return model
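Note that KL.TimeDistributed(KL.Dense(...)) on a 3-D tensor behaves the same as applying KL.Dense directly, because Dense already maps over the last axis; the wrapper only makes the per-timestep intent explicit. Building the classification variant (compile settings are illustrative):

model = simple_model(time_step=60)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])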
Example #4
def deep_lob(time_steps=100, nb_features=40, regression=False):
    # inputs : [time_steps, nb_features]
    inputs = KL.Input(shape=(time_steps, nb_features, 1))

    def conv_1d(x, nb_filter, kernel_size, stride=1, by_time=True):
        # by_time selects the convolution axis: True convolves along the
        # time axis with 'same' padding, False along the feature axis
        # with 'valid' padding.
        by_time = int(by_time)
        padding = ['valid', 'same'][by_time]
        kernel_size = [(1, kernel_size), (kernel_size, 1)][by_time]
        stride = [(1, stride), (stride, 1)][by_time]

        x = KL.Conv2D(nb_filter, kernel_size, strides=stride,
                      padding=padding)(x)
        x = KL.LeakyReLU(alpha=0.01)(x)

        return x

    def conv_block(x, feature_size, stride=2):
        x = conv_1d(x, 16, feature_size, stride=stride, by_time=False)
        x = conv_1d(x, 16, 4)
        x = conv_1d(x, 16, 4)
        return x

    def inception_module(x):
        x0 = conv_1d(x, 32, 1)
        x0 = conv_1d(x0, 32, 3)

        x1 = conv_1d(x, 32, 1)
        x1 = conv_1d(x1, 32, 5)

        x2 = KL.MaxPooling2D(pool_size=(3, 1), strides=1, padding='same')(x)
        x2 = conv_1d(x2, 32, 1)

        return KL.Concatenate()([x0, x1, x2])

    def keras_squeeze(x):
        return K.squeeze(x, axis=2)

    def keras_squeeze_output_shape(input_shape):
        return (input_shape[0], input_shape[1], input_shape[3])

    #
    x = inputs
    # the last kernel spans the remaining feature columns (15 when
    # nb_features=60); hard-coding 15 breaks the default nb_features=40
    features = [2, 2, nb_features // 4]
    strides = [2, 2, 1]
    for feature, stride in zip(features, strides):
        x = conv_block(x, feature, stride)

    x = inception_module(x)
    x = KL.Lambda(keras_squeeze, output_shape=keras_squeeze_output_shape)(x)
    x = KL.LSTM(64)(x)

    if regression:
        x = KL.Dense(1, activation='linear')(x)
    else:
        x = KL.Dense(3, activation='softmax')(x)

    return TFModel(inputs=inputs, outputs=x)
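With nb_features=40 the feature axis shrinks 40 -> 20 -> 10 -> 1 across the three conv blocks, the inception module concatenates three 32-channel branches into 96, and the Lambda squeeze removes the width-1 feature axis so the LSTM reads (time_steps, 96). A shape check:

model = deep_lob()
assert model.output_shape == (None, 3)  # 3-class softmax by default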
Example #5
    def _build_model(self, **kwargs):
        self.built = True
        # arguments
        batch_size, input_shape, dropout, time_steps, model_type, norm_window_size = \
            self._parse_args([
            "batch_size", "input_shape", "dropout", "time_steps", "model_type", "norm_window_size"
        ], **kwargs)
        levels = self._parse_args("levels", **kwargs)
        nb_features = levels * 2
        preprocessing = self._parse_args("preprocessing")

        # inputs: recomputed from the window sizes (the input_shape
        # parsed above is overwritten here)
        input_shape = (time_steps + norm_window_size - 1, nb_features)
        # inputs: [time_step, features]
        inputs = KL.Input(batch_shape=(batch_size, ) + input_shape)

        # lstm branch
        # dimension shuffle
        lstm_x = inputs
        if preprocessing is not None:
            lstm_x = PreprocessingLayer(time_steps=time_steps,
                                        norm_window_size=norm_window_size,
                                        nb_features=nb_features,
                                        batch_size=batch_size)(lstm_x)

        lstm_x = KL.Reshape((1, time_steps * input_shape[1]))(lstm_x)
        # lstm
        lstm_x = KL.LSTM(units=128)(lstm_x)
        lstm_x = KL.Dropout(dropout)(lstm_x)

        # fcn branch
        def conv_unit(x, units, kernel_size=3, batch_norm=True):
            x = KL.Conv1D(units,
                          kernel_size,
                          padding='valid',
                          kernel_initializer='he_uniform')(x)
            if batch_norm:
                x = KL.BatchNormalization()(x)
            x = KL.ReLU()(x)
            return x

        fcn_x = inputs
        fcn_x = conv_unit(fcn_x, 128, kernel_size=8)
        fcn_x = conv_unit(fcn_x, 256, kernel_size=5)
        fcn_x = conv_unit(fcn_x, 128, kernel_size=3)
        fcn_x = KL.GlobalAveragePooling1D()(fcn_x)

        # concat
        x = KL.concatenate([lstm_x, fcn_x])
        if model_type == 'regression':
            x = KL.Dense(1, activation='linear')(x)
        else:
            x = KL.Dense(2, activation='softmax')(x)

        return TFModel(inputs=inputs, outputs=x)
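PreprocessingLayer is not defined in these snippets. Judging from its arguments (time_steps, norm_window_size, nb_features, batch_size), one plausible reading is a trailing-window z-score normalizer; the sketch below is purely an assumption, not the original layer:

import tensorflow as tf
from tensorflow.keras import layers as KL

class PreprocessingLayer(KL.Layer):
    # Hypothetical: z-score each of the last time_steps frames against
    # its trailing norm_window_size window of raw features.
    def __init__(self, time_steps=15, norm_window_size=10,
                 nb_features=60, batch_size=None, **kwargs):
        super().__init__(**kwargs)
        self.time_steps = time_steps
        self.norm_window_size = norm_window_size

    def call(self, x):
        # x: (batch, time_steps + norm_window_size - 1, nb_features)
        windows = tf.stack(
            [x[:, i:i + self.norm_window_size, :]
             for i in range(self.time_steps)], axis=1)
        mean = tf.reduce_mean(windows, axis=2)
        std = tf.math.reduce_std(windows, axis=2)
        last = windows[:, :, -1, :]  # the frame being normalized
        return (last - mean) / (std + 1e-8)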
Example #6
    def get_intermediate_model(self, layer_name):
        """Construct intermediate model from current model

        Args:
            layer_name (str): name of layer to construct model from

        Returns:
            Model: tensorflow.keras.models.Model object.
        """
        intermediate_model = TFModel(
            inputs=self.model.input, outputs=self.model.get_layer(layer_name).output)
        return intermediate_model
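Typical use: extract activations from a named layer for inspection. The layer name and variables below are illustrative:

feature_model = wrapper.get_intermediate_model('lstm')
features = feature_model.predict(x_batch)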
Example #7
    def build_model(self, **kwargs):
        # set status
        # K.clear_session()
        self.built = True
        # parse args
        batch_size, dropout, time_steps, model_type, norm_window_size, relu_alpha = \
            self._parse_args([
            "batch_size", "dropout", "time_steps", "model_type", "norm_window_size", "relu_alpha"
        ], **kwargs)
        levels = self._parse_args("levels", **kwargs)
        scale = self._parse_args(["mlp_scale"], **kwargs)
        weight_decay = self._parse_args(["weight_decay"], **kwargs)

        # create model
        nb_features = levels * 2
        input_shape = (time_steps + norm_window_size - 1, nb_features)
        inputs = KL.Input(shape=input_shape)
        x = inputs

        preprocessing = self._parse_args("preprocessing")
        if preprocessing is not None:
            x = PreprocessingLayer(time_steps=time_steps,
                                   norm_window_size=norm_window_size,
                                   nb_features=nb_features,
                                   batch_size=batch_size)(x)

        x = KL.Flatten()(x)
        nb_filters = [32, 64, 256]
        for nb_filter in nb_filters:
            x = dense_unit(x,
                           nb_filter * scale,
                           dropout=dropout,
                           relu_alpha=relu_alpha,
                           weight_decay=weight_decay)
        x = dense_unit(x,
                       512 * scale,
                       dropout=0,
                       relu_alpha=relu_alpha,
                       weight_decay=weight_decay)

        if model_type == 'regression':
            x = KL.Dense(1, activation='tanh', use_bias=True)(x)
            # x = KL.Lambda(lambda x: 5*x)(x)
        else:
            x = KL.Dense(2, activation='softmax')(x)

        outputs = x

        return TFModel(inputs=inputs, outputs=outputs)
Example #8
    def _build_model(self, **kwargs):
        # set status
        K.clear_session()
        self.built = True
        # parse args
        batch_size, dropout, time_steps, model_type, norm_window_size, relu_alpha = \
            self._parse_args([
            "batch_size", "dropout", "time_steps", "model_type", "norm_window_size", "relu_alpha"
        ], **kwargs)
        levels = self._parse_args("levels", **kwargs)
        scale = self._parse_args(["mlp_scale"], **kwargs)  # default 8
        weight_decay = self._parse_args(["weight_decay"], **kwargs)
        pass_alpha = self._parse_args("pass_alpha", **kwargs)

        # create model
        nb_features = levels * 2
        input_shape = (time_steps + norm_window_size - 1, nb_features)
        # inputs: [time_step, features]
        inputs = KL.Input(batch_shape=(batch_size, ) + input_shape)
        x = inputs
        x = KL.Flatten()(x)

        nb_filters = [32, 64, 256]
        for nb_filter in nb_filters:
            x = dense_unit(x,
                           nb_filter * scale,
                           dropout=dropout,
                           relu_alpha=relu_alpha,
                           weight_decay=weight_decay)
        x0 = dense_unit(x,
                        256 * scale,
                        dropout=0,
                        relu_alpha=relu_alpha,
                        weight_decay=weight_decay)
        x1 = dense_unit(x,
                        256 * scale,
                        dropout=0,
                        relu_alpha=relu_alpha,
                        weight_decay=weight_decay)

        x0 = KL.Dense(1, activation='linear')(x0)
        x1 = KL.Dense(1, activation='sigmoid')(x1)
        x = ConditionPass(pass_alpha)([x0, x1])

        outputs = x

        return TFModel(inputs=inputs, outputs=outputs)
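ConditionPass is not shown in these snippets. From the call site, x0 is an unbounded regression value and x1 a sigmoid gate compared against pass_alpha; one plausible implementation (an assumption, not the original) zeroes predictions whose gate falls below the threshold:

import tensorflow as tf
from tensorflow.keras import layers as KL

class ConditionPass(KL.Layer):
    # Hypothetical gating layer: pass x0 where x1 > alpha, else emit 0.
    def __init__(self, alpha=0.5, **kwargs):
        super().__init__(**kwargs)
        self.alpha = alpha

    def call(self, tensors):
        x0, x1 = tensors
        gate = tf.cast(x1 > self.alpha, x0.dtype)
        return x0 * gate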
Example #9
from tensorflow.keras.applications import VGG16
from tensorflow.keras.layers import Flatten, Dropout, Dense, Activation

def build_VGG16(size=(64, 64)):
    model = VGG16(weights='imagenet', include_top=False,
                  input_shape=(size[0], size[1], 3))
    
    for layer in model.layers:
        layer.trainable = False
        
    output = model.output
    output = Flatten()(output)
    output = Dropout(rate=0.5)(output)
    output = Dense(128)(output)
    output = Activation('relu')(output)
    output = Dropout(rate=0.5)(output)
    output = Dense(1)(output)
    output = Activation('sigmoid')(output)
    
    model = TFModel(model.input, output)
    return model
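A usage sketch for the transfer-learning model above (binary target, hence binary cross-entropy; the optimizer choice is illustrative):

model = build_VGG16(size=(64, 64))
model.compile(optimizer='adam', loss='binary_crossentropy',
              metrics=['accuracy'])
# only the new Dense head trains; the VGG16 backbone stays frozen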
Example #10
def lstm_fcn(time_step=60, regression=False, dropout=0.8, batch_norm=True):
    # inputs
    nb_feature = 60
    inputs = KL.Input(shape=(time_step, nb_feature))

    # lstm branch
    # dimension shuffle
    lstm_x = inputs
    lstm_x = KL.Reshape((1, time_step * nb_feature))(lstm_x)
    # lstm
    lstm_x = KL.LSTM(units=128)(lstm_x)
    lstm_x = KL.Dropout(dropout)(lstm_x)

    # fcn branch
    def conv_unit(x, units, kernel_size=3, batch_norm=batch_norm):
        x = KL.Conv1D(units,
                      kernel_size,
                      padding='valid',
                      kernel_initializer='he_uniform')(x)
        if batch_norm:
            x = KL.BatchNormalization()(x)
        x = KL.ReLU()(x)
        return x

    fcn_x = inputs
    fcn_x = conv_unit(fcn_x, 128, kernel_size=8)
    fcn_x = conv_unit(fcn_x, 256, kernel_size=5)
    fcn_x = conv_unit(fcn_x, 128, kernel_size=3)
    fcn_x = KL.GlobalAveragePooling1D()(fcn_x)

    # concat
    x = KL.concatenate([lstm_x, fcn_x])
    if regression:
        x = KL.Dense(1, activation='linear')(x)
    else:
        x = KL.Dense(2, activation='softmax')(x)

    return TFModel(inputs=inputs, outputs=x)
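The Reshape((1, time_step * nb_feature)) above hands the LSTM a single 3600-wide step, so the recurrent branch reduces to one gated dense transform. The LSTM-FCN paper's "dimension shuffle" is normally a transpose instead, letting the LSTM read one step per feature; a sketch of that variant:

lstm_x = KL.Permute((2, 1))(inputs)  # (time, features) -> (features, time)
lstm_x = KL.LSTM(units=128)(lstm_x)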
Example #11
def simple_mlp(time_step=15,
               regression=False,
               dropout=0.45,
               batch_norm=False,
               leaky_relu_alpha=0.1):
    # inputs: [time_step, features]
    nb_features = 60
    norm_window_size = 10
    inputs = KL.Input(batch_shape=(256, time_step + norm_window_size - 1,
                                   nb_features))
    x = PreprocessingLayer(batch_size=256)(inputs)
    x = KL.Flatten()(x)

    scale = 1
    nb_filters = [256, 512, 2048]
    residuals = [False, False, False]
    for nb_filter, residual in zip(nb_filters, residuals):
        if residual:
            x = residual_dense_unit(x, nb_filter * scale, dropout)
        else:
            x = dense_unit(x,
                           nb_filter * scale,
                           dropout=dropout,
                           batch_norm=batch_norm,
                           leak_relu=True,
                           relu_alpha=leaky_relu_alpha)
    x = dense_unit(x, 2048, dropout=0.00, relu_alpha=leaky_relu_alpha)

    if regression:
        x = KL.Dense(1, activation='linear')(x)
    else:
        x = KL.Dense(2, activation='softmax')(x)

    outputs = x

    return TFModel(inputs=inputs, outputs=outputs)
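Because the input is declared with batch_shape, the graph is pinned to batches of exactly 256, so inference must be fed full batches. With time_step=15 and norm_window_size=10 the expected input length is 15 + 10 - 1 = 24 frames. A smoke test (assuming the PreprocessingLayer sketch above):

import numpy as np

model = simple_mlp()
dummy = np.random.rand(256, 24, 60).astype('float32')
preds = model.predict(dummy, batch_size=256)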
Example #12
    def build_model(self, **kwargs):
        # set status
        # K.clear_session()
        self.built = True
        # parse args
        batch_size, dropout, time_steps, model_type, norm_window_size, relu_alpha = \
            self._parse_args([
                "batch_size", "dropout", "time_steps", "model_type", "norm_window_size", "relu_alpha"
            ], **kwargs)
        levels = self._parse_args("levels", **kwargs)
        scale = self._parse_args(["mlp_scale"], **kwargs)
        weight_decay = self._parse_args(["weight_decay"], **kwargs)
        channels = 2

        # create model
        nb_features = levels * 2
        input_shape = (channels, time_steps + norm_window_size - 1,
                       nb_features)
        inputs = KL.Input(shape=input_shape)
        x = inputs

        preprocessing = self._parse_args("preprocessing")
        if preprocessing is not None:
            x = PreprocessingLayer(time_steps=time_steps,
                                   norm_window_size=norm_window_size,
                                   nb_features=nb_features,
                                   batch_size=batch_size)(x)

        x = KL.TimeDistributed(KL.Flatten())(x)

        def timedistributed_dense_unit(_x,
                                       units,
                                       dropout=0.45,
                                       relu_alpha=0.1,
                                       weight_decay=0.0):
            regularizer = regularizers.l2(weight_decay)
            kernel_initializer = {
                0: 'glorot_uniform',
                1: 'he_normal',
                2: 'he_uniform'
            }[0]  # hard-coded selection: glorot_uniform
            _x = KL.TimeDistributed(
                KL.Dense(units,
                         kernel_initializer=kernel_initializer,
                         kernel_regularizer=regularizer))(_x)
            _x = KL.LeakyReLU(alpha=relu_alpha)(_x)
            _x = KL.TimeDistributed(KL.Dropout(dropout))(_x)
            return _x

        nb_filters = [32, 64, 256]
        for nb_filter in nb_filters:
            x = timedistributed_dense_unit(x,
                                           nb_filter * scale,
                                           dropout=dropout,
                                           relu_alpha=relu_alpha,
                                           weight_decay=weight_decay)
        x = timedistributed_dense_unit(x,
                                       512 * scale,
                                       dropout=0,
                                       relu_alpha=relu_alpha,
                                       weight_decay=weight_decay)

        x = KL.TimeDistributed(KL.Dense(1, activation='tanh'))(x)
        # scale the bounded tanh output to the (-5, 5) target range
        x = KL.TimeDistributed(KL.Lambda(lambda x: 5 * x))(x)

        outputs = x

        return TFModel(inputs=inputs, outputs=outputs)