Code example #1
    def generate_model(self):
        """
        Model for MLP multiple regression (s2s)

        :return:
        """

        activation = self.config['arch']['activation']
        dropout = self.config['arch']['drop']
        full_layers = self.config['arch']['full']

        # Extra added from training function
        idimensions = self.config['idimensions']
        odimension = self.config['odimensions']

        # Inputs: past data and future values
        data_input = Input(shape=(idimensions[0]))
        future_input = Input(shape=(idimensions[1]))

        to_mlp = concatenate([data_input, future_input])

        mlp_layers = Dense(full_layers[0])(to_mlp)
        mlp_layers = generate_activation(activation)(mlp_layers)
        mlp_layers = Dropout(rate=dropout)(mlp_layers)
        for units in full_layers[1:]:
            mlp_layers = Dense(units=units)(mlp_layers)
            mlp_layers = generate_activation(activation)(mlp_layers)
            mlp_layers = Dropout(rate=dropout)(mlp_layers)

        output = Dense(odimension, activation='linear')(mlp_layers)
        self.model = Model(inputs=[data_input, future_input], outputs=output)
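A minimal configuration sketch for this two-headed MLP; the keys are the ones generate_model reads above, while the concrete values and shapes are illustrative assumptions, not taken from the repository:

# Hypothetical config for the two-input MLP above (values illustrative)
config = {
    'arch': {
        'activation': 'relu',   # passed to generate_activation
        'drop': 0.2,            # Dropout rate applied after every Dense layer
        'full': [128, 64],      # units of the successive Dense layers
    },
    # added by the training function: shapes of the data and future inputs
    'idimensions': [(24,), (12,)],
    'odimensions': 12,          # width of the final linear layer
}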
Code example #2
File: NBeatsArchitecture.py Project: bejar/Wind
    def main_block(self, input, neurons_input, neurons_forecast,
                   neurons_backcast, activation, dropout):
        """
        The main block is composed of an input MLP and two linear transformations for forecasting and backcasting
        """
        # Input MLP (originally four layers: this one plus the three in the loop)
        input_block = Dense(neurons_input)(input)
        input_block = generate_activation(activation)(input_block)
        for i in range(3):
            input_block = Dense(neurons_input)(input_block)
            input_block = generate_activation(activation)(input_block)
            input_block = Dropout(rate=dropout)(input_block)

        # Forecast output
        forecast = Dense(neurons_forecast)(input_block)
        forecast = generate_activation(activation)(forecast)
        forecast = Dropout(rate=dropout)(forecast)

        # Backcast output
        backcast = Dense(neurons_backcast)(input_block)
        backcast = generate_activation(activation)(backcast)
        backcast = Dropout(rate=dropout)(backcast)

        # We apply the subtraction to obtain the input for the next block
        return Subtract()([input, backcast]), forecast
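For context, main_block is meant to be stacked in the doubly residual fashion of N-Beats (see group_block in code example #6): each block refines the residual left by the previous block and adds its own contribution to the forecast. A self-contained sketch of that pattern with simplified stand-in blocks, assuming tensorflow.keras (not the repository's implementation):

# Doubly residual stacking sketch (simplified N-Beats-style blocks)
from tensorflow.keras.layers import Input, Dense, Subtract, Add
from tensorflow.keras.models import Model

def simple_block(x, units, horizon, backcast_len):
    h = Dense(units, activation='relu')(x)
    forecast = Dense(horizon)(h)       # this block's contribution to the prediction
    backcast = Dense(backcast_len)(h)  # the part of the input this block explains
    return Subtract()([x, backcast]), forecast

inp = Input(shape=(48,))
residual, forecast_sum = simple_block(inp, 64, 12, 48)
for _ in range(2):
    residual, forecast = simple_block(residual, 64, 12, 48)
    forecast_sum = Add()([forecast_sum, forecast])
model = Model(inputs=inp, outputs=forecast_sum)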
Code example #3
    def generate_model(self):
        """
        Model for MLP multiple regression (s2s)

        :return:
        """

        activation = self.config['arch']['activation']
        dropout = self.config['arch']['drop']
        full_layers = self.config['arch']['full']

        # Adding the possibility of using a GaussianNoise Layer for regularization
        if 'noise' in self.config['arch']:
            noise = self.config['arch']['noise']
        else:
            noise = 0

        if 'batchnorm' in self.config['arch']:
            bnorm = self.config['arch']['batchnorm']
        else:
            bnorm = False

        # Extra added from training function
        idimensions = self.config['idimensions']
        odimension = self.config['odimensions']

        data_input = Input(shape=(idimensions))

        if noise != 0:
            layer = GaussianNoise(noise)(data_input)
            layer = Dense(full_layers[0])(layer)

            layer = generate_activation(activation)(layer)
            layer = Dropout(rate=dropout)(layer)
        else:
            layer = Dense(full_layers[0])(data_input)
            if bnorm:
                layer = BatchNormalization()(layer)
            layer = generate_activation(activation)(layer)
            layer = Dropout(rate=dropout)(layer)

        for units in full_layers[1:]:
            layer = Concatenate()([data_input, layer])
            layer = Dense(units=units)(layer)
            if bnorm:
                layer = BatchNormalization()(layer)
            layer = generate_activation(activation)(layer)

            layer = Dropout(rate=dropout)(layer)

        output = Dense(odimension, activation='linear')(layer)

        self.model = Model(inputs=data_input, outputs=output)
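Two details worth noting in this variant: from the second Dense layer on, the raw input is concatenated back onto the previous layer's output (a DenseNet-style re-injection of the original features), and when the GaussianNoise path is taken the optional BatchNormalization of the first layer is skipped.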
Code example #4
File: TCNArchitecture.py Project: bejar/Wind
    def generate_model(self):
        """
        Temporal Convolutional Network (TCN)

        """
        drop = self.config['arch']['drop']
        filters = self.config['arch']['filters']
        kernel_size = self.config['arch']['kernel_size']

        padding = 'causal' if 'padding' not in self.config['arch'] else self.config['arch']['padding']
        # If there is a dilation field and it is true the strides field is the dilation rates
        # and the strides are all 1's
        if 'dilation' in self.config['arch'] and self.config['arch']['dilation']:
            dilation = self.config['arch']['strides']
            strides = [1] * len(dilation)
        else:
            strides = self.config['arch']['strides']
            dilation = [1] * len(strides)

        activation = self.config['arch']['activation']

        full_layers = self.config['arch']['full']
        activationfl = self.config['arch']['activation_full']
        fulldrop = self.config['arch']['fulldrop']

        # Extra added from training function
        idimensions = self.config['idimensions']
        odimensions = self.config['odimensions']

        input = Input(shape=(idimensions))
        model = Conv1D(filters[0], input_shape=(idimensions), kernel_size=kernel_size[0], strides=strides[0],
                       padding=padding, dilation_rate=dilation[0])(input)
        model = generate_activation(activation)(model)

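        # Note that every residual block reuses kernel_size[0], strides[0] and
        # dilation[0]; only the number of filters (filters[i]) changes per block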
        for i in range(1, len(filters)):
            model = self.residual_block(model, kernel_size[0], strides[0], dilation[0], filters[i], padding, activation, drop)

        model = Flatten()(model)

        model = Dense(full_layers[0])(model)
        model = generate_activation(activationfl)(model)
        model = Dropout(rate=fulldrop)(model)

        output = Dense(odimensions, activation='linear')(model)

        self.model = Model(inputs=input, outputs=output)
Code example #5
    def generate_model(self):
        """
        Model for MLP recursive multiple regression (s2s)

        It takes as inputs the data and the predictions of the previous step

        :return:
        """

        activation = self.config['arch']['activation']
        dropout = self.config['arch']['drop']
        full_layers = self.config['arch']['full']

        # Extra added from training function
        idimensions = self.config['idimensions']
        odimensions = self.config['odimensions']
        rdimensions = self.config['rdimensions']

        input = Input(shape=(idimensions))
        # If there are predictions from the previous step, the NN has two heads:
        # one for the data, the other for the predictions
        if rdimensions > 0:
            # The dimension of the recursive input depends on the number of recursive steps; it is a matrix (batch, recpred)
            rinput = Input(shape=(rdimensions, ))
            recinput = concatenate([input, rinput])
        else:
            recinput = input

        # Dense layers to process the input
        model = Dense(full_layers[0])(recinput)
        model = generate_activation(activation)(model)
        model = Dropout(rate=dropout)(model)

        for units in full_layers[1:]:
            model = Dense(units=units)(model)
            model = generate_activation(activation)(model)
            model = Dropout(rate=dropout)(model)

        output = Dense(odimensions, activation='linear')(model)

        if rdimensions > 0:
            self.model = Model(inputs=[input, rinput], outputs=output)
        else:
            self.model = Model(inputs=input, outputs=output)
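When rdimensions > 0 the network is built with two input heads, and the predictions produced at one step are fed back through rinput at the next step, which is what makes the regression recursive.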
Code example #6
File: NBeatsArchitecture.py Project: bejar/Wind
    def generate_model(self):
        """
        Model for NBeats architecture

        -------------
        json config:

          "arch": {
            "ninput": 64,
            "nforecast": 64,
            "nbackcast": 65,
              "niblocks": 3,
              "neblocks": 1,
             "dropout": 0.3,
            "activation": ["relu"],
            "mode":"NBeats"
          }

        :return:
        """
        neurons_input = self.config['arch']['ninput']
        neurons_forecast = self.config['arch']['nforecast']
        neurons_backcast = self.config['arch']['nbackcast']
        neurons_full = self.config['arch']['nfull']
        dropout = self.config['arch']['dropout']
        niblocks = self.config['arch']['niblocks']  # number of internal blocks
        neblocks = self.config['arch']['neblocks']  # number of external blocks
        activation = self.config['arch']['activation']

        # Extra added from training function
        idimensions = self.config['idimensions']
        odimensions = self.config['odimensions']
        impl = self.runconfig.impl

        input = Input(shape=(idimensions))
        eblock, forecast_sum = self.group_block(niblocks, input, neurons_input,
                                                neurons_forecast,
                                                neurons_backcast, activation,
                                                dropout)

        for i in range(neblocks - 1):
            eblock, forecast = self.group_block(niblocks, eblock,
                                                neurons_input,
                                                neurons_forecast,
                                                neurons_backcast, activation,
                                                dropout)
            forecast_sum = Add()([forecast_sum, forecast])

        eforecast = Dense(neurons_full)(forecast_sum)
        eforecast = generate_activation(activation)(eforecast)

        output = Dense(odimensions, activation='linear')(eforecast)

        self.model = Model(inputs=input, outputs=output)
Code example #7
File: TCNArchitecture.py Project: bejar/Wind
    def residual_block(self, input, kernel_size, strides, dilation, filters, padding, activation, drop):

        # Residual connection
        res = Conv1D(filters, kernel_size=1, strides=strides,
                     padding=padding, dilation_rate=dilation)(input)

        # Convolutions
        model = Conv1D(filters, kernel_size=kernel_size, strides=strides,
                       padding=padding, dilation_rate=dilation)(input)
        model = BatchNormalization()(model)
        model = generate_activation(activation)(model)

        model = Conv1D(filters, kernel_size=kernel_size, strides=strides,
                       padding=padding, dilation_rate=dilation)(model)

        model = Add()([model, res])
        model = BatchNormalization()(model)

        model = generate_activation(activation)(model)
        model = Dropout(rate=drop)(model)
        return model
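The shortcut path projects the input with a kernel-size-1 convolution that uses the same number of filters, strides and dilation as the main path, so both tensors reach the Add layer with compatible shapes (given 'same' or 'causal' padding).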
Code example #8
    def shortcut_layer(self, input_tensor, out_tensor, padding, activation,
                       bnorm, bias):
        shortcut_y = Conv1D(filters=int(out_tensor.shape[-1]),
                            kernel_size=1,
                            padding=padding,
                            use_bias=False)(input_tensor)
        if bnorm:
            shortcut_y = BatchNormalization()(shortcut_y)

        x = Add()([shortcut_y, out_tensor])
        x = generate_activation(activation)(x)
        return x
Code example #9
    def generate_model(self):
        """
        Model for MLP with direct regression

        :return:
        """
        activation = self.config['arch']['activation']
        dropout = self.config['arch']['drop']
        full_layers = self.config['arch']['full']

        # Extra added from training function
        idimensions = self.config['idimensions']

        data_input = Input(shape=(idimensions))
        layer = Dense(full_layers[0])(data_input)

        layer = generate_activation(activation)(layer)
        layer = Dropout(rate=dropout)(layer)
        for units in full_layers[1:]:
            layer = Dense(units=units)(layer)
            layer = generate_activation(activation)(layer)
            layer = Dropout(rate=dropout)(layer)

        output = Dense(1, activation='linear')(layer)

        self.model = Model(inputs=data_input, outputs=output)
Code example #10
    def generate_model(self):
        """
        Model for MLP with direct regression

        :return:
        """
        activation = self.config['arch']['activation']
        dropout = self.config['arch']['drop']
        full_layers = self.config['arch']['full']

        # Extra added from training function
        idimensions = self.config['idimensions']

        self.model = Sequential()
        self.model.add(Dense(full_layers[0], input_shape=idimensions))
        self.model.add(generate_activation(activation))
        self.model.add(Dropout(rate=dropout))
        for units in full_layers[1:]:
            self.model.add(Dense(units=units))
            self.model.add(generate_activation(activation))
            self.model.add(Dropout(rate=dropout))
        self.model.add(Flatten())
        self.model.add(Dense(1, activation='linear'))
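This is the Sequential-API counterpart of code example #9; the extra Flatten before the last layer collapses any remaining input dimensions so the final Dense emits a single regression value.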
Code example #11
    def generate_model(self):
        """
        Model for CNN with 2D convolutions for S2S

        json config:

        "arch": {
            "filters": [32],
            "strides": [1,1],
            "dilation": false,
            "kernel_size": [3],
            "k_reg": "None",
            "k_regw": 0.1,
            "rec_reg": "None",
            "rec_regw": 0.1,
            "drop": 0,
            "activation": "relu",
            "activation_full": "linear",
            "full": [16,8],
            "fulldrop": 0,
            "mode":"CNN_s2s"
        }

        :return:
        """
        drop = self.config['arch']['drop']
        filters = self.config['arch']['filters']

        kernel_size = self.config['arch']['kernel_size']
        # If there is a dilation field and it is true the strides field is the dilation rates
        # and the strides are all 1's
        if 'dilation' in self.config['arch'] and self.config['arch'][
                'dilation']:
            dilation = self.config['arch']['strides']
            strides = [[1, 1]] * len(dilation)
        else:
            strides = self.config['arch']['strides']
            dilation = [[1, 1]] * len(strides)
        activationfl = self.config['arch']['activation_full']
        fulldrop = self.config['arch']['fulldrop']
        full_layers = self.config['arch']['full']

        activation = self.config['arch']['activation']

        k_reg = self.config['arch']['k_reg']
        k_regw = self.config['arch']['k_regw']

        # Extra added from training function
        idimensions = self.config['idimensions']
        odimensions = self.config['odimensions']

        if k_reg == 'l1':
            k_regularizer = l1(k_regw)
        elif k_reg == 'l2':
            k_regularizer = l2(k_regw)
        else:
            k_regularizer = None

        input = Input(shape=(idimensions))
        # We assume that the kernel size for the dimension corresponding to the variables is always the number of variables
        model = Conv2D(filters[0],
                       input_shape=(idimensions),
                       kernel_size=[kernel_size[0], idimensions[1]],
                       strides=[1, strides[0]],
                       padding='same',
                       dilation_rate=dilation[0],
                       kernel_regularizer=k_regularizer)(input)
        model = generate_activation(activation)(model)

        if drop != 0:
            model = Dropout(rate=drop)(model)

        for i in range(1, len(filters)):
            model = Conv2D(filters[i],
                           # idimensions[1] is the variables axis, matching the first layer
                           kernel_size=[kernel_size[i], idimensions[1]],
                           strides=[1, strides[i]],
                           padding='same',
                           dilation_rate=dilation[i],
                           kernel_regularizer=k_regularizer)(model)
            model = generate_activation(activation)(model)

            if drop != 0:
                model = Dropout(rate=drop)(model)

        model = Flatten()(model)
        for l in full_layers:
            model = Dense(l)(model)
            model = generate_activation(activationfl)(model)
            if fulldrop != 0:
                model = Dropout(rate=fulldrop)(model)

        output = Dense(odimensions, activation='linear')(model)

        self.model = Model(inputs=input, outputs=output)
Code example #12
    def inception_module(self,
                         input_tensor,
                         filters,
                         kernel_size,
                         activation,
                         bottleneck,
                         bottleneck_size,
                         padding,
                         drop,
                         bnorm,
                         bias,
                         separable,
                         depth_mul,
                         stride=1):
        print('->', filters, kernel_size, activation, bottleneck,
              bottleneck_size, padding, drop, bnorm, bias, separable,
              depth_mul, stride)
        if bottleneck and int(input_tensor.shape[-1]) > 1:
            if separable:
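                # Note: this separable branch uses `filters` for the 1x1
                # bottleneck, while the Conv1D branch below uses bottleneck_size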
                input_inception = SeparableConv1D(
                    filters=filters,
                    kernel_size=1,
                    strides=stride,
                    padding=padding,
                    use_bias=bias,
                    depth_multiplier=depth_mul)(input_tensor)
            else:
                input_inception = Conv1D(filters=bottleneck_size,
                                         kernel_size=1,
                                         padding=padding,
                                         use_bias=bias)(input_tensor)
            input_inception = generate_activation(activation)(input_inception)
        else:
            input_inception = input_tensor

        conv_list = []

        for i in range(len(kernel_size)):
            if separable:
                layer = SeparableConv1D(
                    filters=filters,
                    kernel_size=kernel_size[i],
                    strides=stride,
                    padding=padding,
                    use_bias=bias,
                    depth_multiplier=depth_mul)(input_inception)
            else:
                layer = Conv1D(filters=filters,
                               kernel_size=kernel_size[i],
                               strides=stride,
                               padding=padding,
                               use_bias=bias)(input_inception)

            layer = generate_activation(activation)(layer)
            if drop != 0:
                layer = Dropout(rate=drop)(layer)
            conv_list.append(layer)

        max_pool_1 = MaxPool1D(pool_size=3, strides=stride,
                               padding=padding)(input_tensor)

        conv_6 = Conv1D(filters=filters,
                        kernel_size=1,
                        padding=padding,
                        use_bias=bias)(max_pool_1)
        conv_6 = generate_activation(activation)(conv_6)

        conv_list.append(conv_6)

        x = Concatenate(axis=2)(conv_list)
        if bnorm:
            x = BatchNormalization()(x)
        x = generate_activation(activation)(x)
        return x
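The module follows the usual Inception pattern: an optional bottleneck first reduces the channel count, several convolutions with different kernel sizes plus a max-pooling branch then run in parallel, and their outputs are concatenated along the channel axis.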
Code example #13
    def generate_model(self):
        """
        Model for RNN with Encoder Decoder for S2S
        -------------
        json config:

        "arch": {
            "neuronsE":32,
            "neuronsD":16,
            "k_reg": "None",
            "k_regw": 0.1,
            "rec_reg": "None",
            "rec_regw": 0.1,
            "drop": 0.3,
            "nlayers": 2,
            "nlayersE": 1,
            "nlayersD": 1,
            "activation": "relu",
            "activation_r": "hard_sigmoid",
            "bidirectiolal":[false, false] <- bidirectional [encoder, decoder]
            "CuDNN": false,
            "rnn": "GRU",
            "mode": "RNN_ED_s2s"
        }

        :return:
        """
        neuronsE = self.config['arch']['neuronsE']
        neuronsD = self.config['arch']['neuronsD']
        nlayersE = self.config['arch']['nlayersE']  # >= 1
        nlayersD = self.config['arch']['nlayersD']  # >= 1
        drop = self.config['arch']['drop']

        activation = self.config['arch']['activation']
        activation_r = self.config['arch']['activation_r']
        rec_reg = self.config['arch']['rec_reg']
        rec_regw = self.config['arch']['rec_regw']
        k_reg = self.config['arch']['k_reg']
        k_regw = self.config['arch']['k_regw']
        rnntype = self.config['arch']['rnn']
        CuDNN = self.config['arch']['CuDNN']

        if "bidirectional" in self.config['arch']:
            bidire = self.config['arch']['bidirectional'][0]
            bidird = self.config['arch']['bidirectional'][1]
            bimerge = self.config['arch']['bimerge']

        if 'backwards' in self.config['arch']:
            backwards = self.config['arch']['backwards']
        else:
            backwards = False

        # GRU parameter for alternative implementation
        if 'after' in self.config['arch']:
            after = self.config['arch']['after']
        else:
            after = False

        if 'full' in self.config['arch']:
            full = self.config['arch']['full']
            fulldrop = self.config['arch']['fulldrop']
            activation_full = self.config['arch']['activation_full']
        else:
            full = []
        # Extra added from training function
        idimensions = self.config['idimensions']
        odimensions = self.config['odimensions']
        impl = self.runconfig.impl

        if rec_reg == 'l1':
            rec_regularizer = l1(rec_regw)
        elif rec_reg == 'l2':
            rec_regularizer = l2(rec_regw)
        else:
            rec_regularizer = None

        if k_reg == 'l1':
            k_regularizer = l1(k_regw)
        elif k_reg == 'l2':
            k_regularizer = l2(k_regw)
        else:
            k_regularizer = None

        input = Input(shape=(idimensions))

        if nlayersE == 1:

            model = generate_recurrent_layer(neuronsE,
                                             impl,
                                             drop,
                                             activation_r,
                                             rec_regularizer,
                                             k_regularizer,
                                             backwards,
                                             rnntype,
                                             after,
                                             bidire,
                                             bimerge,
                                             rseq=False)(input)

            model = generate_activation(activation)(model)

        else:
            model = generate_recurrent_layer(neuronsE,
                                             impl,
                                             drop,
                                             activation_r,
                                             rec_regularizer,
                                             k_regularizer,
                                             backwards,
                                             rnntype,
                                             after,
                                             bidire,
                                             bimerge,
                                             rseq=True)(input)
            model = generate_activation(activation)(model)

            for i in range(1, nlayersE - 1):
                model = generate_recurrent_layer(neuronsE,
                                                 impl,
                                                 drop,
                                                 activation_r,
                                                 rec_regularizer,
                                                 k_regularizer,
                                                 backwards,
                                                 rnntype,
                                                 after,
                                                 bidire,
                                                 bimerge,
                                                 rseq=True)(model)

                model = generate_activation(activation)(model)

            model = generate_recurrent_layer(neuronsE,
                                             impl,
                                             drop,
                                             activation_r,
                                             rec_regularizer,
                                             k_regularizer,
                                             backwards,
                                             rnntype,
                                             after,
                                             bidire,
                                             bimerge,
                                             rseq=False)(model)

            model = generate_activation(activation)(model)

        model = RepeatVector(odimensions)(model)

        for i in range(nlayersD):
            model = generate_recurrent_layer(neuronsD,
                                             impl,
                                             drop,
                                             activation_r,
                                             rec_regularizer,
                                             k_regularizer,
                                             backwards,
                                             rnntype,
                                             after,
                                             bidird,
                                             bimerge,
                                             rseq=True)(model)

            model = generate_activation(activation)(model)

        for units in full:
            model = TimeDistributed(Dense(units=units))(model)
            model = TimeDistributed(
                generate_activation(activation_full))(model)
            model = TimeDistributed(Dropout(rate=fulldrop))(model)

        output = TimeDistributed(Dense(1))(model)
        self.model = Model(inputs=input, outputs=output)
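RepeatVector(odimensions) bridges encoder and decoder by repeating the encoder's final state once per prediction step, so the decoder layers and the closing TimeDistributed(Dense(1)) produce one value per horizon step.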
Code example #14
    def generate_model(self):
        """
        Model for CNN with several convolutional heads (one per kernel size) for S2S

        json config:

        "arch": {
            "filters": 32,
            "strides": 1,
            "dilation": false,
            "kernel_size": [3],
            "k_reg": "None",
            "k_regw": 0.1,
            "rec_reg": "None",
            "rec_regw": 0.1,
            "drop": 0,
            "activation": "relu",
            "activation_full": "linear",
            "full": [16,8],
            "fulldrop": 0,
            "mode":"CNN_s2s"
        }

        :return:
        """
        drop = self.config['arch']['drop']
        filters = self.config['arch']['filters']
        kernel_size = self.config['arch']['kernel_size']
        if not isinstance(kernel_size, list):
            raise TypeError('kernel size must be a list')
        elif len(kernel_size) < 1:
            raise ValueError('kernel size list must have at least one element')

        strides = self.config['arch']['strides']

        activationfl = self.config['arch']['activation_full']
        fulldrop = self.config['arch']['fulldrop']
        full_layers = self.config['arch']['full']

        activation = self.config['arch']['activation']

        k_reg = self.config['arch']['k_reg']
        k_regw = self.config['arch']['k_regw']

        # Extra added from training function
        idimensions = self.config['idimensions']
        odimensions = self.config['odimensions']

        if k_reg == 'l1':
            k_regularizer = l1(k_regw)
        elif k_reg == 'l2':
            k_regularizer = l2(k_regw)
        else:
            k_regularizer = None

        input = Input(shape=(idimensions))

        lconv = []

        # Assumes several kernel sizes but only one layer for head
        for k in kernel_size:
            convomodel = Conv1D(filters[0],
                                input_shape=(idimensions),
                                kernel_size=k,
                                strides=strides[0],
                                padding='causal',
                                kernel_regularizer=k_regularizer)(input)
            convomodel = generate_activation(activation)(convomodel)

            if drop != 0:
                convomodel = Dropout(rate=drop)(convomodel)
            lconv.append(convomodel)

        convoout = Concatenate()(lconv)
        fullout = Dense(full_layers[0])(convoout)
        fullout = generate_activation(activationfl)(fullout)
        fullout = Dropout(rate=fulldrop)(fullout)

        for l in full_layers[1:]:
            fullout = Dense(l)(fullout)
            fullout = generate_activation(activationfl)(fullout)
            fullout = Dropout(rate=fulldrop)(fullout)

        fullout = Flatten()(fullout)

        output = Dense(odimensions, activation='linear')(fullout)

        self.model = Model(inputs=input, outputs=output)
Code example #15
    def generate_model(self):
        """
        Model for CNN LoCo for S2S

        json config:

        "arch": {
            "filters": [32],
            "strides": [1],
            "kernel_size": [3],
            "k_reg": "None",
            "k_regw": 0.1,
            "rec_reg": "None",
            "rec_regw": 0.1,
            "drop": 0,
            "activation": "relu",
            "activation_full": "linear",
            "full": [16,8],
            "fulldrop": 0,
            "mode":"CNN_s2s"
        }

        :return:
        """
        drop = self.config['arch']['drop']
        filters = self.config['arch']['filters']
        kernel_size = self.config['arch']['kernel_size']

        padding = 'valid'  # LocallyConnected only supports valid padding

        strides = self.config['arch']['strides']

        activationfl = self.config['arch']['activation_full']
        fulldrop = self.config['arch']['fulldrop']
        full_layers = self.config['arch']['full']

        activation = self.config['arch']['activation']

        k_reg = self.config['arch']['k_reg']
        k_regw = self.config['arch']['k_regw']

        # Extra added from training function
        idimensions = self.config['idimensions']
        odimensions = self.config['odimensions']

        if k_reg == 'l1':
            k_regularizer = l1(k_regw)
        elif k_reg == 'l2':
            k_regularizer = l2(k_regw)
        else:
            k_regularizer = None

        input = Input(shape=(idimensions))
        model = LocallyConnected1D(filters[0],
                                   input_shape=(idimensions),
                                   kernel_size=kernel_size[0],
                                   strides=strides[0],
                                   padding=padding,
                                   data_format='channels_last',
                                   kernel_regularizer=k_regularizer)(input)
        model = generate_activation(activation)(model)

        if drop != 0:
            model = Dropout(rate=drop)(model)

        for i in range(1, len(filters)):
            model = LocallyConnected1D(filters[i],
                                       kernel_size=kernel_size[i],
                                       strides=strides[i],
                                       padding=padding,
                                       data_format='channels_last',
                                       kernel_regularizer=k_regularizer)(model)
            model = generate_activation(activation)(model)

            if drop != 0:
                model = Dropout(rate=drop)(model)

        model = Flatten()(model)
        for l in full_layers:
            model = Dense(l)(model)
            model = generate_activation(activationfl)(model)
            if fulldrop != 0:
                model = Dropout(rate=fulldrop)(model)

        output = Dense(odimensions, activation='linear')(model)

        self.model = Model(inputs=input, outputs=output)
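LocallyConnected1D works like Conv1D but learns a separate set of weights at every window position (no weight sharing), and the Keras implementation only accepts 'valid' padding, hence the hardcoded value above.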
Code example #16
    def generate_model(self):
        """
        Model for CNN with skip/residual connections for S2S

        json config:

        "arch": {
                 "filters": [1024],
                 "strides": [2],
                 "kernel_size": [9],
                 "depth_multiplier": 8,
                 "activation": ["elu",0.3],
                 "drop": 0.6,
                 "filters2": [1024],
                 "strides2": [4],
                 "kernel_size2": [1],
                 "depth_multiplier2": 7,
                 "activation2": ["elu",0.3],
                 "drop2": 0.6,
                 "dilation": false,
                 "k_reg": "None",
                 "k_regw": 0.1,
                 "rec_reg": "None",
                 "rec_regw": 0.1,
                 "activation_full": ["elu",0.4],
                 "fulldrop": 0.10,
                 "full": [1024],
        }

        :return:
        """
        # tipus selects the architecture variant: 1 = plain, 2 = skip,
        # 3 = residual, 4 = residual + skip, 5 = residual + skip + max pooling
        tipus = self.config['arch']['tipus']
        # 1st Layer
        print('--->CNNS2SkipArchitecture')
        drop = self.config['arch']['drop']
        filters = self.config['arch']['filters']
        kernel_size = self.config['arch']['kernel_size']
        # If there is a dilation field and it is true the strides field is the dilation rates
        # and the strides are all 1's
        if 'dilation' in self.config['arch'] and self.config['arch'][
                'dilation']:
            dilation = self.config['arch']['strides']
            strides = [1] * len(dilation)
        else:
            strides = self.config['arch']['strides']
            dilation = [1] * len(strides)

        depth_multiplier = self.config['arch']['depth_multiplier']
        activation = self.config['arch']['activation']

        # 2nd Layer
        drop2 = self.config['arch']['drop2']
        filters2 = self.config['arch']['filters2']
        kernel_size2 = self.config['arch']['kernel_size2']
        # If there is a dilation field and it is true the strides field is the dilation rates
        # and the strides are all 1's
        if 'dilation' in self.config['arch'] and self.config['arch'][
                'dilation']:
            dilation2 = self.config['arch']['strides2']
            strides2 = [1] * len(dilation2)
        else:
            strides2 = self.config['arch']['strides2']
            dilation2 = [1] * len(strides2)

        depth_multiplier2 = self.config['arch']['depth_multiplier2']
        activation2 = self.config['arch']['activation2']

        activationfl = self.config['arch']['activation_full']
        fulldrop = self.config['arch']['fulldrop']
        full_layers = self.config['arch']['full']

        k_reg = self.config['arch']['k_reg']
        k_regw = self.config['arch']['k_regw']

        # Extra added from training function
        idimensions = self.config['idimensions']
        odimensions = self.config['odimensions']

        if k_reg == 'l1':
            k_regularizer = l1(k_regw)
        elif k_reg == 'l2':
            k_regularizer = l2(k_regw)
        else:
            k_regularizer = None

        input = Input(shape=(idimensions))
        model = SeparableConv1D(filters[0],
                                input_shape=(idimensions),
                                kernel_size=kernel_size[0],
                                strides=strides[0],
                                padding='same',
                                dilation_rate=dilation[0],
                                depth_multiplier=depth_multiplier,
                                kernel_regularizer=k_regularizer)(input)
        model = generate_activation(activation)(model)

        # The original test `(tipus != 1) or (tipus != 2)` was always true;
        # the residual connection is meant for the variants beyond 1 and 2
        if tipus not in (1, 2):
            residual1 = Conv1D(filters2[0], 1, strides=2,
                               padding='valid')(input)
            model = Add()([model, residual1])

        if drop != 0:
            model = Dropout(rate=drop)(model)

        model = SeparableConv1D(filters2[0],
                                kernel_size=kernel_size2[0],
                                strides=strides2[0],
                                padding='same',
                                dilation_rate=dilation2[0],
                                depth_multiplier=depth_multiplier2,
                                kernel_regularizer=k_regularizer)(model)
        model = generate_activation(activation2)(model)

        if drop2 != 0:
            model = Dropout(rate=drop2)(model)

        input_pooled = input
        if tipus == 5:
            input_pooled = MaxPooling1D(pool_size=2,
                                        strides=None,
                                        padding='same')(input)
        if tipus in (1, 3):
            model = Flatten()(model)
        else:
            model = Concatenate()([Flatten()(model), Flatten()(input_pooled)])

        for l in full_layers:
            model = Dense(l)(model)
            model = generate_activation(activationfl)(model)
            if fulldrop != 0:
                model = Dropout(rate=fulldrop)(model)

        output = Dense(odimensions, activation='linear')(model)

        self.model = Model(inputs=input, outputs=output)
Code example #17
    def generate_model(self):
        """
        Model for RNN for S2S multiple regression

        -------------
        json config:

        "arch": {
            "neurons":128,
            "k_reg": "None",
            "k_regw": 0.1,
            "rec_reg": "None",
            "rec_regw": 0.1,
            "drop": 0.3,
            "nlayers": 1,
            "activation": "tanh",
            "activation_r": "hard_sigmoid",
            "CuDNN": false,
            "bidirectional": false,
            "bimerge":"ave",
            "rnn": "GRU",
            "full": [64, 32],
            "activation_full": "sigmoid",
            "fulldrop": 0.05,
            "mode": "RNN_s2s"
        }

        :return:
        """
        neurons = self.config['arch']['neurons']
        drop = self.config['arch']['drop']
        nlayersE = self.config['arch']['nlayers']  # >= 1

        activation = self.config['arch']['activation']
        activation_r = self.config['arch']['activation_r']
        rec_reg = self.config['arch']['rec_reg']
        rec_regw = self.config['arch']['rec_regw']
        k_reg = self.config['arch']['k_reg']
        k_regw = self.config['arch']['k_regw']
        rnntype = self.config['arch']['rnn']

        full = self.config['arch']['full']
        fulldrop = self.config['arch']['fulldrop']
        activation_full = self.config['arch']['activation_full']
        bidir = self.config['arch']['bidirectional']
        bimerge = self.config['arch']['bimerge']

        # Extra added from training function
        idimensions = self.config['idimensions']
        odimensions = self.config['odimensions']
        impl = self.runconfig.impl

        if rec_reg == 'l1':
            rec_regularizer = l1(rec_regw)
        elif rec_reg == 'l2':
            rec_regularizer = l2(rec_regw)
        else:
            rec_regularizer = None

        if k_reg == 'l1':
            k_regularizer = l1(k_regw)
        elif k_reg == 'l2':
            k_regularizer = l2(k_regw)
        else:
            k_regularizer = None

        RNN = LSTM if rnntype == 'LSTM' else GRU

        input = Input(shape=(idimensions))

        if nlayersE == 1:
            if bidir:
                model = Bidirectional(
                    RNN(
                        neurons,
                        implementation=impl,
                        return_sequences=True,
                        recurrent_dropout=drop,  #activation=activation,
                        recurrent_activation=activation_r,
                        recurrent_regularizer=rec_regularizer,
                        kernel_regularizer=k_regularizer),
                    merge_mode=bimerge)(input)
                model = generate_activation(activation)(model)
            else:
                model = RNN(
                    neurons,
                    implementation=impl,
                    return_sequences=True,
                    recurrent_dropout=drop,  #activation=activation,
                    recurrent_activation=activation_r,
                    recurrent_regularizer=rec_regularizer,
                    kernel_regularizer=k_regularizer)(input)
                model = generate_activation(activation)(model)
        else:
            if bidir:
                model = Bidirectional(
                    RNN(
                        neurons,
                        implementation=impl,
                        return_sequences=True,
                        recurrent_dropout=drop,  #activation=activation,
                        recurrent_activation=activation_r,
                        recurrent_regularizer=rec_regularizer,
                        kernel_regularizer=k_regularizer),
                    merge_mode=bimerge)(input)
                model = generate_activation(activation)(model)
            else:
                model = RNN(
                    neurons,
                    implementation=impl,
                    return_sequences=True,
                    recurrent_dropout=drop,  #activation=activation,
                    recurrent_activation=activation_r,
                    recurrent_regularizer=rec_regularizer,
                    kernel_regularizer=k_regularizer)(input)
                model = generate_activation(activation)(model)

            for i in range(1, nlayersE - 1):
                if bidir:
                    model = Bidirectional(
                        RNN(
                            neurons,
                            implementation=impl,
                            return_sequences=True,
                            recurrent_dropout=drop,  #activation=activation,
                            recurrent_activation=activation_r,
                            recurrent_regularizer=rec_regularizer,
                            kernel_regularizer=k_regularizer),
                        merge_mode=bimerge)(model)
                    model = generate_activation(activation)(model)
                else:
                    model = RNN(
                        neurons,
                        implementation=impl,
                        return_sequences=True,
                        recurrent_dropout=drop,  #activation=activation,
                        recurrent_activation=activation_r,
                        recurrent_regularizer=rec_regularizer,
                        kernel_regularizer=k_regularizer)(model)
                    model = generate_activation(activation)(model)

            if bidir:
                model = Bidirectional(
                    RNN(
                        neurons,
                        implementation=impl,
                        return_sequences=True,
                        recurrent_dropout=drop,  #activation=activation,
                        recurrent_activation=activation_r,
                        recurrent_regularizer=rec_regularizer,
                        kernel_regularizer=k_regularizer),
                    merge_mode=bimerge)(model)
                model = generate_activation(activation)(model)
            else:
                model = RNN(
                    neurons,
                    implementation=impl,
                    return_sequences=True,
                    recurrent_dropout=drop,  # activation=activation,
                    recurrent_activation=activation_r,
                    recurrent_regularizer=rec_regularizer,
                    kernel_regularizer=k_regularizer)(model)
                model = generate_activation(activation)(model)

        model = Flatten()(model)

        for nn in full:
            model = Dense(nn)(model)
            model = generate_activation(activation_full)(model)
            model = Dropout(rate=fulldrop)(model)

        output = Dense(odimensions, activation='linear')(model)

        self.model = Model(inputs=input, outputs=output)
Code example #18
File: CNNS2SArchitecture.py Project: bejar/Wind
    def generate_model(self):
        """
        Model for CNN  for S2S

        json config:

        "arch": {
            "filters": [32],
            "strides": [1],
            "dilation": false,
            "kernel_size": [3],
            "k_reg": "None",
            "k_regw": 0.1,
            "rec_reg": "None",
            "rec_regw": 0.1,
            "drop": 0,
            "activation": "relu",
            "squeeze":ratio,
            "padding":"causal/same/valid",
            "bias": true/false,
            "batchnorm":true/false,
            "activation_full": "linear",
            "full": [16,8],
            "fulldrop": 0,
            "fulltype": "mlp/conv"
            "mode":"CNN_s2s"
        }

        :return:
        """
        drop = self.config['arch']['drop']
        filters = self.config['arch']['filters']
        kernel_size = self.config['arch']['kernel_size']

        padding = 'causal' if 'padding' not in self.config[
            'arch'] else self.config['arch']['padding']
        # If there is a dilation field and it is true the strides field is the dilation rates
        # and the strides are all 1's
        if 'dilation' in self.config['arch'] and self.config['arch'][
                'dilation']:
            dilation = self.config['arch']['strides']
            strides = [1] * len(dilation)
        else:
            strides = self.config['arch']['strides']
            dilation = [1] * len(strides)

        activation = self.config['arch']['activation']

        k_reg = self.config['arch']['k_reg']
        k_regw = self.config['arch']['k_regw']

        # Extra added from training function
        idimensions = self.config['idimensions']
        odimensions = self.config['odimensions']

        if 'batchnorm' in self.config['arch']:
            bnorm = self.config['arch']['batchnorm']
        else:
            bnorm = False
        bias = True if 'bias' not in self.config['arch'] else self.config[
            'arch']['bias']

        if 'squeeze' in self.config['arch']:
            squeeze = self.config['arch']['squeeze']
        else:
            squeeze = None

        if k_reg == 'l1':
            k_regularizer = l1(k_regw)
        elif k_reg == 'l2':
            k_regularizer = l2(k_regw)
        else:
            k_regularizer = None

        input = Input(shape=(idimensions))
        model = Conv1D(filters[0],
                       input_shape=(idimensions),
                       kernel_size=kernel_size[0],
                       strides=strides[0],
                       padding=padding,
                       dilation_rate=dilation[0],
                       use_bias=bias,
                       kernel_regularizer=k_regularizer)(input)
        model = generate_activation(activation)(model)
        if bnorm:
            model = BatchNormalization()(model)

        if drop != 0:
            model = Dropout(rate=drop)(model)

        if squeeze is not None:
            model = squeeze_and_excitation(model, ratio=squeeze)

        for i in range(1, len(filters)):
            model = Conv1D(filters[i],
                           kernel_size=kernel_size[i],
                           strides=strides[i],
                           padding=padding,
                           dilation_rate=dilation[i],
                           use_bias=bias,
                           kernel_regularizer=k_regularizer)(model)
            model = generate_activation(activation)(model)
            if bnorm:
                model = BatchNormalization()(model)

            if drop != 0:
                model = Dropout(rate=drop)(model)

            if squeeze is not None:
                model = squeeze_and_excitation(model, ratio=squeeze)

        if 'fulltype' not in self.config['arch'] or self.config['arch']['fulltype'] == 'mlp':
            # MLP output head (the default): full/fulldrop/activation_full
            # build an MLP topped by a linear layer with odimensions units
            activationfl = self.config['arch']['activation_full']
            fulldrop = self.config['arch']['fulldrop']
            full_layers = self.config['arch']['full']
            model = Flatten()(model)

            for l in full_layers:
                model = Dense(l)(model)
                model = generate_activation(activationfl)(model)
                if bnorm:
                    model = BatchNormalization()(model)

                if fulldrop != 0:
                    model = Dropout(rate=fulldrop)(model)

            output = Dense(odimensions, activation='linear')(model)
        elif self.config['arch']['fulltype'] == 'conv':
            # Fully convolutional output, size 1 stride 1 convolution with odimensions filters
            activationfl = self.config['arch']['activation_full']
            fulldrop = self.config['arch']['fulldrop']
            model = Conv1D(odimensions, kernel_size=1, strides=1)(model)
            model = generate_activation(activationfl)(model)
            if fulldrop != 0:
                model = Dropout(rate=fulldrop)(model)
            model = Flatten()(model)
            # model = GlobalAveragePooling1D()(model)
            output = Dense(odimensions, activation='linear')(model)
        else:
            model = Flatten()(model)
            output = Dense(odimensions, activation='linear')(model)

        self.model = Model(inputs=input, outputs=output)
Code example #19
    def generate_model(self):
        """
        Model for CNN with Encoder Decoder for S2S

        json config:

        "arch": {
            "bottleneck_filters": [256,256,0],
            "bottleneck_activation": [["elu",0.2], ["elu",0.2],["elu",0.2]],
            "inception_filters: [20,20,20]
            "inception_kernels":[[3,5,7],[3,3,3],[5,7,9]],
            "inception_activation":[["elu",0.2], ["elu",0.2],["elu",0.2]],
            "shortcut":[True,True,True]
            "inception_drop": [0.3,0.3,0.3]
            "dilation": false,
            "kernel_size": 3,   
            "k_reg": "None",
            "k_regw": 0.1,
            "rec_reg": "None",
            "rec_regw": 0.1,
            "drop": 0,
            "activation": "relu",
            "activation_full": "linear",
            "full": [16,8],
            "fulldrop": 0,
            "mode":"CNN_s2s"
        }

        :return:
        """
        drop = self.config['arch']['drop']
        kernel_size = self.config['arch']['kernel_size']
        if not isinstance(kernel_size, list):
            raise TypeError('kernel size must be a list')
        elif len(kernel_size) < 1:
            raise ValueError('kernel size list must have at least one element')

        activationfl = self.config['arch']['activation_full']
        fulldrop = self.config['arch']['fulldrop']
        full_layers = self.config['arch']['full']

        activation = self.config['arch']['activation']

        k_reg = self.config['arch']['k_reg']
        k_regw = self.config['arch']['k_regw']

        # Inception architecture parameters
        bottleneck_filters = self.config['arch']['bottleneck_filters']
        bottleneck_activation = self.config['arch']['bottleneck_activation']
        inc_filters = self.config['arch']['inception_filters']
        inc_kernels = self.config['arch']['inception_kernels']
        inc_act = self.config['arch']['inception_activation']
        inc_shortcut = self.config['arch']['shortcut']
        inc_drop = self.config['arch']['inception_drop']

        # Extra added from training function
        idimensions = self.config['idimensions']
        odimensions = self.config['odimensions']

        if k_reg == 'l1':
            k_regularizer = l1(k_regw)
        elif k_reg == 'l2':
            k_regularizer = l2(k_regw)
        else:
            k_regularizer = None

        input = Input(shape=(idimensions))
        inc_tensor = input
        r = 0
        i = 0
        for k in inc_filters:  # one inception block per entry in inc_filters
            print("-->k ", k)
            if bottleneck_filters[i] != 0:  # create a bottleneck for this block?
                print("conv1d bottleneck -->", r, " filters, activation ",
                      bottleneck_filters[i], bottleneck_activation[i])
                r = r + 1
                bottleneck_tensor = SeparableConv1D(
                    filters=bottleneck_filters[i],
                    kernel_size=1,
                    padding='same',
                    use_bias=False)(inc_tensor)
                bottleneck_tensor = generate_activation(
                    bottleneck_activation[i])(bottleneck_tensor)
                bottleneck_tensor = Dropout(rate=0.0)(bottleneck_tensor)
            else:
                bottleneck_tensor = inc_tensor
            lconv = []

            for j in range(0, 3):
                print(
                    "conv1d -->",
                    r,
                    k,
                    inc_kernels[i][j],
                )
                r = r + 1

                inc_layer = SeparableConv1D(
                    k,
                    kernel_size=inc_kernels[i][j],
                    strides=1,
                    padding='same',
                    kernel_regularizer=k_regularizer)(bottleneck_tensor)
                inc_layer = generate_activation(activation)(inc_layer)
                if inc_drop[i] != 0:
                    print('dropout:', inc_drop[i])
                    inc_layer = Dropout(rate=inc_drop[i])(inc_layer)
                lconv.append(inc_layer)

            max_pool1 = MaxPool1D(pool_size=3, strides=1,
                                  padding='same')(bottleneck_tensor)
            max_pool1 = SeparableConv1D(filters=32,  # pooling branch projected to a fixed 32 filters
                                        kernel_size=1,
                                        padding='same',
                                        use_bias=False)(max_pool1)
            max_pool1 = generate_activation(activation)(max_pool1)
            lconv.append(max_pool1)

            inc_tensor = Concatenate(axis=2)(lconv)
            i = i + 1

        fullout = Dense(full_layers[0])(inc_tensor)
        fullout = generate_activation(activationfl)(fullout)
        fullout = Dropout(rate=fulldrop)(fullout)

        for l in full_layers[1:]:
            fullout = Dense(l)(fullout)
            fullout = generate_activation(activationfl)(fullout)
            fullout = Dropout(rate=fulldrop)(fullout)

        fullout = Flatten()(fullout)

        output = Dense(odimensions, activation='linear')(fullout)

        self.model = Model(inputs=input, outputs=output)
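
The docstring for this model lists only the generic CNN_s2s keys; the inception-specific fields the method reads are sketched below. The key names come from the code above, but the values are illustrative, not project defaults.

# Illustrative "arch" fragment for the inception model above; only the key
# names are taken from the code, every value is made up.
arch_inception = {
    "kernel_size": [3],
    "drop": 0,
    "activation": "relu",
    "activation_full": "linear",
    "full": [16, 8],
    "fulldrop": 0,
    "k_reg": "None",
    "k_regw": 0.1,
    "bottleneck_filters": [16, 0],            # 0 skips the bottleneck for that block
    "bottleneck_activation": ["relu", "relu"],
    "inception_filters": [32, 32],            # one inception block per entry
    "inception_kernels": [[3, 5, 7], [3, 5, 7]],  # three parallel convolutions per block
    "inception_activation": "relu",
    "shortcut": False,
    "inception_drop": [0, 0],
}
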
コード例 #20
    def generate_model(self):
        """
        Model for TimeInception
        json config:
        "arch": {
            "filters": [32],
            "residual": true/false,
            "bottleneck": true/false,
            "kernel_size": [3],
            "drop": 0,
            "activation": "relu",
            "padding":"causal/same/valid",
            "bias": true/false,
            "batchnorm":true/false,
            "depth": n
            "activation_full": "linear",
            "full": [16,8],
            "fulldrop": 0,
            "fulltype": "mlp/conv"
            "mode":"CNN_s2s"
        }
        :return:
        """
        drop = self.config['arch']['drop']
        filters = self.config['arch']['filters']
        kernel_size = self.config['arch']['kernel_size']

        padding = self.config['arch'].get('padding', 'causal')
        # If the dilation field is present and true, the strides field holds
        # the dilation rates and the actual strides are all 1's
        if self.config['arch'].get('dilation', False):
            dilation = self.config['arch']['strides']
            strides = [1] * len(dilation)
        else:
            strides = self.config['arch']['strides']
            dilation = [1] * len(strides)

        activation = self.config['arch']['activation']

        residual = self.config['arch']['residual']
        bottle = self.config['arch']['bottleneck']
        bsize = self.config['arch']['bottleneck_size']
        depth = self.config['arch']['depth']

        # Extra added from training function
        idimensions = self.config['idimensions']
        odimensions = self.config['odimensions']

        bnorm = self.config['arch'].get('batchnorm', False)

        bias = self.config['arch'].get('bias', True)
        separable = self.config['arch'].get('separable', False)
        depth_multiplier = self.config['arch'].get('depth_multiplier', 1)

        input = Input(shape=(idimensions))

        x = input
        input_res = input

        for d in range(depth):

            x = self.inception_module(x, filters, kernel_size, activation,
                                      bottle, bsize, padding, drop, bnorm,
                                      bias, separable, depth_multiplier)
            if residual and d % 3 == 2:
                print(input_res, x, padding, activation, bnorm, bias)
                x = self.shortcut_layer(input_res, x, padding, activation,
                                        bnorm, bias)
                input_res = x

        gap_layer = GlobalAveragePooling1D()(x)

        activationfl = self.config['arch']['activation_full']
        fulldrop = self.config['arch']['fulldrop']
        full_layers = self.config['arch']['full']

        model = gap_layer

        for l in full_layers:
            model = Dense(l)(model)
            model = generate_activation(activationfl)(model)
            if bnorm:
                model = BatchNormalization()(model)

            if fulldrop != 0:
                model = Dropout(rate=fulldrop)(model)

        output = Dense(odimensions, activation='linear')(model)

        self.model = Model(inputs=input, outputs=output)
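
The dilation/strides convention above is easy to misread, so here is the same reading extracted into a standalone sketch (semantics inferred from the code: when "dilation" is true, the "strides" field holds the dilation rates and the real strides become all 1's).

def resolve_strides(arch):
    # When dilation is enabled, "strides" is reinterpreted as dilation rates
    if arch.get('dilation', False):
        dilation = arch['strides']
        strides = [1] * len(dilation)
    else:
        strides = arch['strides']
        dilation = [1] * len(strides)
    return strides, dilation

print(resolve_strides({"strides": [2, 4], "dilation": True}))   # ([1, 1], [2, 4])
print(resolve_strides({"strides": [2, 4], "dilation": False}))  # ([2, 4], [1, 1])
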
コード例 #21
    def generate_model(self):
        """
        Model for RNN for S2S multiple regression

        -------------
        json config:

        "arch": {
            "neurons":128,
            "k_reg": "None",
            "k_regw": 0.1,
            "rec_reg": "None",
            "rec_regw": 0.1,
            "drop": 0.3,
            "nlayers": 1,
            "activation": "tanh",
            "activation_r": "hard_sigmoid",
            "CuDNN": false,
            "bidirectional": false,
            "bimerge":"ave",
            "rnn": "GRU",
            "full": [64, 32],
            "activation_full": "sigmoid",
            "fulldrop": 0.05,
            "mode": "RNN_s2s"
        }

        :return:
        """
        neurons = self.config['arch']['neurons']
        drop = self.config['arch']['drop']
        nlayersE = self.config['arch']['nlayers']  # >= 1

        activation = self.config['arch']['activation']
        activation_r = self.config['arch']['activation_r']
        rec_reg = self.config['arch']['rec_reg']
        rec_regw = self.config['arch']['rec_regw']
        k_reg = self.config['arch']['k_reg']
        k_regw = self.config['arch']['k_regw']
        rnntype = self.config['arch']['rnn']

        full = self.config['arch']['full']
        fulldrop = self.config['arch']['fulldrop']
        activation_full = self.config['arch']['activation_full']
        bidir = self.config['arch']['bidirectional']
        bimerge = self.config['arch']['bimerge']
        attsize = self.config['arch']['attsize']

        # Extra added from training function
        idimensions = self.config['idimensions']
        odimensions = self.config['odimensions']
        impl = self.runconfig.impl

        if rec_reg == 'l1':
            rec_regularizer = l1(rec_regw)
        elif rec_reg == 'l2':
            rec_regularizer = l2(rec_regw)
        else:
            rec_regularizer = None

        if k_reg == 'l1':
            k_regularizer = l1(k_regw)
        elif k_reg == 'l2':
            k_regularizer = l2(k_regw)
        else:
            k_regularizer = None

        RNN = LSTM if rnntype == 'LSTM' else GRU

        input = Input(shape=(idimensions))

        if nlayersE == 1:
            model = RNN(neurons,
                        implementation=impl,
                        return_sequences=True,
                        recurrent_dropout=drop,
                        recurrent_activation=activation_r,
                        recurrent_regularizer=rec_regularizer,
                        kernel_regularizer=k_regularizer)(input)
            model = generate_activation(activation)(model)
        else:
            model = RNN(neurons,
                        implementation=impl,
                        return_sequences=True,
                        recurrent_dropout=drop,
                        recurrent_activation=activation_r,
                        recurrent_regularizer=rec_regularizer,
                        kernel_regularizer=k_regularizer)(input)
            model = generate_activation(activation)(model)

            for i in range(1, nlayersE - 1):
                model = RNN(neurons,
                            implementation=impl,
                            return_sequences=True,
                            recurrent_dropout=drop,
                            recurrent_activation=activation_r,
                            recurrent_regularizer=rec_regularizer,
                            kernel_regularizer=k_regularizer)(model)
                model = generate_activation(activation)(model)

            model = RNN(neurons,
                        implementation=impl,
                        return_sequences=True,
                        recurrent_dropout=drop,
                        recurrent_activation=activation_r,
                        recurrent_regularizer=rec_regularizer,
                        kernel_regularizer=k_regularizer)(model)
            model = generate_activation(activation)(model)


        model = SelfAttention(attention_type='additive')(model)
        for nn in full:
            model = Dense(nn)(model)
            model = generate_activation(activation_full)(model)
            model = Dropout(rate=fulldrop)(model)

        output = Dense(odimensions, activation='linear')(model)

        self.model = Model(inputs=input, outputs=output)
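
The l1/l2 selection block above (with the second elif fixed to test k_reg) recurs verbatim in every model in this collection. A minimal helper with the same behaviour, sketched here assuming the tensorflow.keras regularizers:

from tensorflow.keras.regularizers import l1, l2

def make_regularizer(kind, weight):
    # 'l1' and 'l2' map to the corresponding Keras regularizer; anything
    # else, including the literal string "None" used in the configs,
    # means no regularization.
    if kind == 'l1':
        return l1(weight)
    if kind == 'l2':
        return l2(weight)
    return None
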
コード例 #22
    def generate_model(self):
        """
        Model for RNN for S2S multiple regression

        -------------
        json config:

        "arch": {
            "neurons":128,
            "k_reg": "None",
            "k_regw": 0.1,
            "rec_reg": "None",
            "rec_regw": 0.1,
            "drop": 0.3,
            "nlayers": 1,
            "activation": "tanh",
            "activation_r": "hard_sigmoid",
            "CuDNN": false,
            "bidirectional": false,
            "bimerge":"ave",
            "rnn": "GRU",
            "full": [64, 32],
            "activation_full": "sigmoid",
            "fulldrop": 0.05,
            "mode": "RNN_s2s"
        }

        :return:
        """
        neurons = self.config['arch']['neurons']
        drop = self.config['arch']['drop']
        nlayersE = self.config['arch']['nlayers']  # >= 1

        activation = self.config['arch']['activation']
        activation_r = self.config['arch']['activation_r']
        rec_reg = self.config['arch']['rec_reg']
        rec_regw = self.config['arch']['rec_regw']
        k_reg = self.config['arch']['k_reg']
        k_regw = self.config['arch']['k_regw']
        rnntype = self.config['arch']['rnn']

        full = self.config['arch']['full']
        fulldrop = self.config['arch']['fulldrop']
        activation_full = self.config['arch']['activation_full']
        bidir = self.config['arch']['bidirectional']
        bimerge = self.config['arch']['bimerge']

        backwards = self.config['arch'].get('backwards', False)

        # GRU parameter for the alternative implementation
        after = self.config['arch'].get('after', False)

        # Extra added from training function
        idimensions = self.config['idimensions']
        odimensions = self.config['odimensions']
        impl = self.runconfig.impl

        if rec_reg == 'l1':
            rec_regularizer = l1(rec_regw)
        elif rec_reg == 'l2':
            rec_regularizer = l2(rec_regw)
        else:
            rec_regularizer = None

        if k_reg == 'l1':
            k_regularizer = l1(k_regw)
        elif k_reg == 'l2':
            k_regularizer = l2(k_regw)
        else:
            k_regularizer = None


        input = Input(shape=(idimensions))

        if nlayersE == 1:
            model = generate_recurrent_layer(neurons, impl, drop, activation_r, rec_regularizer, k_regularizer, backwards,
                             rnntype, after, bidir, bimerge, rseq=True)(input)
            model = generate_activation(activation)(model)

        else:
            model = generate_recurrent_layer(neurons, impl, drop, activation_r, rec_regularizer, k_regularizer, backwards,
                             rnntype, after, bidir, bimerge, rseq=True)(input)
            model = generate_activation(activation)(model)

            for i in range(1, nlayersE - 1):
                model = generate_recurrent_layer(neurons, impl, drop, activation_r, rec_regularizer, k_regularizer, backwards,
                                 rnntype, after, bidir, bimerge, rseq=True)(model)
                model = generate_activation(activation)(model)

            model = generate_recurrent_layer(neurons, impl, drop, activation_r, rec_regularizer, k_regularizer, backwards,
                             rnntype, after, bidir, bimerge, rseq=True)(model)
            model = generate_activation(activation)(model)

        model = Flatten()(model)

        for nn in full:
            model = Dense(nn)(model)
            model = generate_activation(activation_full)(model)
            model = Dropout(rate=fulldrop)(model)

        output = Dense(odimensions, activation='linear')(model)

        self.model = Model(inputs=input, outputs=output)
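
A quick shape check of the stack above, with illustrative numbers: since every recurrent layer is built with rseq=True it returns the full sequence, so Flatten hands timesteps * neurons values to the dense head.

import numpy as np

timesteps, neurons = 12, 128
seq_out = np.zeros((1, timesteps, neurons))    # (batch, timesteps, neurons)
flat = seq_out.reshape(seq_out.shape[0], -1)   # what Flatten produces
print(flat.shape)                              # (1, 1536)
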
コード例 #23
    def generate_model(self):
        """
        Model for separable CNN for S2S

        json config:

        "arch": {
            "filters": [32],
            "strides": [1],
            "dilation": false,
            "kernel_size": [3],
            "depth_multiplier": 1,
            "k_reg": "None",
            "k_regw": 0.1,
            "rec_reg": "None",
            "rec_regw": 0.1,
            "drop": 0,
            "activation": "relu",
            "activation_full": "linear",
            "full": [16,8],
            "fulldrop": 0,
            "mode":"CNN_sep_s2s"
        }

        :return:
        """
        drop = self.config['arch']['drop']
        filters = self.config['arch']['filters']
        kernel_size = self.config['arch']['kernel_size']
        # If there is a dilation field and it is true the strides field is the dilation rates
        # and the strides are all 1's
        if 'dilation' in self.config['arch'] and self.config['arch'][
                'dilation']:
            dilation = self.config['arch']['strides']
            strides = [1] * len(dilation)
        else:
            strides = self.config['arch']['strides']
            dilation = [1] * len(strides)

        depth_multiplier = self.config['arch']['depth_multiplier']
        activationfl = self.config['arch']['activation_full']
        fulldrop = self.config['arch']['fulldrop']
        full_layers = self.config['arch']['full']

        activation = self.config['arch']['activation']

        k_reg = self.config['arch']['k_reg']
        k_regw = self.config['arch']['k_regw']

        # Extra added from training function
        idimensions = self.config['idimensions']
        odimensions = self.config['odimensions']

        if 'batchnorm' in self.config['arch']:
            bnorm = self.config['arch']['batchnorm']
        else:
            bnorm = False

        if k_reg == 'l1':
            k_regularizer = l1(k_regw)
        elif k_reg == 'l2':
            k_regularizer = l2(k_regw)
        else:
            k_regularizer = None

        input = Input(shape=(idimensions))
        model = SeparableConv1D(filters[0],
                                kernel_size=kernel_size[0],
                                strides=strides[0],
                                padding='same',
                                dilation_rate=dilation[0],
                                depth_multiplier=depth_multiplier,
                                kernel_regularizer=k_regularizer)(input)
        model = generate_activation(activation)(model)
        if bnorm:
            model = BatchNormalization()(model)

        if drop != 0:
            model = Dropout(rate=drop)(model)

        for i in range(1, len(filters)):
            model = SeparableConv1D(filters[i],
                                    kernel_size=kernel_size[i],
                                    strides=strides[i],
                                    padding='same',
                                    dilation_rate=dilation[i],
                                    depth_multiplier=depth_multiplier,
                                    kernel_regularizer=k_regularizer)(model)
            model = generate_activation(activation)(model)
            if bnorm:
                model = BatchNormalization()(model)

            if drop != 0:
                model = Dropout(rate=drop)(model)

        model = Flatten()(model)
        for l in full_layers:
            model = Dense(l)(model)
            model = generate_activation(activationfl)(model)
            if bnorm:
                model = BatchNormalization()(model)
            if fulldrop != 0:
                model = Dropout(rate=fulldrop)(model)

        output = Dense(odimensions, activation='linear')(model)

        self.model = Model(inputs=input, outputs=output)
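
For reference, the parameter count of a SeparableConv1D layer like the ones above follows the standard depthwise + pointwise decomposition; a small sketch of that arithmetic (standard Keras semantics, not project code):

def separable_conv1d_params(kernel_size, in_ch, filters, depth_multiplier=1, bias=True):
    depthwise = kernel_size * in_ch * depth_multiplier   # one kernel per replicated input channel
    pointwise = in_ch * depth_multiplier * filters       # 1x1 projection to the output filters
    return depthwise + pointwise + (filters if bias else 0)

print(separable_conv1d_params(3, 1, 32))   # 67 parameters for a univariate input
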
コード例 #24
    def generate_model(self):
        """
        Model for CNN with skip connections for S2S

        json config:

        "arch": {
            "filters": [32],
            "strides": [1],
            "dilation": false,
            "kernel_size": [3],
            "k_reg": "None",
            "k_regw": 0.1,
            "rec_reg": "None",
            "rec_regw": 0.1,
            "drop": 0,
            "activation": "relu",
            "activation_full": "linear",
            "full": [16,8],
            "fulldrop": 0,
            "mode":"CNN_s2s"
        }

        :return:
        """
        drop = self.config['arch']['drop']
        filters = self.config['arch']['filters']
        kernel_size = self.config['arch']['kernel_size']
        # If there is a dilation field and it is true the strides field is the dilation rates
        # and the strides are all 1's
        if 'dilation' in self.config['arch'] and self.config['arch'][
                'dilation']:
            dilation = self.config['arch']['strides']
            strides = [1] * len(dilation)
        else:
            strides = self.config['arch']['strides']
            dilation = [1] * len(strides)
        activationfl = self.config['arch']['activation_full']
        fulldrop = self.config['arch']['fulldrop']
        full_layers = self.config['arch']['full']

        activation = self.config['arch']['activation']

        k_reg = self.config['arch']['k_reg']
        k_regw = self.config['arch']['k_regw']

        # Extra added from training function
        idimensions = self.config['idimensions']
        odimensions = self.config['odimensions']

        if k_reg == 'l1':
            k_regularizer = l1(k_regw)
        elif k_reg == 'l2':
            k_regularizer = l2(k_regw)
        else:
            k_regularizer = None

        input = Input(shape=(idimensions))
        model = Conv1D(filters[0],
                       kernel_size=kernel_size[0],
                       strides=strides[0],
                       padding='causal',
                       dilation_rate=dilation[0],
                       kernel_regularizer=k_regularizer)(input)
        model = generate_activation(activation)(model)

        if drop != 0:
            model = Dropout(rate=drop)(model)

        last2 = model  # keep for generating the skip connections
        last1 = input
        for i in range(1, len(filters)):
            model = Concatenate()([model, last1])
            model = Conv1D(filters[i],
                           kernel_size=kernel_size[i],
                           strides=strides[i],
                           padding='causal',
                           dilation_rate=dilation[i],
                           kernel_regularizer=k_regularizer)(model)
            model = generate_activation(activation)(model)

            if drop != 0:
                model = Dropout(rate=drop)(model)
            last1 = last2
            last2 = model

        model = Concatenate()([model, last1])
        model = Flatten()(model)
        for l in full_layers:
            model = Dense(l)(model)
            model = generate_activation(activationfl)(model)
            if fulldrop != 0:
                model = Dropout(rate=fulldrop)(model)

        output = Dense(odimensions, activation='linear')(model)

        self.model = Model(inputs=input, outputs=output)
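
The skip scheme above concatenates each convolution's input with the activations from two layers back (the raw input for the first concatenation). A minimal standalone sketch of the same pattern, assuming tensorflow.keras:

from tensorflow.keras.layers import Input, Conv1D, Concatenate

inp = Input(shape=(16, 1))
x = Conv1D(8, kernel_size=3, padding='causal')(inp)
last1, last2 = inp, x
for f in (8, 8):
    x = Concatenate()([x, last1])            # skip from two layers back
    x = Conv1D(f, kernel_size=3, padding='causal')(x)
    last1, last2 = last2, x
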
コード例 #25
    def generate_model(self):
        """
        Model for CNN  for S2S

        json config:

        "arch": {
            "filters": [32],
            "strides": [1],
            "dilation": false,
            "kernel_size": [3],
            "k_reg": "None",
            "k_regw": 0.1,
            "rec_reg": "None",
            "rec_regw": 0.1,
            "drop": 0,
            "activation": "relu",
            "activation_full": "linear",
            "full": [16,8],
            "fulldrop": 0,
            "mode":"CNN_s2s"
        }

        :return:
        """
        drop = self.config['arch']['drop']
        filters = self.config['arch']['filters']
        kernel_size = self.config['arch']['kernel_size']
        # If there is a dilation field and it is true the strides field is the dilation rates
        # and the strides are all 1's
        if 'dilation' in self.config['arch'] and self.config['arch']['dilation']:
            dilation = self.config['arch']['strides']
            strides = [1] * len(dilation)
        else:
            strides = self.config['arch']['strides']
            dilation = [1] * len(strides)

        # 2nd Layer
        drop2 = self.config['arch']['drop2']
        filters2 = self.config['arch']['filters2']
        kernel_size2 = self.config['arch']['kernel_size2']
        # If there is a dilation field and it is true the strides field is the dilation rates
        # and the strides are all 1's
        if 'dilation' in self.config['arch'] and self.config['arch']['dilation']:
            dilation2 = self.config['arch']['strides2']
            strides2 = [1] * len(dilation2)
        else:
            strides2 = self.config['arch']['strides2']
            dilation2 = [1] * len(strides2)


        activationfl = self.config['arch']['activation_full']
        fulldrop = self.config['arch']['fulldrop']
        full_layers = self.config['arch']['full']

        activation = self.config['arch']['activation']

        k_reg = self.config['arch']['k_reg']
        k_regw = self.config['arch']['k_regw']

        # Extra added from training function
        idimensions = self.config['idimensions']
        odimensions = self.config['odimensions']

        if k_reg == 'l1':
            k_regularizer = l1(k_regw)
        elif k_reg == 'l2':
            k_regularizer = l2(k_regw)
        else:
            k_regularizer = None
        input = Input(shape=(idimensions))
        model = Conv1D(filters[0], kernel_size=kernel_size[0], strides=strides[0],
                              padding='causal', dilation_rate=dilation[0],
                              kernel_regularizer=k_regularizer)(input)
        model = generate_activation(activation)(model)

        if drop != 0:
            model = Dropout(rate=drop)(model)

        model = Conv1D(filters2[0], kernel_size=kernel_size2[0], strides=strides2[0],
                          padding='causal', dilation_rate=dilation2[0],
                          kernel_regularizer=k_regularizer)(model)
        model = generate_activation(activation)(model)

        if drop2 != 0:
            model = Dropout(rate=drop2)(model)

        model = Flatten()(model)
        for l in full_layers:
            model = Dense(l)(model)
            model = generate_activation(activationfl)(model)
            if fulldrop != 0:
                model = Dropout(rate=fulldrop)(model)

        output = Dense(odimensions, activation='linear')(model)

        self.model = Model(inputs=input, outputs=output)
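
With causal padding and stride 1, each stacked convolution above widens the receptive field by (kernel_size - 1) * dilation. A sketch of that standard arithmetic (not project code):

def receptive_field(kernel_sizes, dilations):
    rf = 1
    for k, d in zip(kernel_sizes, dilations):
        rf += (k - 1) * d
    return rf

print(receptive_field([3, 3], [1, 2]))  # 7 time steps
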
コード例 #26
    def generate_model(self):
        """
        Model for RNN with Encoder Decoder for S2S with attention
        -------------
        json config:

        "arch": {
            "neurons":32,
            "k_reg": "None",
            "k_regw": 0.1,
            "rec_reg": "None",
            "rec_regw": 0.1,
            "drop": 0.3,
            "nlayersE": 1,
            "nlayersD": 1,
            "activation": "relu",
            "activation_r": "hard_sigmoid",
            "CuDNN": false,
            "rnn": "GRU",
            "full": [64, 32],
            "mode": "RNN_ED_s2s_att"
        }

        :return:
        """
        neurons = self.config['arch']['neurons']
        attsize = self.config['arch']['attsize']
        drop = self.config['arch']['drop']
        nlayersE = self.config['arch']['nlayersE']  # >= 1

        activation = self.config['arch']['activation']
        activation_r = self.config['arch']['activation_r']
        activation_fl = self.config['arch']['activation_fl']
        rec_reg = self.config['arch']['rec_reg']
        rec_regw = self.config['arch']['rec_regw']
        k_reg = self.config['arch']['k_reg']
        k_regw = self.config['arch']['k_regw']
        rnntype = self.config['arch']['rnn']
        CuDNN = self.config['arch']['CuDNN']
        # neuronsD = self.config['arch']['neuronsD']
        full_layers = self.config['arch']['full']
        fulldrop = self.config['arch']['fulldrop']

        # Extra added from training function
        idimensions = self.config['idimensions']
        odimensions = self.config['odimensions']

        impl = self.runconfig.impl

        if rec_reg == 'l1':
            rec_regularizer = l1(rec_regw)
        elif rec_reg == 'l2':
            rec_regularizer = l2(rec_regw)
        else:
            rec_regularizer = None

        if k_reg == 'l1':
            k_regularizer = l1(k_regw)
        elif k_reg == 'l2':
            k_regularizer = l2(k_regw)
        else:
            k_regularizer = None


        # Encoder RNN - First Input
        enc_input = Input(shape=(idimensions))
        encoder = generate_recurrent_layer(neurons, impl, drop, activation_r, rec_regularizer, k_regularizer, False,
                         rnntype, False, False, None, rseq=True)(enc_input)
        encoder = generate_activation(activation)(encoder)

        for i in range(1, nlayersE):
            encoder = generate_recurrent_layer(neurons, impl, drop, activation_r, rec_regularizer, k_regularizer, False,
                         rnntype, False, False, None, rseq=True)(encoder)  # stack on the previous layer, not the input
            encoder = generate_activation(activation)(encoder)

        decoder = AttentionDecoder(attsize, odimensions)(encoder)

        output = TimeDistributed(Dense(full_layers[0]))(decoder)
        output = TimeDistributed(generate_activation(activation_fl))(output)
        output = TimeDistributed(Dropout(rate=fulldrop))(output)
        for l in full_layers[1:]:
            output = TimeDistributed(Dense(l))(output)
            output = TimeDistributed(generate_activation(activation_fl))(output)
            output = TimeDistributed(Dropout(rate=fulldrop))(output)

        output = TimeDistributed(Dense(1, activation="linear"))(output)


        self.model = Model(inputs=enc_input, outputs=output)
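
A shape sketch for the decoder head, with illustrative numbers; AttentionDecoder is assumed here to emit one attsize-wide vector per output time step, which is what the TimeDistributed head requires:

import numpy as np

odimensions, attsize = 6, 32
decoder_out = np.zeros((1, odimensions, attsize))   # assumed decoder output shape
# TimeDistributed(Dense(1, activation='linear')) maps the last axis to 1:
print(decoder_out.shape[:2] + (1,))                 # (1, 6, 1)
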
コード例 #27
    def generate_model(self):
        """
        Model for multi-head CNN for S2S

        json config:

        "arch": {
            "filters": 32,
            "strides": 1,
            "dilation": false,
            "kernel_size": [3],
            "k_reg": "None",
            "k_regw": 0.1,
            "rec_reg": "None",
            "rec_regw": 0.1,
            "drop": 0,
            "activation": "relu",
            "activation_full": "linear",
            "full": [16,8],
            "fulldrop": 0,
            "mode":"CNN_s2s"
        }

        :return:
        """
        # 1st head
        drop = self.config['arch']['drop']
        filters = self.config['arch']['filters']
        kernel_size = self.config['arch']['kernel_size']
        if type(kernel_size) != list:
            raise NameError('kernel size must be a list')

        strides = self.config['arch']['strides']

        # 2nd head
        drop2 = self.config['arch']['drop2']
        filters2 = self.config['arch']['filters2']
        kernel_size2 = self.config['arch']['kernel_size2']
        if type(kernel_size2) != list:
            raise NameError('kernel size must be a list')

        strides2 = self.config['arch']['strides2']

        # 3rd head
        drop3 = self.config['arch']['drop3']
        filters3 = self.config['arch']['filters3']
        kernel_size3 = self.config['arch']['kernel_size3']
        if type(kernel_size3) != list:
            raise NameError('kernel size must be a list')

        strides3 = self.config['arch']['strides3']


        activation = self.config['arch']['activation']

        activationfl = self.config['arch']['activation_full']
        fulldrop = self.config['arch']['fulldrop']
        full_layers = self.config['arch']['full']


        k_reg = self.config['arch']['k_reg']
        k_regw = self.config['arch']['k_regw']

        # Extra added from training function
        idimensions = self.config['idimensions']
        odimensions = self.config['odimensions']

        if k_reg == 'l1':
            k_regularizer = l1(k_regw)
        elif k_reg == 'l2':
            k_regularizer = l2(k_regw)
        else:
            k_regularizer = None


        input = Input(shape=(idimensions))

        lconv = []

        # 1st head
        convomodel = Conv1D(filters[0], kernel_size=kernel_size[0], strides=strides[0],
                          padding='causal', kernel_regularizer=k_regularizer)(input)
        convomodel = generate_activation(activation)(convomodel)

        if drop != 0:
            convomodel = Dropout(rate=drop)(convomodel)
        lconv.append(Flatten()(convomodel))

        # 2nd head
        convomodel = Conv1D(filters2[0], kernel_size=kernel_size2[0], strides=strides2[0],
                          padding='causal', kernel_regularizer=k_regularizer)(input)
        convomodel = generate_activation(activation)(convomodel)

        if drop2 != 0:
            convomodel = Dropout(rate=drop2)(convomodel)
        lconv.append(Flatten()(convomodel))

        # 3rd head
        convomodel = Conv1D(filters3[0], kernel_size=kernel_size3[0], strides=strides3[0],
                          padding='causal', kernel_regularizer=k_regularizer)(input)
        convomodel = generate_activation(activation)(convomodel)

        if drop3 != 0:
            convomodel = Dropout(rate=drop3)(convomodel)
        lconv.append(Flatten()(convomodel))

        convoout = Concatenate()(lconv)
        fullout = Dense(full_layers[0])(convoout)
        fullout = generate_activation(activationfl)(fullout)
        fullout = Dropout(rate=fulldrop)(fullout)

        for l in full_layers[1:]:
            fullout = Dense(l)(fullout)
            fullout = generate_activation(activationfl)(fullout)
            fullout = Dropout(rate=fulldrop)(fullout)


        output = Dense(odimensions, activation='linear')(fullout)

        self.model = Model(inputs=input, outputs=output)
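
The three heads above are parallel causal convolutions with different kernel sizes over the same input, flattened and concatenated before the dense head. A minimal standalone sketch of that pattern, assuming tensorflow.keras:

from tensorflow.keras.layers import Input, Conv1D, Flatten, Concatenate, Dense
from tensorflow.keras.models import Model

inp = Input(shape=(16, 1))
heads = [Flatten()(Conv1D(8, k, padding='causal')(inp)) for k in (3, 5, 7)]
out = Dense(1, activation='linear')(Concatenate()(heads))
m = Model(inputs=inp, outputs=out)
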
コード例 #28
    def generate_model(self):
        """
        Model for separable CNN for S2S

        json config:

        "arch": {
            "filters": [32],
            "strides": [1],
            "dilation": false,
            "kernel_size": [3],
            "depth_multiplier": 1,
            "activation": "relu",
            "drop": 0,
            "k_reg": "None",
            "k_regw": 0.1,
            "rec_reg": "None",
            "rec_regw": 0.1,
            "activation_full": "linear",
            "full": [16,8],
            "fulldrop": 0,
            "mode":"CNN_sep_2l_s2s"
        }

        :return:
        """

        # 1st Layer
        drop = self.config['arch']['drop']
        filters = self.config['arch']['filters']
        kernel_size = self.config['arch']['kernel_size']
        # If there is a dilation field and it is true the strides field is the dilation rates
        # and the strides are all 1's
        if 'dilation' in self.config['arch'] and self.config['arch'][
                'dilation']:
            dilation = self.config['arch']['strides']
            strides = [1] * len(dilation)
        else:
            strides = self.config['arch']['strides']
            dilation = [1] * len(strides)

        depth_multiplier = self.config['arch']['depth_multiplier']
        activation = self.config['arch']['activation']

        # 2nd Layer
        drop2 = self.config['arch']['drop2']
        filters2 = self.config['arch']['filters2']
        kernel_size2 = self.config['arch']['kernel_size2']
        # If there is a dilation field and it is true the strides field is the dilation rates
        # and the strides are all 1's
        if 'dilation' in self.config['arch'] and self.config['arch'][
                'dilation']:
            dilation2 = self.config['arch']['strides2']
            strides2 = [1] * len(dilation2)
        else:
            strides2 = self.config['arch']['strides2']
            dilation2 = [1] * len(strides2)

        depth_multiplier2 = self.config['arch']['depth_multiplier2']

        # 3rd Layer
        drop3 = self.config['arch']['drop3']
        filters3 = self.config['arch']['filters3']
        kernel_size3 = self.config['arch']['kernel_size3']
        # If there is a dilation field and it is true the strides field is the dilation rates
        # and the strides are all 1's
        if 'dilation' in self.config['arch'] and self.config['arch'][
                'dilation']:
            dilation3 = self.config['arch']['strides3']
            strides3 = [1] * len(dilation3)
        else:
            strides3 = self.config['arch']['strides3']
            dilation3 = [1] * len(strides3)

        depth_multiplier3 = self.config['arch']['depth_multiplier3']

        # 4th Layer
        drop4 = self.config['arch']['drop4']
        filters4 = self.config['arch']['filters4']
        kernel_size4 = self.config['arch']['kernel_size4']
        # If there is a dilation field and it is true the strides field is the dilation rates
        # and the strides are all 1's
        if 'dilation' in self.config['arch'] and self.config['arch'][
                'dilation']:
            dilation4 = self.config['arch']['strides4']
            strides4 = [1] * len(dilation4)
        else:
            strides4 = self.config['arch']['strides4']
            dilation4 = [1] * len(strides4)

        depth_multiplier4 = self.config['arch']['depth_multiplier4']

        activationfl = self.config['arch']['activation_full']
        fulldrop = self.config['arch']['fulldrop']
        full_layers = self.config['arch']['full']

        k_reg = self.config['arch']['k_reg']
        k_regw = self.config['arch']['k_regw']

        # Extra added from training function
        idimensions = self.config['idimensions']
        odimensions = self.config['odimensions']

        if k_reg == 'l1':
            k_regularizer = l1(k_regw)
        elif k_reg == 'l2':
            k_regularizer = l2(k_regw)
        else:
            k_regularizer = None

        input = Input(shape=(idimensions))
        model = SeparableConv1D(filters[0],
                                kernel_size=kernel_size[0],
                                strides=strides[0],
                                padding='same',
                                dilation_rate=dilation[0],
                                depth_multiplier=depth_multiplier,
                                kernel_regularizer=k_regularizer)(input)
        model = generate_activation(activation)(model)

        if drop != 0:
            model = Dropout(rate=drop)(model)

        model = SeparableConv1D(filters2[0],
                                kernel_size=kernel_size2[0],
                                strides=strides2[0],
                                padding='same',
                                dilation_rate=dilation2[0],
                                depth_multiplier=depth_multiplier2,
                                kernel_regularizer=k_regularizer)(model)
        model = generate_activation(activation)(model)

        if drop2 != 0:
            model = Dropout(rate=drop2)(model)

        model = SeparableConv1D(filters3[0],
                                kernel_size=kernel_size3[0],
                                strides=strides3[0],
                                padding='same',
                                dilation_rate=dilation3[0],
                                depth_multiplier=depth_multiplier3,
                                kernel_regularizer=k_regularizer)(model)
        model = generate_activation(activation)(model)

        if drop3 != 0:
            model = Dropout(rate=drop3)(model)

        model = SeparableConv1D(filters4[0],
                                kernel_size=kernel_size4[0],
                                strides=strides4[0],
                                padding='same',
                                dilation_rate=dilation4[0],
                                depth_multiplier=depth_multiplier4,
                                kernel_regularizer=k_regularizer)(model)
        model = generate_activation(activation)(model)

        if drop4 != 0:
            model = Dropout(rate=drop4)(model)

        model = Flatten()(model)
        for l in full_layers:
            model = Dense(l)(model)
            model = generate_activation(activationfl)(model)
            if fulldrop != 0:
                model = Dropout(rate=fulldrop)(model)

        output = Dense(odimensions, activation='linear')(model)

        self.model = Model(inputs=input, outputs=output)
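
The four convolutions above are configured through suffixed keys (filters2 ... depth_multiplier4). A loop-friendly reading of that convention, sketched under the assumption that the suffix scheme is intentional:

def layer_params(arch, n):
    # n = 1 uses the unsuffixed keys; n = 2..4 append the layer number
    s = '' if n == 1 else str(n)
    return (arch['filters' + s], arch['kernel_size' + s], arch['strides' + s],
            arch['drop' + s], arch['depth_multiplier' + s])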