def rmse(y_true, y_pred):
    m_factor = rmse_factor
    m = RootMeanSquaredError()
    # Undo the min-max normalization so the error is reported on the
    # original scale of the data, then apply the scaling factor.
    m.update_state(inverse_transform(y_true, mmn),
                   inverse_transform(y_pred, mmn))
    # return denormalize(m.result().numpy(), mmn) * m_factor
    return m.result().numpy() * m_factor
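This snippet depends on globals defined elsewhere (`rmse_factor`, `mmn`, `inverse_transform`). A minimal self-contained sketch of what those pieces plausibly look like, assuming `mmn` is a min-max normalizer that records the original range (the helper names and the factor value are assumptions; place it in the same module as the function above):

import numpy as np
from tensorflow.keras.metrics import RootMeanSquaredError

class MinMaxNormalizer:
    # Hypothetical stand-in for the `mmn` object used above.
    def __init__(self, data):
        self.min, self.max = float(data.min()), float(data.max())

def inverse_transform(x, mmn):
    # Map values from [0, 1] back to the original range.
    return x * (mmn.max - mmn.min) + mmn.min

rmse_factor = 1.0  # assumed scaling constant

mmn = MinMaxNormalizer(np.array([0.0, 100.0]))
y_true = np.array([0.1, 0.5, 0.9], dtype='float32')
y_pred = np.array([0.2, 0.4, 1.0], dtype='float32')
print(rmse(y_true, y_pred))  # RMSE on the denormalized scale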
Example #2
    def build_model(self):
        input_layer = Input((self.ts_length, self.num_variables))

        conv1 = Conv1D(filters=128, kernel_size=8, padding='same')(input_layer)
        conv1 = BatchNormalization()(conv1)
        conv1 = Activation('relu')(conv1)

        conv2 = Conv1D(filters=256, kernel_size=5, padding='same')(conv1)
        conv2 = BatchNormalization()(conv2)
        conv2 = Activation('relu')(conv2)

        conv3 = Conv1D(filters=128, kernel_size=3, padding='same')(conv2)
        conv3 = BatchNormalization()(conv3)
        conv3 = Activation('relu')(conv3)

        gap_layer = GlobalAveragePooling1D()(conv3)

        output_layer = Dense(self.num_variables,
                             activation='linear')(gap_layer)

        model = Model(inputs=input_layer, outputs=output_layer)

        model.compile(loss=self.loss,
                      optimizer=self.optimizer,
                      metrics=[RootMeanSquaredError()])

        return model
Example #3
def build_cnn(n_hidden=2, n_neuron=50, learning_rate=1e-3, in_shape=200, drop=0.1):
    """
    Objective: Create Convolutional Neural Network Architecture for regression
    How to get started: https://www.datatechnotes.com/2019/12/how-to-fit-regression-data-with-cnn.html
    How to tune: https://machinelearningmastery.com/grid-search-hyperparameters-deep-learning-models-python-keras/
    Valuable info:
    https://github.com/keras-team/keras/blob/8a8ef43ffcf8d95d2880da073bbcee73e51aad48/docs/templates/getting-started/sequential-model-guide.md
    :param input_shape:
    :return:
    """
    model = keras.models.Sequential()
    # Experiment on both Conv1D and Conv2D is needed.
    # According to most tutorial, the input_shape depends on the shape of your data. In this case, the shape of our data
    # is (number_of_features, 1) since
    model.add(keras.layers.Dropout(drop, input_shape=(in_shape, 1)))  # in_shape should be iterable (tuple)
    for layer in range(n_hidden):  # create hidden layers
        model.add(keras.layers.Dense(n_neuron, activation="relu"))
        model.add(keras.layers.Dropout(drop))  # add dropout to model after the a dense layer
    model.add(Conv1D(32, 2, activation='relu', input_shape=(in_shape, 1)))

    # model.add(keras.layers.MaxPooling1D(pool_size=3))
    model.add(Conv1D(64, 2, activation='relu', input_shape=(in_shape, 1)))
    model.add(Flatten(input_shape=(in_shape, 1)))
    model.add(Dense(64, activation="relu"))
    model.add(Dense(1))
    optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
    model.compile(loss="mse", optimizer=optimizer, metrics=[RootMeanSquaredError(name='rmse')])
    return model
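A quick usage sketch on toy data (illustrative only, not part of the original source); note the extra channel axis the Conv1D stack expects:

import numpy as np

X = np.random.rand(64, 200)                  # 64 samples, 200 features
y = np.random.rand(64)
model = build_cnn(in_shape=X.shape[1])
model.fit(X.reshape(-1, 200, 1), y, epochs=2, verbose=0)    # add channel axis
print(model.evaluate(X.reshape(-1, 200, 1), y, verbose=0))  # [mse, rmse]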
Example #4
def build_nn(n_hidden=2, n_neuron=50, learning_rate=1e-3, in_shape=200, drop=0.1):
    """
    Create neural network architecture and compile. Accepts number of hidden layers, number of neurons,
    learning rate, and input shape. Returns compiled model.

    Keyword Arguments:
        n_hidden (integer): Number of hidden layers added to model, excludes input and output layer. Default = 2
        n_neuron (integer): Number of neurons to add to each hidden layer. Default = 50
        learning_rate (float):  Model learning rate that is passed to model optimizer.
                                Smaller values train more slowly; high values are prone to unstable training. Default = 0.001
        in_shape (integer): Input dimension; should match number of features. Default = 200 but should be overridden.
        drop (float): Dropout probability. 1 means drop everything, 0 means drop nothing. Default = 0.1.
                        Recommended = 0.2-0.6
    """

    model = keras.models.Sequential()
    # use dropout layer as input.
    model.add(keras.layers.Dropout(drop, input_shape=(in_shape,)))  # in_shape should be iterable (tuple)
    for layer in range(n_hidden):  # create hidden layers
        model.add(keras.layers.Dense(n_neuron, activation="relu"))
        model.add(keras.layers.Dropout(drop))  # add dropout after each dense layer

    model.add(keras.layers.Dense(1))  # output layer

    optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
    model.compile(loss="mse", optimizer=optimizer, metrics=[RootMeanSquaredError(name='rmse')])

    return model
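A hedged usage sketch on synthetic data (the data and hyperparameter values here are illustrative):

import numpy as np

X = np.random.rand(256, 30)
y = X.sum(axis=1)
model = build_nn(n_hidden=3, n_neuron=64, in_shape=X.shape[1], drop=0.2)
history = model.fit(X, y, epochs=5, validation_split=0.2, verbose=0)
print(history.history['val_rmse'][-1])  # RMSE on the held-out fraction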
Example #5
    def _build_model(self) -> Sequential:
        model = Sequential()
        for state in self.sequence:
            if isinstance(state, Input):
                model.add(layers.Input(shape=(state.units,)))
            elif isinstance(state, Hidden):
                model.add(layers.Dense(units=state.units, activation=state.activation))
            elif isinstance(state, Dropout):
                model.add(layers.Dropout(rate=state.rate))
            elif isinstance(state, Output):
                model.add(layers.Dense(units=state.units, activation=state.activation))

        optimizer = OPTIMIZER
        if optimizer == 'adam':
            optimizer = Adam(LEARNING_RATE)
        elif optimizer == 'nadam':
            optimizer = Nadam(LEARNING_RATE)
        elif optimizer == 'rmsprop':
            optimizer = RMSprop(LEARNING_RATE)

        metrics = []
        for metric in METRICS:
            if metric == 'r_square':
                metric = RSquare(name=metric, y_shape=(1,))
            elif metric == 'rmse':
                metric = RootMeanSquaredError(name=metric)

            metrics.append(metric)

        model.compile(
            optimizer=optimizer,
            loss=LOSS,
            metrics=metrics
        )
        return model
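The `Input`/`Hidden`/`Dropout`/`Output` states and the upper-case constants are defined elsewhere in the project. A minimal sketch of what they plausibly look like, so the method reads in context (the names come from the code above; the fields and defaults are assumptions, and `RSquare` comes from tensorflow_addons):

from dataclasses import dataclass

@dataclass
class Input:        # hypothetical layer specs consumed by _build_model
    units: int

@dataclass
class Hidden:
    units: int
    activation: str = 'relu'

@dataclass
class Dropout:      # spec object, distinct from keras `layers.Dropout`
    rate: float = 0.2

@dataclass
class Output:
    units: int = 1
    activation: str = 'linear'

OPTIMIZER = 'adam'        # assumed module-level configuration
LEARNING_RATE = 1e-3
LOSS = 'mse'
METRICS = ['rmse', 'r_square']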
Example #6
    def build_model(self):
        height, width, n_channels = self.img_size, self.img_size, self.num_variables

        model = Sequential([
            Conv2D(filters=32,
                   kernel_size=3,
                   padding='same',
                   input_shape=(height, width, n_channels)),
            BatchNormalization(),
            Activation('relu'),
            MaxPooling2D(pool_size=2),
            Conv2D(filters=32, kernel_size=3, padding='same'),
            BatchNormalization(),
            Activation('relu'),
            MaxPooling2D(pool_size=2),
            Dropout(0.25),
            Flatten(),
            Dense(256, activation='relu'),
            Dropout(0.25),
            Dense(self.num_variables, activation='linear')
        ])

        model.compile(loss=self.loss,
                      optimizer=self.optimizer,
                      metrics=[RootMeanSquaredError()])

        return model
Example #7
def build_MLP(optimizer="adam", learning_rate=1e-4, loss="mse"):
    """Build, compile and return an MLP model of with the given params

    Parameters
    ----------
    optimizer : str, optional {'adam', 'rmsprop'}
        The Keras optimizer you would like to use (case insensitive input), by
        default 'adam'
    learning_rate : float, optional
        The learning rate, by default 1e-4
    loss : str, optional {all keras losses are accepted}
        Keras loss to use, by default 'mse'

    Returns
    -------
    model
        MLP sequential Keras model compiled with given params
    """
    model = Sequential(
        [
            Dense(500, activation="relu"),
            Dense(250, activation="relu"),
            Dense(125, activation="relu"),
            Dense(62, activation="relu"),
            Dense(30, activation="relu"),
            Dense(15, activation="relu"),
            Dense(7, activation="relu"),
            Dense(1),
        ]
    )
    optimizer_object = get_optimizer(optimizer=optimizer, learning_rate=learning_rate)
    model.compile(
        loss=loss, optimizer=optimizer_object, metrics=[RootMeanSquaredError()]
    )
    return model
Example #8
def create_model(args, learning_rate, l1):
    hidden_layers = [int(n) for n in args.hidden_layers.split(',')]
    inputs = Input(shape=[N_FEATURES])
    hidden = inputs
    if hidden_layers != [-1]:
        for size in hidden_layers:
            hidden = Dense(size,
                           kernel_regularizer=L1L2(l1=l1),
                           bias_regularizer=L1L2(l1=l1))(hidden)
            hidden = BatchNormalization()(hidden)
            hidden = ReLU()(hidden)
    outputs = Dense(1)(hidden)
    model = Model(inputs=inputs, outputs=outputs)

    # I know this is ugly, but I added the sgd arg only later so older networks
    # do not have args.optimizer (and were optimized with Adam)
    try:
        if args.optimizer == "sgd":
            optimizer = SGD(learning_rate=learning_rate,
                            momentum=0.99,
                            nesterov=True)
        elif args.optimizer == "adam":
            optimizer = Adam(learning_rate=learning_rate)
    except AttributeError:
        optimizer = Adam(learning_rate=learning_rate)

    model.compile(
        optimizer=optimizer,
        loss='mse',
        metrics=[RootMeanSquaredError(),
                 MeanAbsoluteError(),
                 RSquare()])
    return model
Example #9
File: Models.py  Project: NgLeQuocCuong/CFs
    def __init__(
        self,
        user_size=100,
        item_size=100,
        representation_layers=[],
        embedding_size=16,
        matching_layers=[32],
        activation="relu",
    ):
        def joinLst(x):
            return "_".join([str(_) for _ in x])

        self.backup_path = f"./training/deepcf__{joinLst(representation_layers)}__{joinLst([embedding_size]+matching_layers)}/mdl.ckpt"
        self.cp_callback = ModelCheckpoint(
            filepath=self.backup_path, save_weights_only=True, verbose=0
        )
        inputs = self._create_inputs(user_size, item_size)
        representation_model = self._create_representation_model(
            inputs, representation_layers, activation
        )
        matchingfunction_model = self._create_matchingfunction_model(
            inputs, embedding_size, matching_layers, activation
        )
        fusion_layer = Concatenate()([representation_model, matchingfunction_model])
        output = Dense(1, activation="sigmoid")(fusion_layer)
        self.model = Model(inputs, output, name="DeepCF")
        self.model.compile(
            optimizer="adam",
            loss=BinaryCrossentropy(),
            metrics=[RootMeanSquaredError()],
        )
Example #10
File: Models.py  Project: NgLeQuocCuong/CFs
    def __init__(self, size1=512, size2=128, gru_length=20):
        self.backup_path = f"./training/zeroshot__{size1}__{size2}/mdl.ckpt"
        self.cp_callback = ModelCheckpoint(
            filepath=self.backup_path, save_weights_only=True, verbose=0
        )
        user_input = Input(shape=(gru_length, 768))
        item_input = Input(shape=(768,))  # shape must be a tuple, not a bare int
        self.inputs = [user_input, item_input]
        layer1 = Dense(size1, activation="relu")
        layer2 = Dense(size2, activation="relu")
        self.layers = [layer1, layer2]
        self.gru = GRU(size2)
        user_present = self.gru(layer2(layer1(user_input)))
        item_present = layer2(layer1(item_input))
        output = Activation(activation="sigmoid")(
            Dot(axes=1)([user_present, item_present])
        )
        self.model = Model(self.inputs, output, name="ZeroShot")
        self.model.compile(
            optimizer="adam",
            loss=BinaryCrossentropy(),
            metrics=[RootMeanSquaredError()],
        )
        self._update_models()
        self._gen_score_layer(size2)
Example #11
def _model_fit(train, config):
    # prepare data
    train_data = _series_to_supervised(train, n_in=config.n_input)
    X_train, y_train = train_data[:, :-1], train_data[:, -1]
    # define model
    model = Sequential()
    model.add(
        Dense(config.n_nodes, activation=config.activation, input_dim=config.n_input)
    )
    model.add(Dense(1))
    # compile
    model.compile(
        loss=config.loss, optimizer=config.optimizer, metrics=[RootMeanSquaredError()]
    )
    # fit
    history = model.fit(
        X_train,
        y_train,
        epochs=config.n_epochs,
        batch_size=config.n_batch,
        verbose=config.verbose,
        shuffle=False,
        validation_split=config.val_split,
        callbacks=[WandbCallback()],
    )
    return (model, history)
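`_series_to_supervised` is defined elsewhere in the project; a minimal sketch of the sliding-window transform it presumably performs, in the style of the common machinelearningmastery helper (the exact implementation may differ):

import numpy as np

def _series_to_supervised(data, n_in=1):
    # Turn a univariate series [x0, x1, ...] into rows of
    # [x_t, ..., x_{t+n_in-1}, x_{t+n_in}], so the last column is
    # the one-step-ahead target split off as y_train above.
    data = np.asarray(data).flatten()
    rows = [data[i:i + n_in + 1] for i in range(len(data) - n_in)]
    return np.array(rows)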
Example #12
    def modelCompile(self, model, learning_rate, decay, amsgrad):
        # compile the model
        adam = Adam(learning_rate=learning_rate, decay=decay, amsgrad=amsgrad)
        model.compile(loss='mean_squared_error',
                      optimizer=adam,
                      metrics=[RootMeanSquaredError(), 'mae', 'acc'])
        return model
Example #13
def build_LSTM(config):
    # Add (config.num_layers - 1) layers that return sequences
    lstm_list = [
        LSTM(
            config.num_nodes,
            return_sequences=True,
            stateful=True,
            batch_input_shape=(config.n_batch, config.n_input, 1),
            dropout=config.dropout,
            recurrent_dropout=config.recurrent_dropout,
        )
        for _ in range(config.num_layers - 1)
    ]
    # Final layer does not return sequences
    lstm_list.append(
        LSTM(
            config.num_nodes,
            return_sequences=False,
            stateful=True,
            batch_input_shape=(config.n_batch, config.n_input, 1),
            dropout=config.dropout,
            recurrent_dropout=config.recurrent_dropout,
        )
    )
    # Single node output layer
    lstm_list.append(Dense(1))
    model = Sequential(lstm_list)
    optimizer = get_optimizer(config)
    model.compile(
        loss=config.loss, optimizer=optimizer, metrics=[RootMeanSquaredError()]
    )
    return model
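Because the layers are `stateful` with a fixed `batch_input_shape`, data must be fed in exactly that batch size and the carried state reset between epochs. A hedged usage sketch, assuming `config` carries the fields referenced above:

import numpy as np

n_samples = config.n_batch * 8              # stateful models need whole batches
X = np.random.rand(n_samples, config.n_input, 1)
y = np.random.rand(n_samples)
model = build_LSTM(config)
for epoch in range(config.n_epochs):
    # Preserve ordering (shuffle=False) and clear the LSTM state each epoch.
    model.fit(X, y, batch_size=config.n_batch, epochs=1,
              shuffle=False, verbose=0)
    model.reset_states()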
Example #14
    def iris_model(x_train, y_train, x_val, y_val, params):

        model = Sequential()
        model.add(
            Dense(params['first_neuron'],
                  input_dim=4,
                  activation=params['activation']))

        talos.utils.hidden_layers(model, params, 3)

        model.add(Dense(3, activation='softmax'))

        if isinstance(params['optimizer'], str):
            opt = params['optimizer']
        else:
            opt = params['optimizer']()

        model.compile(optimizer=opt,
                      loss=params['losses'],
                      metrics=['acc', RootMeanSquaredError()])

        out = model.fit(x_train,
                        y_train,
                        batch_size=25,
                        epochs=params['epochs'],
                        validation_data=[x_val, y_val],
                        verbose=0)

        return out, model
Example #15
def measure_rmse_tf(y_true, y_pred):
    """Calculate the RMSE score between y_true and y_pred and return it.

    Parameters
    ----------
    y_true : np.ndarray
        Array of true values
    y_pred : np.ndarray
        Array of predicted values

    Returns
    -------
    rmse : float
        The RMSE between the arrays y_true and y_pred
    """
    m = RootMeanSquaredError()
    m.update_state(y_true, y_pred)
    rmse = m.result().numpy()
    return rmse
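For instance, a quick sanity check (illustrative values):

import numpy as np

y_true = np.array([1.0, 2.0, 3.0])
y_pred = np.array([1.0, 2.0, 4.0])
print(measure_rmse_tf(y_true, y_pred))  # sqrt(1/3) ≈ 0.577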
Example #16
def train_spatial_temporal_model(
        model,
        dataset_generator,
        opt='adam',
        epochs=EPOCHS,
        steps_per_epoch=STEPS_PER_EPOCH,
        include_tb=False):  # validation_data, val_steps = VALIDATION_STEPS,

    ## Early stopping
    earlystopping = EarlyStopping(monitor='loss',
                                  min_delta=0.00001,
                                  patience=10,
                                  restore_best_weights=True)  # val_loss

    # Automatically save latest best model to file
    filepath = repo_path + "models/model_saves/" + PRED_TAR + '/' + RUN_ID + ".hdf5"
    checkpoint = ModelCheckpoint(filepath=filepath,
                                 monitor='loss',
                                 verbose=0,
                                 save_best_only=True,
                                 mode='min')

    # Set callbacks
    callbacks_list = [checkpoint, earlystopping]

    # Include tensorboard
    if include_tb:
        tensorboard_cb = tf.keras.callbacks.TensorBoard(get_run_logdir())
        callbacks_list.extend([tensorboard_cb])

    # Optimizers
    optimizers = {
        'adam': Adam(learning_rate=0.001,
                     beta_1=0.9,
                     beta_2=0.999,
                     amsgrad=False)
    }

    model.compile(loss='mean_absolute_error',
                  optimizer=optimizers[opt],
                  metrics=[mae, RootMeanSquaredError(),
                           Huber()])

    # Fit model #x = [spatial_train, temporal_train_x], y = temporal_train_y,
    history = model.fit(
        dataset_generator,
        epochs=epochs,
        use_multiprocessing=True,
        # validation_data = validation_data, validation_steps = val_steps,
        steps_per_epoch=steps_per_epoch,
        verbose=1,
        callbacks=callbacks_list)
    return history
Example #17
    def from_saved(cls, folder):
        with open(os.path.join(folder, "args.pickle"), "rb") as f:
            args = argparse.Namespace(**pickle.load(f))  # loads dict and converts it to namespace
        with open(os.path.join(folder, 'model.json')) as f:
            json_string = json.load(f)
        model = tf.keras.models.model_from_json(json_string, custom_objects=None)
        model.load_weights(os.path.join(folder, 'weights.h5'))

        # No optimizer is specified, so Keras falls back to its default;
        # the restored model is compiled here only so it can be evaluated.
        model.compile(
            loss='mse',
            metrics=[RootMeanSquaredError(), MeanAbsoluteError(), RSquare()]
        )
        return cls(model, args)
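The loader above implies a matching save step. A sketch of what that counterpart plausibly looks like (the file names are taken from the loader; the function itself is an assumption):

import json
import os
import pickle

def save(model, args, folder):
    os.makedirs(folder, exist_ok=True)
    with open(os.path.join(folder, "args.pickle"), "wb") as f:
        pickle.dump(vars(args), f)         # Namespace -> dict
    with open(os.path.join(folder, "model.json"), "w") as f:
        json.dump(model.to_json(), f)      # architecture only, as a JSON string
    model.save_weights(os.path.join(folder, "weights.h5"))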
Example #18
    def __init__(self,
                 n_input_steps,
                 n_features,
                 n_output_steps,
                 reg=False,
                 drop=False):
        super().__init__()
        self.model.add(LSTM(100, input_shape=(n_input_steps, n_features)))
        self.add_regulization(reg, drop)
        self.model.add(Dense(n_output_steps))
        self.model.compile(optimizer='adam',
                           loss='mse',
                           metrics=[RootMeanSquaredError()])
Example #19
def create_ensemble(models):
    if len(models) == 1:
        return models[0]
    else:
        inputs = Input(shape=[N_FEATURES])
        predictions = [model(inputs) for model in models]
        outputs = average(predictions)
        model = Model(inputs=inputs, outputs=outputs)
        model.compile(
            loss='mse',
            metrics=[RootMeanSquaredError(),
                     MeanAbsoluteError(),
                     RSquare()])
        return model
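A hedged usage sketch, assuming the member models come from a builder like `create_model` above, share the `N_FEATURES` input shape, and have already been trained (`average` is `tensorflow.keras.layers.average`):

import numpy as np

X_test = np.random.rand(16, N_FEATURES)
y_test = np.random.rand(16)
# m1 and m2 are assumed to be trained Keras models with matching inputs.
ensemble = create_ensemble([m1, m2])
# evaluate() returns [loss, rmse, mae, r_square] per the compile() above.
print(ensemble.evaluate(X_test, y_test, verbose=0))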
Example #20
    def train(self):
        """
        Train the optimizer.
        """
        self.config.logger.info("DnnOptimiser::train")

        training_generator = FluctuationDataGenerator(
            self.config.partition['train'],
            dirinput=self.config.dirinput_train,
            **self.config.params)
        validation_generator = FluctuationDataGenerator(
            self.config.partition['validation'],
            dirinput=self.config.dirinput_validation,
            **self.config.params)
        model = u_net((self.config.grid_phi, self.config.grid_r,
                       self.config.grid_z, self.config.dim_input),
                      depth=self.config.depth,
                      batchnorm=self.config.batch_normalization,
                      pool_type=self.config.pool_type,
                      start_channels=self.config.filters,
                      dropout=self.config.dropout)
        if self.config.metrics == "root_mean_squared_error":
            metrics = RootMeanSquaredError()
        else:
            metrics = self.config.metrics
        model.compile(loss=self.config.lossfun,
                      optimizer=Adam(learning_rate=self.config.adamlr),
                      metrics=[metrics])

        model.summary()
        plot_model(model, to_file='%s/model_%s_nEv%d.png' % \
                   (self.config.dirplots, self.config.suffix, self.config.train_events),
                   show_shapes=True, show_layer_names=True)

        #log_dir = "logs/" + datetime.datetime.now(datetime.timezone.utc).strftime("%Y%m%d_%H%M%S")
        log_dir = 'logs/' + '%s_nEv%d' % (self.config.suffix,
                                          self.config.train_events)
        tensorboard_callback = TensorBoard(log_dir=log_dir, histogram_freq=1)

        # Workaround: stub out the distribution-strategy lookup that the
        # TensorBoard callback expects on the model in some TF versions.
        model._get_distribution_strategy = lambda: None
        his = model.fit(training_generator,
                        validation_data=validation_generator,
                        use_multiprocessing=False,
                        epochs=self.config.epochs,
                        callbacks=[tensorboard_callback])

        self.__plot_train(his)
        self.save_model(model)
Example #21
def ModelNN():
    classifier = Sequential([
        #Dense(25, activation = 'relu', input_dim=5),
        #BatchNormalization(),
        LSTM(64, input_shape=(8, 1), activation='tanh', return_sequences=True),
        Conv1D(32, 8, activation='tanh', strides=2, padding='same'),
        AveragePooling1D(pool_size=3, strides=1, padding='same'),
        Dense(12, activation='relu'),
        LeakyReLU(alpha=0.01),
        Dense(1),
        #Dropout(0.2)
    ])
    classifier.compile(optimizer='adam',
                       loss='huber',
                       metrics=[RootMeanSquaredError(), 'mean_squared_error'])
    return classifier
Example #22
def createNN(neurons=200, dropOutRate=0.3):
    ann = Sequential()
    ann.add(
        Dense(units=neurons,
              activation='relu',
              input_dim=M.shape[1],
              kernel_initializer=GlorotNormal()))
    ann.add(Dropout(dropOutRate))
    ann.add(Dense(units=int(neurons / 2), activation='relu'))
    ann.add(Dropout(dropOutRate))
    ann.add(Dense(units=int(neurons / 4), activation='relu'))
    ann.add(Dropout(dropOutRate))
    ann.add(Dense(units=1, activation='linear'))

    ann.compile(optimizer='adam',
                loss='mse',
                metrics=['accuracy', RootMeanSquaredError()])
    return ann
Example #23
def create_model(n_feature):
    ip_layer = Input(shape=(100, n_feature))
    #ip_layer = Input(shape = (100,54))(input_shape)
    lstm_layer1 = (LSTM(50,
                        return_sequences=True,
                        input_shape=(100, n_feature),
                        kernel_regularizer=reg))(ip_layer)
    lstm_layer2 = (LSTM(50, return_sequences=True,
                        kernel_regularizer=reg))(lstm_layer1)
    lstm_layer3 = (LSTM(50, return_sequences=True,
                        kernel_regularizer=reg))(lstm_layer2)
    lstm_layer4 = (LSTM(50, return_sequences=True,
                        kernel_regularizer=reg))(lstm_layer3)
    output_layer = TimeDistributed(Dense(1, activation="linear"))(lstm_layer4)
    model = tf.keras.Model(inputs=ip_layer, outputs=output_layer)
    model.compile(optimizer='adam',
                  loss=rmse_loss,
                  metrics=[RootMeanSquaredError()])  # metrics should be a list
    return model
Example #24
    def __init__(self, hparams, name, log_dir):
        self.univariate = hparams.get('UNIVARIATE', True)
        self.batch_size = int(hparams.get('BATCH_SIZE', 32))
        self.epochs = int(hparams.get('EPOCHS', 500))
        self.patience = int(hparams.get('PATIENCE', 15))
        self.val_frac = hparams.get('VAL_FRAC', 0.15)
        self.T_x = int(hparams.get('T_X', 32))
        self.metrics = [
            MeanSquaredError(name='mse'),
            RootMeanSquaredError(name='rmse'),
            MeanAbsoluteError(name='mae'),
            MeanAbsolutePercentageError(name='mape')
        ]
        self.standard_scaler = StandardScaler()
        self.forecast_start = datetime.datetime.today()
        model = None
        super(NNModel, self).__init__(model,
                                      self.univariate,
                                      name,
                                      log_dir=log_dir)
Example #25
def build_LSTM_small(config):
    model = Sequential(
        [
            LSTM(
                100,
                return_sequences=True,
                stateful=True,
                batch_input_shape=(config.n_batch, config.n_input, 1),
            ),
            LSTM(50, return_sequences=True, stateful=True),
            LSTM(25, return_sequences=True, stateful=True),
            LSTM(12, return_sequences=True, stateful=True),
            LSTM(7, stateful=True),
            Dense(1),
        ]
    )
    optimizer = get_optimizer(config)
    model.compile(
        loss=config.loss, optimizer=optimizer, metrics=[RootMeanSquaredError()]
    )
    return model
Example #26
    def __init__(self,
                 n_input_steps,
                 n_features,
                 n_output_steps,
                 reg=False,
                 drop=False,
                 n_LSTM_hidden_layers=1,
                 n_cells=100):
        super().__init__()
        self.model.add(
            LSTM(n_cells,
                 return_sequences=True,
                 input_shape=(n_input_steps, n_features)))
        for _ in range(n_LSTM_hidden_layers):
            self.model.add(LSTM(n_cells, return_sequences=True))
        self.model.add(LSTM(n_cells))
        self.add_regulization(reg, drop)
        self.model.add(Dense(n_output_steps))
        self.model.compile(optimizer='adam',
                           loss='mse',
                           metrics=[RootMeanSquaredError()])
Example #27
def linear_model():
    model = Sequential()

    model.add(
        Dense(math.ceil(1.2 * features / 2),
              kernel_initializer='random_normal',
              activation='relu',
              input_dim=features))
    model.add(Dropout(0.4))
    model.add(
        Dense(math.ceil(0.6 * features),
              kernel_initializer='random_normal',
              activation='relu',
              kernel_constraint=maxnorm(5)))
    model.add(Dropout(0.2))
    model.add(
        Dense(1, kernel_initializer='normal', kernel_constraint=maxnorm(5)))
    model.compile(optimizer=Adam(learning_rate=0.0001),
                  loss=MeanSquaredError(),
                  metrics=[RootMeanSquaredError()])

    return model
Example #28
    def build_model(self):
        input_layer = Input((self.ts_length, self.num_variables))

        x = input_layer
        input_res = input_layer

        for d in range(self.depth):
            x = self._inception_module(x)
            if self.use_residual and d % 3 == 2:
                x = self._shortcut_layer(input_res, x)
                input_res = x

        gap_layer = GlobalAveragePooling1D()(x)
        output_layer = Dense(self.num_variables,
                             activation='linear')(gap_layer)

        model = Model(inputs=input_layer, outputs=output_layer)
        model.compile(loss=self.loss,
                      optimizer=self.optimizer,
                      metrics=[RootMeanSquaredError()])

        return model
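`_inception_module` and `_shortcut_layer` are defined elsewhere in the class; a condensed sketch of the standard InceptionTime-style versions they presumably follow (filter counts and kernel sizes are assumptions):

from tensorflow.keras.layers import (Activation, Add, BatchNormalization,
                                     Concatenate, Conv1D, MaxPooling1D)

def _inception_module(self, x, n_filters=32):
    # Bottleneck, three parallel convolutions, and a max-pool branch,
    # concatenated along the channel axis.
    bottleneck = Conv1D(n_filters, 1, padding='same', use_bias=False)(x)
    branches = [Conv1D(n_filters, k, padding='same', use_bias=False)(bottleneck)
                for k in (10, 20, 40)]
    pool = MaxPooling1D(pool_size=3, strides=1, padding='same')(x)
    branches.append(Conv1D(n_filters, 1, padding='same', use_bias=False)(pool))
    x = Concatenate(axis=-1)(branches)
    x = BatchNormalization()(x)
    return Activation('relu')(x)

def _shortcut_layer(self, input_res, x):
    # Project the residual input to the same channel count, then add.
    shortcut = Conv1D(int(x.shape[-1]), 1, padding='same', use_bias=False)(input_res)
    shortcut = BatchNormalization()(shortcut)
    return Activation('relu')(Add()([shortcut, x]))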
Example #29
def build_MLP(config):
    # Do we need to put input_dim=config.n_input in first layer?
    # dense_list = [Dense(config.n_nodes, activation=config.activation) for _ in range(config.num_layers)]
    # dense_list.append(Dense(1))
    # model = Sequential(dense_list)
    model = Sequential(
        [
            Dense(500, activation="relu"),
            Dense(250, activation="relu"),
            Dense(125, activation="relu"),
            Dense(62, activation="relu"),
            Dense(30, activation="relu"),
            Dense(15, activation="relu"),
            Dense(7, activation="relu"),
            Dense(1),
        ]
    )
    optimizer = get_optimizer(config)
    model.compile(
        loss=config.loss, optimizer=optimizer, metrics=[RootMeanSquaredError()]
    )
    return model
Example #30
    def __init__(self,
                 n_input_steps,
                 n_features,
                 n_output_steps,
                 reg=False,
                 drop=False,
                 n_LSTM_hidden_layers=1,
                 n_cells=200):
        self.model = Sequential()
        self.model.add(
            ConvLSTM2D(64, (1, 3),
                       activation='relu',
                       input_shape=(1, 1, n_input_steps, n_features)))
        self.model.add(Flatten())
        self.model.add(RepeatVector(n_output_steps))
        for _ in range(n_LSTM_hidden_layers):
            self.model.add(
                LSTM(n_cells, activation='relu', return_sequences=True))
        self.add_regulization(reg, drop)
        self.model.add(TimeDistributed(Dense(1)))
        self.model.compile(optimizer='adam',
                           loss='mse',
                           metrics=[RootMeanSquaredError()])
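`ConvLSTM2D` expects 5-D input of shape (samples, time, rows, cols, channels); the constructor above folds each window into a single 1×`n_input_steps` "image". A hedged sketch of the reshape that feeding this model requires (the sizes are illustrative):

import numpy as np

n_input_steps, n_features, n_output_steps = 14, 3, 7
X = np.random.rand(32, n_input_steps, n_features)     # (samples, steps, features)
X = X.reshape(-1, 1, 1, n_input_steps, n_features)    # (samples, time=1, rows=1, cols, channels)
y = np.random.rand(32, n_output_steps, 1)             # one value per output step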