Example No. 1
def main():
    network = Sequential([
        layers.Dense(256, activation=tf.nn.relu),
        layers.Dense(128, activation=tf.nn.relu),
        layers.Dense(64, activation=tf.nn.relu),
        layers.Dense(32, activation=tf.nn.relu),
        layers.Dense(10)    # logits output: no activation, since the loss uses from_logits=True
    ])
    network.build(input_shape=[None, 28*28])
    network.summary()

    network.compile(
            optimizer=optimizers.Adam(learning_rate=0.01),    # specify the optimizer
            loss=tf.losses.CategoricalCrossentropy(from_logits=True),   # specify the loss function
            metrics=['accuracy']     # specify the evaluation metric
        )

    network.fit(
            db,   # the dataset to train on
            epochs=10,    # number of training epochs
            validation_data=db_test,    # dataset used for validation, often named ds_val
            validation_freq=2
        )

    network.evaluate(db_test)    # evaluate the trained model on a given dataset

    pred = network(x)    # direct forward pass on a batch of inputs x
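The snippet above assumes db, db_test, and x are built elsewhere. A minimal sketch of such a pipeline, under the assumption that the data is MNIST (the preprocessing details are illustrative, not part of the original example):

import tensorflow as tf
from tensorflow import keras

def preprocess(x, y):
    # flatten each image, scale to [0, 1], one-hot encode the label
    x = tf.reshape(tf.cast(x, tf.float32) / 255., [28 * 28])
    y = tf.one_hot(tf.cast(y, tf.int32), depth=10)
    return x, y

(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
db = tf.data.Dataset.from_tensor_slices((x_train, y_train)).map(preprocess).shuffle(10000).batch(128)
db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test)).map(preprocess).batch(128)
x, _ = next(iter(db_test))    # a single batch for the network(x) call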
Example No. 2
def apk_cnn():
    x = []
    y = []
    data = np.array(concat_to_tab())
    batch_size = 2
    for element in data:
        if len(element[0]) == 600:
            y.append(np.asarray(element[1]).astype(np.float32))
            x.append(np.asarray(element[0]).astype(np.float32))

    x = np.array(x)
    y = np.array(y)
    x = np.expand_dims(x, axis=-1)    # Conv1D expects 3D input: (samples, steps, channels)
    print(x)
    print(len(x))
    print(len(y))
    X_train, X_test, y_train, y_test = train_test_split(x,
                                                        y,
                                                        test_size=0.2,
                                                        train_size=0.8)
    print(len(X_train))
    model = Sequential()
    model.add(layers.Conv1D(filters=10, kernel_size=100, activation='relu'))
    model.add(layers.GlobalMaxPool1D())
    model.add(layers.Flatten())
    model.add(layers.Dense(10, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))
    sgd = optimizers.SGD(learning_rate=0.01, decay=1e-6, momentum=0.1, nesterov=True)
    model.compile(loss='binary_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])

    history = model.fit(X_train,
                        y_train,
                        epochs=100,
                        verbose=True,
                        validation_data=(X_test, y_test),
                        batch_size=batch_size)
    model.summary()

    print(history.history)    # per-epoch loss and accuracy values
    loss, accuracy = model.evaluate(X_train, y_train, verbose=False)
    #print("************************Results for class :"+str(curr_class)+"*********************")
    print("Training Accuracy: {:.4f}".format(accuracy))
    print("ok")
    loss, accuracy = model.evaluate(X_test, y_test, verbose=False)
    print("Testing Accuracy:  {:.4f}".format(accuracy))

    return (x, y)
Example No. 3
def use_Sequential_model():
    import numpy as np
    from tensorflow.keras import Sequential, callbacks
    from tensorflow.keras.layers import Input, Dense, Dropout
    BATCH_SIZE = 8
    simple_model = Sequential([
        Input(shape=(33, )),
        Dense(128, activation='relu'),
        Dropout(0.5),
        Dense(64, activation='sigmoid'),
        Dense(32, activation='relu'),
        Dense(1, activation='sigmoid')
    ])
    stop = callbacks.EarlyStopping(monitor='val_loss',
                                   patience=30,
                                   restore_best_weights=True)
    simple_model.compile(optimizer=optimizer,   # optimizer and loss_fn are assumed to be
                         loss=loss_fn,          # defined by the caller, e.g. 'adam' and
                         metrics=['accuracy'])  # tf.keras.losses.BinaryCrossentropy()
    hist_simple = simple_model.fit(train_features,
                                   train_labels,
                                   epochs=500,
                                   callbacks=[stop],
                                   validation_split=0.15,
                                   batch_size=BATCH_SIZE)

    predictions = simple_model.predict(test_features)
    for i in range(10):
        print(f"Prediction: {label_names[int(np.round(predictions[i][0]))]}")
    test_loss, test_acc = simple_model.evaluate(test_features, test_labels)
    simple_model.save('simple_model')
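The directory name in simple_model.save('simple_model') can be passed straight back to load_model; a minimal sketch of restoring it for inference (the thresholding step is an assumption about how the sigmoid output would be consumed):

from tensorflow import keras

restored = keras.models.load_model('simple_model')
probs = restored.predict(test_features)        # sigmoid outputs in [0, 1]
labels = (probs > 0.5).astype(int).ravel()     # threshold at 0.5 for binary classes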
Example No. 4
def function(
        snn_results,
        labels,
        output_path,
        input_layer_N_num=600,
        hidden_layer_N_num=19,
        output_layer_N_num=2  # Generic output configuration will be implemented
):
    model = Sequential()
    model.add(
        Dense(units=input_layer_N_num,
              input_dim=input_layer_N_num,
              activation="relu"))
    model.add(Dense(units=hidden_layer_N_num, activation="relu"))
    model.add(Dense(units=output_layer_N_num, activation="softmax"))  # softmax output so the sparse categorical cross-entropy gets probabilities
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    snn_results_x = list(map(lambda x: x["fire_rate"].ravel(), snn_results))
    snn_results_x = np.vstack(snn_results_x)

    X_train, X_test, y_train, y_test = train_test_split(snn_results_x,
                                                        labels,
                                                        test_size=0.20,
                                                        random_state=42)

    model.fit(X_train, y_train, batch_size=1000, epochs=1000, verbose=0)
    model.save(os.path.join(output_path, "ann-model.h5"))

    loss, acc = model.evaluate(X_test, y_test, verbose=0)
    print("loss: %.2f%%; acc: %.2f%%" % (loss * 100, acc * 100))
Example No. 5
def fashion_model():
    model = Sequential([
        Conv2D(input_shape=x_train.shape[1:],
               filters=50,
               kernel_size=(3, 3),
               padding='same',
               kernel_initializer='he_normal'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Flatten(),
        Dense(128, activation=tf.nn.relu),
        Dense(10, activation=tf.nn.softmax)
    ])

    model.summary()

    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(learning_rate=0.001),
                  metrics=['accuracy'])

    early_stopping = EarlyStopping(monitor='val_loss', patience=10)

    history = model.fit(x_train,
                        y_train,
                        batch_size=64,
                        epochs=25,
                        validation_data=(x_test, y_test),
                        shuffle=True,
                        callbacks=[early_stopping])

    loss, acc = model.evaluate(x_test, y_test)
    predictions = model.predict(x_test)

    print("\nLoss: {}, Acc: {}".format(loss, acc))
Example No. 6
def evaluate(model: keras.Sequential, dataset: PreppedDataset, config: Config):
    model_result = model.evaluate(
        dataset.test_set,
        dataset.test_labels,
        verbose=0,
        return_dict=True
    )
    # we could handle multiple metrics here but we are only measuring the
    # model loss (mean abs error) for now.
    metrics = {
        config.get('model.hyperparameters.loss'): model_result['loss']
    }
    run = Run(
        timestamp=datetime.now(),
        dataset_name=config.get('dataset.name'),
        preprocessing_cfg=config.get(
            'dataset.preprocessing',
            as_primitive=True),
        model_type=config.get('model.type'),
        model_hyperparameters=dict(config.get(
            'model.hyperparameters',
            as_primitive=True)),
        metric_scores=metrics
    )
    # store run and compare it to previous best runs
    metric = config.get('model.hyperparameters.loss')
    data_store = DataStore(config)
    best_run = data_store.get_best_run(run.dataset_name, metric)
    data_store.save_run(run)
    runs_info = []
    if best_run:
        runs_info.append({'label': 'Previous best', 'run': best_run})
    runs_info.append({'label': 'New run', 'run': run})
    print_run_overview(runs_info, metric)
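evaluate(..., return_dict=True) returns a name-to-value mapping instead of a plain list, which is what lets the function above index model_result['loss']. A minimal self-contained illustration:

import numpy as np
from tensorflow import keras

m = keras.Sequential([keras.layers.Dense(1, input_shape=(4,))])
m.compile(loss='mae', optimizer='adam')
scores = m.evaluate(np.zeros((8, 4)), np.zeros((8, 1)), verbose=0, return_dict=True)
print(scores)    # {'loss': 0.0}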
Example No. 7
def main():
    train_ds, test_ds, vocab_size = load_dataset()

    model = Sequential([
        Embedding(vocab_size, 64),
        Bidirectional(LSTM(64, return_sequences=True)),
        # Bidirectional, so the next layer's output has 64 units (2 * 32)
        Bidirectional(LSTM(32)),
        Dense(64, activation='relu'),
        Dropout(0.5),
        Dense(1, activation='sigmoid')
    ])

    model.compile(optimizer=Adam(1e-4),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    model.summary()

    with experiment.train():
        model.fit(train_ds, epochs=NUM_EPOCHS, validation_data=test_ds)

    with experiment.test():
        test_loss, test_acc = model.evaluate(test_ds)
        print('Test Loss: {}'.format(test_loss))
        print('Test Accuracy: {}'.format(test_acc))
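load_dataset() is not shown above; a hypothetical implementation with the same return signature, built on the Keras IMDB data (vocabulary size, sequence length, and batch size are illustrative):

import tensorflow as tf
from tensorflow import keras

def load_dataset(vocab_size=10000, maxlen=256, batch_size=64):
    (x_tr, y_tr), (x_te, y_te) = keras.datasets.imdb.load_data(num_words=vocab_size)
    x_tr = keras.preprocessing.sequence.pad_sequences(x_tr, maxlen=maxlen)
    x_te = keras.preprocessing.sequence.pad_sequences(x_te, maxlen=maxlen)
    train_ds = tf.data.Dataset.from_tensor_slices((x_tr, y_tr)).shuffle(25000).batch(batch_size)
    test_ds = tf.data.Dataset.from_tensor_slices((x_te, y_te)).batch(batch_size)
    return train_ds, test_ds, vocab_size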
Example No. 8
def main():
    (x, y), (x_val, y_val) = keras.datasets.mnist.load_data()
    print(x.shape, y.shape)
    print(x_val.shape, y_val.shape)
    batchsz = 128
    db = tf.data.Dataset.from_tensor_slices((x, y))
    db = db.map(preprocess).shuffle(60000).batch(batchsz)
    ds_val = tf.data.Dataset.from_tensor_slices((x_val, y_val))
    ds_val = ds_val.map(preprocess).batch(batchsz)
    network = Sequential([
        layers.Dense(256, activation='relu'),
        layers.Dense(128, activation='relu'),
        layers.Dense(64, activation='relu'),
        layers.Dense(32, activation='relu'),
        layers.Dense(10),
    ])
    network.build(input_shape=(None, 28 * 28))
    # Assemble the model: specify the optimizer, loss function, and metrics
    network.compile(optimizer=optimizers.Adam(learning_rate=0.01),
                    loss=losses.CategoricalCrossentropy(from_logits=True),
                    metrics=['accuracy'])
    network.summary()
    history = network.fit(db, epochs=5, validation_data=ds_val,
                          validation_freq=2)
    print(history.history)

    x, y = next(iter(ds_val))
    # predict runs inference on a batch of data
    out = network.predict(x)
    print(out)

    # evaluate returns the loss and the metric results
    result = network.evaluate(ds_val)
    print(result)
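The preprocess function mapped over db and ds_val is not shown; since the model is compiled with CategoricalCrossentropy, it has to flatten the images and one-hot encode the labels. A sketch consistent with that:

def preprocess(x, y):
    x = tf.cast(x, dtype=tf.float32) / 255.
    x = tf.reshape(x, [28 * 28])
    y = tf.one_hot(tf.cast(y, dtype=tf.int32), depth=10)
    return x, y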
Example No. 9
def compute_results(train_dataset: PrefetchDataset, test_dataset: PrefetchDataset, model: Sequential) -> History:
    """
    Configures and trains the model, then evaluates its accuracy against the test dataset. The trained model is saved
    to the outputs folder.
    :param train_dataset: A PrefetchDataset that the model will be trained on.
    :param test_dataset: A PrefetchDataset that the model's accuracy will be evaluated against.
    :param model: The Sequential object to be trained.
    :return: A History object containing information about the model's metrics during the training process.
    """

    model.compile(loss=BinaryCrossentropy(from_logits=True), optimizer=Adam(), metrics=["accuracy"])

    # Here we introduce an early stopping callback function that will cease training once the validation loss
    # stops decreasing. This is to minimize over-fitting (i.e. reduce the difference between training loss
    # and validation loss).
    # Idea retrieved from https://machinelearningmastery.com/early-stopping-to-avoid-overtraining-neural-network-models/
    es_callback = EarlyStopping(monitor="val_loss", patience=3)

    # Train the model
    history = model.fit(train_dataset, validation_data=test_dataset, epochs=EPOCHS,
                        callbacks=[es_callback])  # the batch size comes from the dataset itself; passing batch_size here raises an error

    # Get the loss values and metrics once evaluating the model against the test dataset
    test_loss, test_accuracy = model.evaluate(test_dataset)

    print(f"Test Loss: {test_loss}")
    print(f"Test Accuracy: {test_accuracy}")

    model.save(ROOT / "outputs/my_model")

    return history
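The SavedModel written to outputs/my_model can be restored later; a minimal sketch using the same ROOT path object as the function above:

from tensorflow import keras

restored = keras.models.load_model(ROOT / "outputs/my_model")
logits = restored.predict(test_dataset)    # raw logits, since the loss used from_logits=True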
Example No. 10
def create_mlp_for_binary_classification():
    from pandas import read_csv
    from sklearn.model_selection import train_test_split
    from sklearn.preprocessing import LabelEncoder
    from tensorflow.keras import Sequential
    from tensorflow.keras.layers import Dense

    # load the dataset
    ionosphere_dataset_path = '.\\KerasToTensorFlow\\ionosphere.csv'
    ionosphere_df = read_csv(ionosphere_dataset_path, header=None)

    # split dataset into input and output columns
    X, y = ionosphere_df.values[:, :-1], ionosphere_df.values[:, -1]

    # ensure all data are floating point values
    X = X.astype('float32')

    # encode strings to integer
    y = LabelEncoder().fit_transform(y)

    # split dataset into train and test datasets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)

    # determine the number of input features
    num_input_features = X_train.shape[1]

    # define model
    model = Sequential()
    model.add(
        Dense(10,
              activation='relu',
              kernel_initializer='he_normal',
              input_shape=(num_input_features, )))
    model.add(Dense(8, activation='relu', kernel_initializer='he_normal'))
    model.add(Dense(1, activation='sigmoid'))

    # compile the model
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    # fit the model
    model.fit(X_train, y_train, epochs=150, batch_size=32, verbose=0)

    # evaluate the model
    loss, model_accuracy = model.evaluate(X_test, y_test, verbose=0)
    print('Test Accuracy: %.3f' % model_accuracy)

    # make a prediction
    row = [
        1, 0, 0.99539, -0.05889, 0.85243, 0.02306, 0.83398, -0.37708, 1,
        0.03760, 0.85243, -0.17755, 0.59755, -0.44945, 0.60536, -0.38223,
        0.84356, -0.38542, 0.58212, -0.32192, 0.56971, -0.29674, 0.36946,
        -0.47357, 0.56811, -0.51171, 0.41078, -0.46168, 0.21266, -0.34090,
        0.42267, -0.54487, 0.18641, -0.45300
    ]
    yhat = model.predict([row])
    print('Predicted: %.3f' % yhat[0][0])

    return model_accuracy
Example No. 11
def create_CNN_model(X_train, y_train, X_test, y_test, X_val, y_val):
    # Reshape data
    X_train_flattened = X_train.reshape(len(X_train), 28 * 28)
    X_test_flattened = X_test.reshape(len(X_test), 28 * 28)
    X_val_flattened = X_val.reshape(len(X_val), 28 * 28)

    # despite the "cnn" name, this is a plain dense (MLP) network on flattened inputs
    cnn = Sequential()
    cnn.add(Dense(units=128, activation='relu'))
    cnn.add(Dense(units=64, activation='relu'))
    cnn.add(Dense(units=10, activation='softmax'))

    callback = EarlyStopping(monitor='val_loss',
                             patience=10,
                             restore_best_weights=True)
    cnn.compile(optimizer=Adam(),
                loss=SparseCategoricalCrossentropy(),
                metrics=['accuracy'])
    history = cnn.fit(x=X_train_flattened,
                      y=y_train,
                      validation_data=(X_val_flattened, y_val),
                      epochs=600,
                      batch_size=32,
                      verbose=False,
                      callbacks=[callback])

    test_loss, test_acc = cnn.evaluate(x=X_test_flattened, y=y_test)

    print(f"Test loss: {test_loss}")
    print(f"Test accuracy: {test_acc}")

    predictions = cnn.predict(X_test_flattened)

    cnn.save('trained_model.h5')

    return predictions
Example No. 12
def train_test_model(hparams, logdir):
    model = Sequential(
        [Dense(units=hparams[HP_HIDDEN], activation='relu'),
         Dense(units=1)])
    model.compile(loss='mean_squared_error',
                  optimizer=tf.keras.optimizers.Adam(
                      hparams[HP_LEARNING_RATE]),
                  metrics=['mean_squared_error'])
    model.fit(
        X_scaled_train,
        y_train,
        validation_data=(X_scaled_test, y_test),
        epochs=hparams[HP_EPOCHS],
        verbose=False,
        callbacks=[
            tf.keras.callbacks.TensorBoard(logdir),  # log metrics
            hp.KerasCallback(logdir, hparams),  # log hparams
            tf.keras.callbacks.EarlyStopping(
                monitor='val_loss',
                min_delta=0,
                patience=200,
                verbose=0,
                mode='auto',
            )
        ],
    )
    _, mse = model.evaluate(X_scaled_test, y_test)
    pred = model.predict(X_scaled_test)
    r2 = r2_score(y_test, pred)
    return mse, r2
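The HP_* constants come from the TensorBoard HParams plugin (the hp alias used by hp.KerasCallback above). A hypothetical set of definitions matching the names in the function; the value ranges are illustrative:

from tensorboard.plugins.hparams import api as hp

HP_HIDDEN = hp.HParam('hidden_units', hp.Discrete([32, 64, 128]))
HP_LEARNING_RATE = hp.HParam('learning_rate', hp.Discrete([1e-4, 1e-3, 1e-2]))
HP_EPOCHS = hp.HParam('epochs', hp.Discrete([100, 500]))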
Example No. 13
def evaluate_model(trainX, trainy, testX, testy):
    verbose, epochs, batch_size = 0, 15, 64
    n_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[2], trainy.shape[1]
    model = Sequential()
    model.add(
        LSTM(100,
             input_shape=(n_timesteps, n_features),
             return_sequences=False))
    model.add(Dropout(0.5))
    model.add(Dense(100, activation='relu'))
    model.add(Dense(n_outputs, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # fit network
    model.fit(trainX,
              trainy,
              epochs=epochs,
              batch_size=batch_size,
              verbose=verbose)
    # evaluate model
    _, accuracy = model.evaluate(testX,
                                 testy,
                                 batch_size=batch_size,
                                 verbose=0)
    return accuracy
Example No. 14
def create_mlp_for_multiclass_classification():
    from numpy import argmax
    from pandas import read_csv
    from sklearn.model_selection import train_test_split
    from sklearn.preprocessing import LabelEncoder
    from tensorflow.keras import Sequential
    from tensorflow.keras.layers import Dense

    # load the dataset
    iris_dataset_path = '.\\KerasToTensorFlow\\iris.csv'
    iris_df = read_csv(iris_dataset_path, header=None)

    # split into input and output columns
    X, y = iris_df.values[:, :-1], iris_df.values[:, -1]

    # ensure all data are floating point values
    X = X.astype('float32')

    # encode strings to integer
    y = LabelEncoder().fit_transform(y)

    # split into train and test datasets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)
    print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)

    # determine the number of input features
    num_input_features = X_train.shape[1]

    # define model
    model = Sequential()
    model.add(
        Dense(10,
              activation='relu',
              kernel_initializer='he_normal',
              input_shape=(num_input_features, )))
    model.add(Dense(8, activation='relu', kernel_initializer='he_normal'))
    model.add(Dense(3, activation='softmax'))

    # compile the model
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    # fit the model
    model.fit(X_train, y_train, epochs=150, batch_size=32, verbose=0)

    # evaluate the model
    loss, model_accuracy = model.evaluate(X_test, y_test, verbose=0)
    print('Test Accuracy: %.3f' % model_accuracy)

    # make a prediction
    row = [5.1, 3.5, 1.4, 0.2]
    yhat = model.predict([row])
    print('Predicted: %s (class=%d)' % (yhat, argmax(yhat)))

    return model_accuracy
Example No. 15
def main():
    # Cannot do stacked ConditionalRNN with Sequential. Have to rely on the functional API. See below.
    model = Sequential(layers=[
        ConditionalRNN(
            NUM_CELLS, cell='LSTM', return_sequences=True, name='cond_rnn_0'),
        LSTM(NUM_CELLS),
        Dense(units=NUM_CLASSES, activation='softmax')
    ])

    # Stacked ConditionalRNN with the functional API.
    i = Input(shape=[TIME_STEPS, INPUT_DIM], name='input_0')
    c = Input(shape=[NUM_CLASSES], name='input_1')
    # add the condition tensor here.
    x = ConditionalRNN(NUM_CELLS,
                       cell='LSTM',
                       return_sequences=True,
                       name='cond_rnn_0')([i, c])
    # and here too.
    x = ConditionalRNN(NUM_CELLS,
                       cell='LSTM',
                       return_sequences=False,
                       name='cond_rnn_1')([x, c])
    x = Dense(units=NUM_CLASSES, activation='softmax')(x)
    model2 = Model(inputs=[i, c], outputs=[x])

    # Define (dummy) data.
    train_inputs = np.random.uniform(size=(NUM_SAMPLES, TIME_STEPS, INPUT_DIM))
    test_inputs = np.random.uniform(size=(NUM_SAMPLES, TIME_STEPS, INPUT_DIM))
    test_targets = train_targets = create_conditions()

    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model2.compile(optimizer='adam',
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])

    model.fit(x=[train_inputs, train_targets],
              y=train_targets,
              validation_data=([test_inputs, test_targets], test_targets),
              epochs=3)

    model2.fit(x=[train_inputs, train_targets],
               y=train_targets,
               validation_data=([test_inputs, test_targets], test_targets),
               epochs=3)

    assert abs(
        model.evaluate([test_inputs, test_targets], test_targets)[1] -
        1) < 1e-5
    assert abs(
        model2.evaluate([test_inputs, test_targets], test_targets)[1] -
        1) < 1e-5
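create_conditions() is not shown; given that the conditions double as the targets and have NUM_CLASSES dimensions, a hypothetical one-hot implementation:

import numpy as np

def create_conditions():
    # one one-hot condition vector of length NUM_CLASSES per sample
    return np.eye(NUM_CLASSES)[np.random.choice(NUM_CLASSES, size=NUM_SAMPLES)]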
Example No. 16
def evaluate_model(model: Sequential, testSet: DataSet, verbose=2):
    mean, std = sf.read_std_from_csv('std')
    test_std = sf.standardize_dataset(testSet.features, mean, std)
    results = model.evaluate(x=test_std,
                             y=testSet.labels,
                             batch_size=128,
                             verbose=verbose)

    accuracy = results[-1]
    loss = results[0]

    return accuracy, loss
Example No. 17
def train(
    label: Label = Label.title,
    name: Model = Model.vit32,
    num_layers: int = 3,
    epochs: int = 5,
    dropout_prob: float = 0.4,
    batch_size: int = TEST_EXAMPLES,
):
    log_dir = LOG_PATH.format(
        model=name.name,
        label=label.name,
        num_layers=num_layers,
        dropout_prob=dropout_prob,
    )
    model_dir = MODEL_PATH.format(
        model=name.name,
        label=label.name,
        num_layers=num_layers,
        dropout_prob=dropout_prob,
    )
    if path.exists(log_dir):
        rmtree(log_dir, ignore_errors=True)
    if path.exists(model_dir):
        rmtree(model_dir, ignore_errors=True)

    data = prepare(model=name, label=label)

    model = Sequential()
    model.add(layers.Flatten())
    for _ in range(num_layers):
        model.add(layers.Dense(SIZE[name]))
        model.add(layers.Activation(activations.relu))
        model.add(layers.Dropout(dropout_prob))
    model.add(layers.Dense(SIZE[name]))
    model.compile(
        loss=losses.cosine_similarity,
        metrics=[metrics.CosineSimilarity()],
        optimizer=optimizers.Adam(),
    )
    training = model.fit(
        x=data.X_train,
        y=data.Y_train,
        batch_size=batch_size,
        callbacks=[callbacks.TensorBoard(
            log_dir=log_dir,
            histogram_freq=1,
        )],
        epochs=epochs,
        validation_data=(data.X_dev, data.Y_dev),
    )
    results = model.evaluate(data.X_test, data.Y_test, batch_size=batch_size)
    model.save(model_dir)
Example No. 18
class FeaturesBasedModel:
    def __init__(self, model_path=None):
        self.name = 'features_based_model'
        self.log_dir = "logs/fit/" + self.name
        self.encoder = pickle.load(open('save/encoder.pickle', 'rb'))
        if model_path is not None:
            self.model = models.load_model(model_path)
            self.scaler = pickle.load(open('save/f_scaler.pickle', 'rb'))
        else:
            self.model = None
            self.scaler = StandardScaler()

    def train(self, csv_file):
        data = pd.read_csv(csv_file, encoding='ISO-8859-1')

        data.drop(['title'], axis=1, inplace=True)

        # obtaining target column
        genre_column = data['genre']
        y = self.encoder.transform(genre_column)
        n_classes = len(self.encoder.classes_)

        # obtaining feature columns and scaler
        X = np.array(data.drop(['genre'], axis=1), dtype=float)
        X = self.scaler.fit_transform(X)
        with open('save/f_scaler.pickle', 'wb') as pickle_out:
            pickle.dump(self.scaler, pickle_out)

        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

        self.model = Sequential()
        self.model.add(layers.Dense(256, activation='relu', input_shape=(X_train.shape[1],)))
        self.model.add(layers.Dense(128, activation='relu'))
        self.model.add(layers.Dense(64, activation='relu'))
        self.model.add(layers.Dense(n_classes, activation='softmax'))
        self.model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])

        tb_callback = callbacks.TensorBoard(log_dir=self.log_dir, histogram_freq=1)

        self.model.fit(X_train, y_train, epochs=20, callbacks=[tb_callback])
        print('training done\n')

        test_loss, test_acc = self.model.evaluate(X_test, y_test)
        print('test_acc: ', test_acc)
        self.model.summary()    # summary() prints itself and returns None

        self.model.save('./save/fb_model')

    def predict(self, features_set):
        scaled_features = self.scaler.transform(np.array(features_set, dtype=float))
        return self.model.predict(scaled_features)
Example No. 19
def f_build_ANN(arg_X_train, arg_y_train, arg_X_test, arg_y_test,
                arg_in_layers, arg_out_layers, arg_compile_parms,
                arg_fit_parms):
    # --------------------------------------------------------------------
    # Initializing the model
    # --------------------------------------------------------------------
    model = Sequential()
    # ----------------------------------------------------------------------------------------------
    # Neural Network Architecture
    # ----------------------------------------------------------------------------------------------

    for in_layer in arg_in_layers:
        model.add(
            Dense(units=in_layer[0],
                  activation=in_layer[1],
                  input_shape=in_layer[2]))

    model.add(Flatten())
    # --------------------------------------------------------------------
    # Adding the output layer
    # --------------------------------------------------------------------
    for out_layer in arg_out_layers:
        model.add(Dense(out_layer[0], activation=out_layer[1]))

    # ----------------------------------------------------------------------------------------------
    # Generating Model Summary
    # ----------------------------------------------------------------------------------------------
    model.summary()

    # ----------------------------------------------------------------------------------------------
    # Neural Network Model Compilation
    # ----------------------------------------------------------------------------------------------
    model.compile(optimizer=arg_compile_parms[0][0],
                  loss=arg_compile_parms[0][1],
                  metrics=arg_compile_parms[0][2])
    # --------------------------------------------------------------------
    # Fitting the Model
    # --------------------------------------------------------------------
    model_history = model.fit(arg_X_train,
                              arg_y_train,
                              epochs=arg_fit_parms[0][0],
                              batch_size=arg_fit_parms[0][1],
                              validation_data=(arg_X_test, arg_y_test))

    # --------------------------------------------------------------------
    # Printing the model Accuracy
    # --------------------------------------------------------------------
    model_accuracy = model.evaluate(arg_X_test, arg_y_test, verbose=0)[1]
    print('Model Accuracy is ', model_accuracy)

    return model
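The nested list arguments are positional: each arg_in_layers entry is [units, activation, input_shape], arg_compile_parms[0] is [optimizer, loss, metrics], and arg_fit_parms[0] is [epochs, batch_size]. A hypothetical call on random dummy data illustrating that layout:

import numpy as np

X_train, y_train = np.random.rand(100, 20), np.random.randint(0, 2, 100)
X_test, y_test = np.random.rand(30, 20), np.random.randint(0, 2, 30)
model = f_build_ANN(X_train, y_train, X_test, y_test,
                    arg_in_layers=[[64, 'relu', (20,)]],
                    arg_out_layers=[[1, 'sigmoid']],
                    arg_compile_parms=[['adam', 'binary_crossentropy', ['accuracy']]],
                    arg_fit_parms=[[10, 32]])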
Example No. 20
def predictIncompleteFusion(x_train, y_train, x_test, y_test):

    classifier = Sequential()
    classifier.add(Dense(45, activation='sigmoid', kernel_initializer='random_normal', input_dim=13))
    classifier.add(Dropout(0.6))
    classifier.add(Dense(45, activation='sigmoid', kernel_initializer='random_normal'))
    classifier.add(Dropout(0.6))
    classifier.add(Dense(1, activation='sigmoid', kernel_initializer='random_normal'))
    optimizer = optimizers.SGD(learning_rate=0.9, momentum=0.4)
    classifier.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])

    history = classifier.fit(x_train, y_train, validation_data=(x_test, y_test), batch_size=1, epochs=250)

    plt.plot(history.history['accuracy'])
    plt.plot(history.history['val_accuracy'])
    plt.title("IncomFus' accuracy graph")
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['accuracy', 'validation accuracy'], loc='upper left')
    plt.show()

    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title("IncomFus' loss graph")
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['loss', 'validation loss'], loc='upper left')
    plt.show()

    y_pred = classifier.predict(x_test)

    pd.DataFrame(y_pred).to_csv("C:\\Users\\nurizdau\\Desktop\\predicted_probs_incomfus.csv")

    y_pred[:] = (y_pred[:] > 0.5)

    # pd.DataFrame(y_pred).to_csv("C:\\Users\\nurizdau\\Desktop\\predicted_results_incomfus.csv")

    eval_model = classifier.evaluate(x_test, y_test)
    resultString = "Accuracy (incomplete fusion) is printed here: " + str(eval_model[1])

    new_test_y = y_test.values.flatten()
    new_pred_y = y_pred.astype(int).flatten()
    cm = confusion_matrix(new_test_y, new_pred_y)
    print(cm)
    print("accuracy of Incomplete Fusion is: ", ((cm[0][0] + cm[1][1]) / np.sum(cm)) * 100)
    print("sensitivity of Incomplete Fusion is: ", (cm[0][0] / np.sum(cm[0])) * 100)
    print("specificity of Incomplete Fusion is: ", (cm[1][1] / np.sum(cm[1])) * 100)

    return resultString
Example No. 21
def main(config="../../config.yaml", param="param_conf.yaml"):
    if isinstance(param, str):
        param = JobConfig.load_from_file(param)
    if isinstance(config, str):
        config = JobConfig.load_from_file(config)
        data_base_dir = config["data_base_dir"]
    else:
        data_base_dir = config.data_base_dir

    epoch = param["epoch"]
    lr = param["lr"]
    batch_size = param.get("batch_size", -1)
    optimizer_name = param.get("optimizer", "Adam")
    loss = param.get("loss", "categorical_crossentropy")
    metrics = param.get("metrics", ["accuracy"])
    layers = param["layers"]
    is_multy = param["is_multy"]
    data = dataset[param.get("dataset", "vehicle")]

    model = Sequential()
    for layer_config in layers:
        layer = getattr(tensorflow.keras.layers, layer_config["name"])
        layer_params = layer_config["params"]
        model.add(layer(**layer_params))

    model.compile(
        optimizer=getattr(optimizers, optimizer_name)(learning_rate=lr),
        loss=loss,
        metrics=metrics,
    )

    data_path = pathlib.Path(data_base_dir)
    data_with_label = pandas.concat([
        pandas.read_csv(data_path.joinpath(data["guest"]), index_col=0),
        pandas.read_csv(data_path.joinpath(data["host"]), index_col=0),
    ]).values
    data = data_with_label[:, 1:]
    if is_multy:
        labels = to_categorical(data_with_label[:, 0])
    else:
        labels = data_with_label[:, 0]
    if batch_size < 0:
        batch_size = len(data_with_label)
    model.fit(data, labels, epochs=epoch, batch_size=batch_size)
    evaluate = model.evaluate(data, labels)
    metric_summary = {"accuracy": evaluate[1]}
    data_summary = {}
    return data_summary, metric_summary
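The param file is expected to provide the keys read above; a hypothetical configuration, shown as the equivalent Python mapping (the concrete values are illustrative):

param = {
    "epoch": 50,
    "lr": 0.01,
    "batch_size": 64,                  # -1 means full-batch training
    "optimizer": "Adam",
    "loss": "categorical_crossentropy",
    "metrics": ["accuracy"],
    "is_multy": True,
    "dataset": "vehicle",
    "layers": [
        {"name": "Dense", "params": {"units": 32, "activation": "relu", "input_shape": (18,)}},
        {"name": "Dense", "params": {"units": 4, "activation": "softmax"}},
    ],
}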
Example No. 22
def main():
    # Load the data
    (x_training, y_training), (x_test, y_test) = fashion_mnist.load_data()

    # List of class names
    classes = [
        't-shirt', 'trousers', 'sweater', 'dress', 'coat', 'shoes', 'shirt',
        'sneakers', 'bag', 'boots'
    ]

    # Reshape the images into flat vectors
    x_training = x_training.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)

    # Normalize the data
    x_training = x_training / 255
    x_test = x_test / 255

    # Convert the labels to one-hot categories
    y_training = utils.to_categorical(y_training, 10)
    y_test = utils.to_categorical(y_test, 10)

    # Create a sequential model
    model = Sequential()

    # Add the network layers
    model.add(Dense(800, input_dim=784, activation="relu"))
    model.add(Dense(10, activation="softmax"))

    # Compile the model
    model.compile(loss="categorical_crossentropy",
                  optimizer="SGD",
                  metrics=["accuracy"])

    print(model.summary())

    # Train the network
    history = model.fit(x_training,
                        y_training,
                        batch_size=200,
                        epochs=100,
                        validation_split=0.2,
                        verbose=1)

    # Evaluate the quality of training on the test data
    scores = model.evaluate(x_test, y_test, verbose=1)
    print("Share of correct answers on the test data, in percent:",
          round(scores[1] * 100, 4))
Example No. 23
class Classifier:
    def __init__(self):
        self.classif = Sequential()

        self.classif.add(Dense(20,
                               activation='relu',
                               kernel_initializer='random_normal',
                               input_dim=1))
        # self.classif.add(Activation(custom_activation, name='SpecialActivation'))

        self.classif.add(Dense(20,
                               activation='relu',
                               kernel_initializer='random_normal'))

        self.classif.add(Dense(20,
                               activation='sigmoid',
                               kernel_initializer='random_normal'))

        self.classif.add(Dense(1,
                               activation='sigmoid',
                               kernel_initializer='random_normal'))

        self.classif.compile(optimizer='Nadam', loss=MeanSquaredError(), metrics=["mean_squared_error"])

    def train(self, x_train, y_train):
        batch_size = 1000

        self.classif.fit(x_train,
                         y_train,
                         batch_size=batch_size,
                         epochs=5000,
                         shuffle=True)
        return self.classif.evaluate(x_train, y_train)

    def test(self, x_test):
        y_pred = self.classif.predict(x_test)
        return y_pred

    def save(self):
        self.classif.save('data/classifier.h5')
        print("classifier is saved")

    def load(self, classifier_name: str = 'classifier'):
        self.classif = load_model(f'data/{classifier_name}.h5')
        print("classifier is loaded")
Example No. 24
def create_mlp_for_regression_predictions():
    from numpy import sqrt
    from pandas import read_csv
    from sklearn.model_selection import train_test_split
    from sklearn.preprocessing import LabelEncoder
    from tensorflow.keras import Sequential
    from tensorflow.keras.layers import Dense

    boston_dataset_path = '.\\KerasToTensorFlow\\boston.csv'
    boston_df = read_csv(boston_dataset_path, header=None)

    # split into input and output columns
    X, y = boston_df.values[:, :-1], boston_df.values[:, -1]

    # the target (house price) is continuous, so it is used as-is for regression

    # split into train and test datasets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)
    print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)

    # determine the number of input features
    num_input_features = X_train.shape[1]

    # define model
    model = Sequential()
    model.add(
        Dense(10,
              activation='relu',
              kernel_initializer='he_normal',
              input_shape=(num_input_features, )))
    model.add(Dense(8, activation='relu', kernel_initializer='he_normal'))
    model.add(Dense(1))

    # compile the model
    model.compile(optimizer='adam', loss='mse')

    # fit the model
    model.fit(X_train, y_train, epochs=150, batch_size=32, verbose=0)

    # evaluate the model
    error = model.evaluate(X_test, y_test, verbose=0)
    print('MSE: %.3f, RMSE: %.3f' % (error, sqrt(error)))

    return model
Example No. 25
def dnn_model(training=(), val=(), test=(), epochs=100, layers=(1024, 512, 128)):
    # Performance dictionary
    performance = {}
    
    # Early stopping; callbacks() is a project helper (see the sketch after this example)
    callback = callbacks()
    
    i=0
    
    for layer in layers:
        #model
        model = Sequential()
        model.add(Flatten(input_shape=(32,32)))
        model.add(Dense(layer, activation='relu'))
        model.add(Dense(46, activation='softmax'))
        
        # Compiling the model; the softmax output already yields probabilities,
        # so the loss must not be told to expect logits
        model.compile(optimizer='adam',
                      loss=SparseCategoricalCrossentropy(from_logits=False),
                      metrics=['accuracy'])
        
        # Training the model
        history = model.fit(training[0], training[1],
                            epochs=epochs,
                            callbacks=[callback],
                            validation_data=(val[0], val[1]), verbose=3)
        
        #Visualizing the model
        vs.training_visualize(history, title='DNN {} layer'.format(layer))
        
        #Save model
        save_model(model, title='{}'.format(layer))
        
        #Evaluate model
        evaluation = model.evaluate(test[0], test[1])
        
        #Record Performance.
        performance[i] = [evaluation[0], evaluation[1], layer]   
        i = i+1
    performance = pd.DataFrame(data=performance)
    performance = performance.transpose()
    performance.columns = ['loss', 'accuracy', 'layer']
    performance.to_csv('../src/models/performance_DNN.csv')
    
    return performance
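The callbacks() helper used above is project code that is not shown; a hypothetical version consistent with the early-stopping comment:

from tensorflow.keras.callbacks import EarlyStopping

def callbacks(patience=5):
    # stop once validation loss stops improving and restore the best weights
    return EarlyStopping(monitor='val_loss', patience=patience, restore_best_weights=True)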
Example No. 26
def ANN(X_train, y_train, X_test, y_test):
    model = Sequential()
    model.add(tf.keras.layers.Dense(40, activation=tf.nn.relu))
    model.add(tf.keras.layers.Dense(40, activation=tf.nn.relu))
    model.add(tf.keras.layers.Dense(20, activation=tf.nn.relu))
    model.add(tf.keras.layers.Dense(20, activation=tf.nn.relu))
    model.add(tf.keras.layers.Dense(10, activation=tf.nn.relu))
    model.add(tf.keras.layers.Dense(1, activation=tf.nn.sigmoid))
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    model.fit(X_train, y_train, epochs=100, validation_data=(X_test, y_test))
    accuracy_NN = model.evaluate(X_test, y_test)
    model.save('Cancer_predictor_nn.h5')

    return accuracy_NN
Example No. 27
def train_conv(request):
    (x_train, y_train), (x_test, y_test) = load_happy()
    # keep the inputs as numpy arrays; converting them to Python lists would make
    # fit() interpret them as multiple separate inputs
    model = Sequential()
    # TODO: Why doesn't this work?
    model.add(keras.layers.ZeroPadding2D((3, 3)))
    model.add(
        Conv2D(32,
               kernel_size=(7, 7),
               strides=(1, 1),
               activation='relu',
               input_shape=(64, 64, 3)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (11, 11), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(1000, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))

    model.compile(loss=keras.losses.binary_crossentropy,
                  optimizer=keras.optimizers.SGD(learning_rate=0.01),
                  metrics=['accuracy'])
    history = model.fit(x_train,
                        y_train,
                        batch_size=5,
                        epochs=2,
                        verbose=1,
                        shuffle='batch',
                        validation_data=(x_test, y_test))

    test_loss, test_acc = model.evaluate(x_test, y_test)

    return Response({
        'params': history.params,
        'history': history.history,
        'test': {
            'test_loss': test_loss,
            'test_acc': test_acc
        }
    })
Example No. 28
class BinClassifier:
    def __init__(self):
        self.classif = Sequential()

        self.classif.add(
            Dense(28 * 28,
                  activation='relu',
                  kernel_initializer='random_normal',
                  input_dim=28 * 28,
                  name='features1'))

        self.classif.add(
            Dense(10,
                  activation='sigmoid',
                  kernel_initializer='random_normal',
                  name='features'))

        self.classif.compile(optimizer='adam',
                             loss='binary_crossentropy',
                             metrics=['accuracy'])

    def train(self, x_train, y_train):
        batch_size = 1000

        self.classif.fit(x_train,
                         y_train,
                         batch_size=batch_size,
                         epochs=100,
                         shuffle=True)
        return self.classif.evaluate(x_train, y_train)

    def test(self, x_test, full_return: bool = False):
        y_pred = self.classif.predict(x_test)
        return y_pred if full_return else np.maximum(y_pred - 0.5, 0)

    def save(self):
        self.classif.save('data/classifier.h5')
        print("classifier is saved")

    def load(self, classifier_name: str = 'classifier'):
        self.classif = load_model(f'data/{classifier_name}.h5')
        print("classifier is loaded")
Example No. 29
def train_LSTM(X_train, Y_train, X_test, Y_test, NAME):
	model = Sequential()
	model.add(LSTM(128, input_shape=(X_train.shape[1:]), return_sequences=True))
	model.add(Dropout(0.2))
	model.add(BatchNormalization())

	model.add(LSTM(128, return_sequences=True))
	model.add(Dropout(0.1))
	model.add(BatchNormalization())

	model.add(LSTM(128))
	model.add(Dropout(0.1))
	model.add(BatchNormalization())


	model.add(Dense(32, activation='relu'))
	model.add(Dropout(0.2))

	model.add(Dense(1, activation='sigmoid'))


	EPOCHS = 200
	BATCH_SIZE = 64 
	opt = tf.keras.optimizers.RMSprop(learning_rate=0.001, rho=0.9)
	model.compile(
	    loss='binary_crossentropy',
	    optimizer=opt,
	    metrics=['accuracy']
	)
	filepath = "LSTM-{epoch:02d}-{val_acc:.3f}"

	history = model.fit(
	    X_train, Y_train,
	    batch_size=BATCH_SIZE,
	    epochs=EPOCHS,
	    validation_data=(X_test, Y_test),
	)

	score = model.evaluate(X_test, Y_test, verbose=0)
	print('Test loss:', score[0])
	print('Test accuracy:', score[1])

	model.save("{}.model".format(NAME))
Example No. 30
def classify():
    data = pd.read_csv("malware_dataset.csv")
    data['Type'] = data['Api'].map({
        'GetSystemTimeAsFileTime': 'Trojan',
        '__exception__': 'Backdoor',
        'NtAllocateVirtualMemory': 'Downloader',
        'LdrLoadDll': 'Adware',
        'WriteConsoleA': 'Spyware',
        'NtQueryValueKey': 'Worms',
        'WSAStartup': 'Dropper',
        'NtCreateMutant': 'Virus',
        'LoadStringA': 'Rootkit',
        'CreateThread': 'Ransomware',
        'NtProtectVirtualMemory': 'Keylogger',
        'SetUnhandledExceptionFilter': 'grayware',
        'LdrGetDllHandle': 'Crimeware'
    })
    from sklearn.preprocessing import LabelEncoder
    labelencoder = LabelEncoder()
    X = labelencoder.fit_transform(data.Type)
    y = labelencoder.fit_transform(data.Api)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.22)
    from tensorflow.keras import Sequential
    from tensorflow.keras.layers import Dense
    model = Sequential()
    model.add(Dense(2, input_dim=1, activation='relu'))
    model.add(Dense(2, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    # note: y spans more than two classes here, so a softmax output with
    # sparse_categorical_crossentropy would normally be a better fit
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    model.fit(X_train, y_train, batch_size=30, epochs=100)
    eval_model = model.evaluate(X_train, y_train)
    print(eval_model)    # [loss, accuracy] on the training data
    y_pred = model.predict(X_test)
    y_pred = (y_pred > 0.5)
    print(y_pred)
    result = model.predict(X_test)
    print(result)
    print(model.predict(X_train))
    model.save('model.h5')