Example #1
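These snippets are collected from different projects and omit their import headers. A minimal sketch of the common imports most of them assume (the exact modules vary per project, e.g. keras vs. tensorflow.python.keras, so treat this as an assumption rather than the original headers):

# Assumed common imports (not part of the original listings):
import numpy as np
from keras.models import Sequential, load_model
from keras.layers import Dense
from keras.activations import tanh, linear
from keras.losses import mse
from keras.optimizers import Adam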
class PPOValueBrain:
    def __init__(
        self,
        learning_rate: float = 0.0001,
        hidden_layers_count: int = 0,
        neurons_per_hidden_layer: int = 0,
    ):
        self.model = Sequential()

        for i in range(hidden_layers_count):
            self.model.add(Dense(neurons_per_hidden_layer, activation=tanh))

        self.model.add(Dense(1, activation=linear, use_bias=True))
        self.model.compile(loss=mse, optimizer=Adam(lr=learning_rate))

    def predict(self, state: np.ndarray) -> np.ndarray:
        return self.model.predict(np.array((state,)))[0]

    def train(self, states: np.ndarray, targets: np.ndarray):
        self.model.train_on_batch(states, targets)

    def save_model(self, filename: str):
        self.model.save(f"{filename}_critic.h5")

    def load_model(self, filename: str):
        self.model = load_model(filename)
Example #2
class Agent:
    def __init__(self):
        #Here we'll create the brain of our agent, a deep neural network with Keras
        self.model = Sequential()
        self.q_targets = []
        self.model.add(kl.Dense(10, input_dim=27))
        self.model.add(kl.Activation('relu'))
        self.model.add(kl.Dense(4, input_dim=10))
        self.model.add(kl.Activation('tanh'))
        self.model.compile(optimizer="adam", loss='MSE', metrics=['accuracy'])

    def pick_action(self, state, eps):
        # epsilon is the probability that we take a random action:
        # decide whether to explore randomly or exploit the network
        if np.random.rand() < eps:
            return np.random.randint(4)
        #else we take the best action according to our neural network (NN)
        else:
            return int(
                np.argmax(self.model.predict(np.reshape(state, (-1, 27)))))

    def train_model(self, states0, states1, actions, rewards):
        length = np.min((len(actions), 1000))
        self.q_targets = []
        prediction = self.model.predict(np.reshape(states0, (-1, 27)))

        for n in range(length):

            action_mask = np.array([1, 1, 1, 1])
            action_mask = np.logical_xor(action_mask, actions[n])
            pred = self.model.predict(np.reshape(states1[n], (-1, 27)))
            #print(pred.shape)
            q_target = prediction[n] * action_mask + actions[n] * (
                rewards[n])  # + 0.99*pred[0][np.argmax(pred)] )
            self.q_targets.append(q_target)
            print(states0[n], actions[n], rewards[n], q_target)

        self.model.fit(
            np.reshape(states0, (length, 27)),
            np.array(self.q_targets),
            batch_size=1,
        )
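The commented-out fragment on the line building `q_target` hints at the usual discounted Bellman target. A sketch of what that term would compute with a discount factor of 0.99 (my reconstruction, not the author's code):

# Reconstructed discounted Q-learning target (assumption):
#   q_target = prediction[n] * action_mask \
#              + actions[n] * (rewards[n] + 0.99 * np.max(pred))
# i.e. target = r + gamma * max_a' Q(s', a') for the chosen action,
# while the other actions keep their current predicted values.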
Example #3
def sequential():
    model = Sequential()

    model.add(Dense(28, input_dim=784, activation=relu))
    model.add(Dense(28, activation=relu))
    model.add(Dense(10, activation=tf.nn.softmax))
    model.compile(optimizer=Adam(0.001),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    model.fit(X, y, epochs=10, batch_size=10)
    y_pred = model.predict(test_X)
    print(y_pred)
Example #4
def gru_test():
    '''
    With return_sequences=True, the GRU returns the output at every
    time step; when it is not used, only the last time step's output
    is returned (see the shape sketch after this function).
    '''
    model = Sequential()
    model.add(CuDNNGRU(128))
    # model.add(CuDNNGRU(128, return_sequences=True))
    model.compile('rmsprop', 'mse')
    input_array = np.random.normal(size=(32, 10, 1))
    output_array = model.predict(input_array)
    print(output_array.shape)
    return model
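A minimal sketch of the shape difference the docstring describes, on the same (32, 10, 1) input. It assumes keras.layers.GRU, since CuDNNGRU requires a GPU build:

# Shape sketch (assumes keras.layers.GRU):
from keras.layers import GRU
seq = Sequential()
seq.add(GRU(128, return_sequences=True, input_shape=(10, 1)))
print(seq.predict(np.random.normal(size=(32, 10, 1))).shape)   # (32, 10, 128)

last = Sequential()
last.add(GRU(128, input_shape=(10, 1)))
print(last.predict(np.random.normal(size=(32, 10, 1))).shape)  # (32, 128)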
Example #5
class PointwiseNN(Ranker):
    def __init__(self):
        self.model = Sequential()
        self.model.add(Dense(128))
        self.model.add(Dense(1))
        self.model.compile(optimizer='adam', loss='mse')
        super().__init__()

    def fit(self, X_train, y_train):
        self.model.fit(x=X_train, y=y_train)

    def predict(self, X) -> ndarray:
        return self.model.predict(x=X)
Example #6
def embedding_test():
    '''
    The model takes as input an integer matrix of size (batch, input_length).
    The largest integer (i.e. word index) in the input should not exceed
    999 (the vocabulary size). With Embedding(1000, 64, input_length=10),
    model.output_shape == (None, 10, 64), where None is the batch dimension.
    '''
    model = Sequential()
    # model.add(Embedding(1000, 64, input_length=10))
    model.add(Embedding(160, 4))
    # input_array = np.random.randint(1000, size=(32, 10))
    input_array = np.random.randint(160, size=(5, 4, 4))
    model.compile('rmsprop', 'mse')
    output_array = model.predict(input_array)
    print(output_array.shape)
    return model
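Note that the active code departs from the translated docstring: it feeds a rank-3 integer tensor into Embedding(160, 4). A short note on the resulting shape (my annotation, not part of the original):

# Embedding simply appends the embedding dimension to whatever integer
# input shape it is given, so with input of shape (5, 4, 4) and
# Embedding(160, 4) the printed output shape is (5, 4, 4, 4).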
Example #7
def test_masking_fixed_length(get_random_data):
    nb_samples = 2
    timesteps = 10
    embedding_dim = 4
    output_dim = 5
    embedding_num = 12

    crf_loss_instance = ConditionalRandomFieldLoss()

    x, y = get_random_data(nb_samples,
                           timesteps,
                           x_high=embedding_num,
                           y_high=output_dim)
    # right padding; left padding is not supported due to the tf.contrib.crf
    x[0, -4:] = 0

    # test with masking, fix length
    model = Sequential()
    model.add(
        Embedding(embedding_num,
                  embedding_dim,
                  input_length=timesteps,
                  mask_zero=True))
    model.add(CRF(output_dim, name="crf_layer"))
    model.compile(optimizer='adam', loss={"crf_layer": crf_loss_instance})

    model.fit(x, y, epochs=1, batch_size=1)
    model.fit(x, y, epochs=1, batch_size=2)
    model.fit(x, y, epochs=1, batch_size=3)
    model.fit(x, y, epochs=1)

    # check mask
    y_pred = model.predict(x)
    assert (y_pred[0, -4:] == 0).all()  # right padding
    # left padding not working currently due to the tf.contrib.crf.*
    # assert (y_pred[1, :5] == 0).all()

    # test saving and loading model
    MODEL_PERSISTENCE_PATH = './test_saving_crf_model.h5'
    model.save(MODEL_PERSISTENCE_PATH)
    load_model(MODEL_PERSISTENCE_PATH, custom_objects={'CRF': CRF})

    try:
        os.remove(MODEL_PERSISTENCE_PATH)
    except OSError:
        pass
Example #8
def get_sparse_mlp(ws, bs, ls, reference):
    """
    :param ws: Weights of the MLP (as a list)
    :param bs: Biases of the MLP (as a list)
    :param ls: link functions
    :param reference: baselines (aka reference points)
    :return: (keras) sparse version of the model
    """
    # New MLP number of neurons
    times = np.ones(len(ls)).astype(int)  # np.int is removed in modern NumPy
    for j in range(len(ls) - 2, -1, -1):
        times[j] = np.size(ws[j], 1) * times[j + 1]

    # build sparse model
    sparse_model = Sequential()
    for j in range(len(ls)):
        # Need to store dimensions in some scalar
        n_neurons_realnet = np.size(ws[j], 0)
        # Initialize Weight vector to 0s
        this_w = np.zeros((n_neurons_realnet * times[j], times[j]))
        # and biases
        this_b = np.zeros(times[j])
        # Fill the biases and the weights in the correct place
        col = 0
        for i in range(np.size(this_w, 1)):
            if col == np.size(ws[j], 1): col = 0
            this_w[i * n_neurons_realnet:(i + 1) * n_neurons_realnet,
                   i] = ws[j][:, col]
            this_b[i] = np.asarray(bs[j])[col]
            col += 1
        # Add layer to the network
        this_dense = Dense(units=np.size(this_w, 1),
                           activation=ls[j],
                           input_shape=(np.size(this_w, 0), ))
        sparse_model.add(this_dense)
        this_dense.set_weights([this_w, this_b])
    # compile the new (sparse) model
    opt = keras.optimizers.Adam()
    sparse_model.compile(loss=keras.losses.binary_crossentropy,
                         optimizer=opt,
                         metrics=['accuracy'])

    # Print reference
    at_reference = sparse_model.predict(np.array(reference).reshape(1, -1))
    print("Prediction at the reference point is: ", at_reference)
    return sparse_model
Example #9
def train_model(X, Y, X_test, Y_test, nr_steps):
    n_batch = 14000
    n_epoch = 1000
    n_neurons = 30

    # design network
    model = Sequential()
    model.add(
        LSTM(units=n_neurons,
             return_sequences=True,
             batch_input_shape=(n_batch, X.shape[1], X.shape[2]),
             stateful=True))

    model.add(Dropout(0.2))

    # Adding a second LSTM layer and Dropout layer
    model.add(LSTM(units=n_neurons, return_sequences=True))
    model.add(Dropout(0.2))

    model.add(Dense(1))
    optimizer = optimizers.Adam(clipvalue=0.5)
    model.compile(loss='mean_squared_error',
                  optimizer=optimizer,
                  metrics=['acc'])

    for z in range(0, len(X) // 4, n_batch):
        xChunk = X[z:z + n_batch]
        yChunk = Y[z:z + n_batch]

        model.fit(xChunk,
                  yChunk,
                  epochs=n_epoch,
                  batch_size=n_batch,
                  verbose=1,
                  shuffle=False)
        model.reset_states()

    yhat = model.predict(X_test[0:n_batch, :, :],
                         batch_size=n_batch,
                         steps=nr_steps)
    for i in range(len(Y_test[0:n_batch])):
        print("Expected: " + str(Y_test[i]) + " but actually: " + str(yhat[i][0]))
    # note: this multiplies the per-sample percentage errors together;
    # a mean of the absolute percentage errors is the more usual aggregate
    error = 1
    for j in range(len(Y_test[0:n_batch])):
        error = error * (abs(Y_test[j] - yhat[j][0]) / yhat[j][0]) * 100
    print("error:   " + str(error) + "%")
Example #10
class Reinforce_with_mean_baselineValueBrain:
    def __init__(self,
                 learning_rate: float = 0.0001,
                 hidden_layers_count: int = 0,
                 neurons_per_hidden_layer: int = 0):
        self.model = Sequential()

        for i in range(hidden_layers_count):
            self.model.add(Dense(neurons_per_hidden_layer, activation=tanh))

        # note: softmax over a single output unit always yields 1.0
        self.model.add(Dense(1, activation=softmax, use_bias=True))
        self.model.compile(loss=mse, optimizer=Adam(lr=learning_rate))

    def predict(self, state: np.ndarray) -> np.ndarray:
        return self.model.predict(np.array((state, )))[0]

    def train(self, states: np.ndarray, targets: np.ndarray):
        self.model.train_on_batch(states, targets)
Example #11
def test_viterbi_tags(numpy_crf):
    logits = np.array([
        [[0, 0, .5, .5, .2], [0, 0, .3, .3, .1], [0, 0, .9, 10, 1]],
        [[0, 0, .2, .5, .2], [0, 0, 3, .3, .1], [0, 0, .9, 1, 1]],
    ])
    transitions = np.array([
        [0.1, 0.2, 0.3, 0.4, 0.5],
        [0.8, 0.3, 0.1, 0.7, 0.9],
        [-0.3, 2.1, -5.6, 3.4, 4.0],
        [0.2, 0.4, 0.6, -0.3, -0.4],
        [1.0, 1.0, 1.0, 1.0, 1.0]
    ])

    boundary_transitions = np.array([0.1, 0.2, 0.3, 0.4, 0.6])

    # Use the CRF Module with fixed transitions to compute the log_likelihood
    crf = CRF(
        units=5,
        use_kernel=False,  # disable kernel transform
        chain_initializer=initializers.Constant(transitions),
        use_boundary=True,
        boundary_initializer=initializers.Constant(boundary_transitions),
        name="crf_layer"
    )
    mask = np.array([
            [1, 1, 1],
            [1, 1, 0]
    ])

    crf_loss_instance = ConditionalRandomFieldLoss()

    model = Sequential()
    model.add(layers.Input(shape=(3, 5)))
    model.add(MockMasking(mask_shape=(2, 3), mask_value=mask))
    model.add(crf)
    model.compile('adam', loss={"crf_layer": crf_loss_instance})

    # Separate the tags and scores.
    result = model.predict(logits)

    numpy_crf_instance = numpy_crf(logits, mask, transitions, boundary_transitions, boundary_transitions)
    expected, _ = numpy_crf_instance.decode()

    np.testing.assert_equal(result, expected)
Example #12
def regression(x_data, y_data):
    # build a Sequential model
    model = Sequential()

    # add fully connected layers to the model
    # network structure 1-10-1: 1 input neuron, 10 hidden neurons, 1 output neuron

    # way 1 of adding the activation functions
    model.add(Dense(units=10, input_dim=1))
    model.add(Activation('tanh'))
    model.add(Dense(units=1))
    model.add(Activation('tanh'))

    # way 2 of adding the activation functions
    # model.add(Dense(units=10, input_dim=1, activation='relu'))
    # model.add(Dense(units=1, activation='relu'))

    # define the optimization algorithm
    sgd = SGD(lr=0.3)
    # sgd: stochastic gradient descent
    # mse: mean squared error
    model.compile(optimizer=sgd, loss='mse')

    # training
    for step in range(3001):
        # train on one batch at a time
        cost = model.train_on_batch(x_data, y_data)
        # print the cost every 500 batches
        if step % 500 == 0:
            print('cost: ', cost)
    # print the weights and biases
    W, b = model.layers[0].get_weights()
    print('W:', W, ' b: ', b)
    print(len(model.layers))

    # feed x_data through the network to get the predictions y_pred
    y_pred = model.predict(x_data)

    # plot the random data points
    plt.scatter(x_data, y_data)
    # plot the predicted curve
    plt.plot(x_data, y_pred, 'r-', lw=3)
    plt.show()
Example #13
def long_short_term_memory(data, settings):
    """Creates a Long short-term memory model (LSTM) and predictions.

    Args:
        data: pandas.DataFrame.
        settings: Dictionary object containing settings parameters.
    Returns:
        A dictionary containing the LSTM model and predictions.
    """

    #  INSTANTIATE MODEL
    model = Sequential()

    #  TRAIN DATA GENERATOR
    train_generator = create_generator(data['train'],
                                       settings['morph'],
                                       shuffle=True)

    #  ADDING LAYERS TO MODEL
    add_lstm_layers(model, data, settings, train_generator[0][0].shape)

    #  COMPILE THE MODEL
    model.compile(loss=settings['loss'], optimizer=settings['optimizer'])

    #  TRAIN USING TRAIN DATA
    model.fit_generator(train_generator,
                        steps_per_epoch=len(train_generator),
                        epochs=settings['epochs'],
                        verbose=0)

    #  TEST DATA GENERATOR
    test_generator = create_generator(data['test'],
                                      settings['morph'],
                                      shuffle=False)

    #  PREDICT USING TEST DATA
    predictions = model.predict(test_generator)

    # denormalized_predictions = ""

    return {'model': model, 'predictions': predictions}
Example #14
class DQNBrain:
    def __init__(
        self,
        output_dim: int,
        learning_rate: float = 0.0001,
        hidden_layers_count: int = 0,
        neurons_per_hidden_layer: int = 0,
        activation: str = "tanh",
        using_convolution: bool = False,
    ):
        self.model = Sequential()

        if using_convolution:
            self.model.add(Conv2D(64, kernel_size=3, activation=activation))
            self.model.add(Conv2D(32, kernel_size=3, activation=activation))
            self.model.add(Flatten())
            self.model.add(Dense(neurons_per_hidden_layer, activation=activation))
        else:
            for _ in range(hidden_layers_count):
                self.model.add(Dense(neurons_per_hidden_layer, activation=activation))

        self.model.add(Dense(output_dim, activation=linear, use_bias=False))
        self.model.compile(loss=mse, optimizer=Adam(lr=learning_rate))

    def predict(self, state: np.ndarray) -> np.ndarray:
        return self.model.predict(np.array((state,)))[0]

    def train(self, state: np.ndarray, chosen_action_mask: np.ndarray, target: float):
        target_vec = chosen_action_mask * target + (
            1 - chosen_action_mask
        ) * self.predict(state)
        self.model.train_on_batch(np.array((state,)), np.array((target_vec,)))

    def save_model(self, filename: str):
        self.model.save(f"{filename}.h5")

    def load_model(self, filename: str):
        self.model = load_model(filename)
Example #15
class DQNBrain:
    def __init__(self,
                 output_dim: int,
                 learning_rate: float = 0.0001,
                 hidden_layers_count: int = 0,
                 neurons_per_hidden_layer: int = 0):
        self.model = Sequential()

        for i in range(hidden_layers_count):
            self.model.add(Dense(neurons_per_hidden_layer, activation=tanh))

        self.model.add(Dense(output_dim, activation=linear, use_bias=False))
        self.model.compile(loss=mse, optimizer=Adam(lr=learning_rate))

    def predict(self, state: np.ndarray) -> np.ndarray:
        return self.model.predict(np.array((state, )))[0]

    def train(self, state: np.ndarray, chosen_action_mask: np.ndarray,
              target: float):
        target_vec = chosen_action_mask * target + \
                     (1 - chosen_action_mask) * self.predict(state)
        self.model.train_on_batch(np.array((state, )), np.array(
            (target_vec, )))
Example #16
def test_crf_viterbi_accuracy(get_random_data):
    nb_samples = 2
    timesteps = 10
    embedding_dim = 4
    output_dim = 5
    embedding_num = 12

    crf_loss_instance = ConditionalRandomFieldLoss()

    x, y = get_random_data(nb_samples,
                           timesteps,
                           x_high=embedding_num,
                           y_high=output_dim)
    # right padding; left padding is not supported due to the tf.contrib.crf
    x[0, -4:] = 0

    # test with masking, fix length
    model = Sequential()
    model.add(
        Embedding(embedding_num,
                  embedding_dim,
                  input_length=timesteps,
                  mask_zero=True))
    model.add(CRF(output_dim, name="crf_layer"))
    model.compile(optimizer='rmsprop',
                  loss={"crf_layer": crf_loss_instance},
                  metrics=[crf_viterbi_accuracy])

    model.fit(x, y, epochs=1, batch_size=10)

    # test viterbi_acc
    y_pred = model.predict(x)
    _, v_acc = model.evaluate(x, y)
    np_acc = (y_pred[x > 0] == y[x > 0]).astype('float32').mean()
    print(v_acc, np_acc)
    assert np.abs(v_acc - np_acc) < 1e-4
Example #17
model.add(Dense(2, activation='softmax'))

# 3. model fitting config
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# 4. model check point config
model_directory = os.path.join(os.getcwd(), 'model')
if not os.path.exists(model_directory):
    os.mkdir(model_directory)

checkpoint = ModelCheckpoint(
    filepath=os.path.join(model_directory, '{epoch:03d}-{val_loss:.4f}.h5'),
    monitor='val_loss',  # options: val_loss (validation loss), loss (training loss), val_accuracy (validation accuracy), accuracy (training accuracy)
    verbose=1,
    save_best_only=True
)

# 5. model fitting
model.fit(x, t, validation_split=0.2, epochs=200, batch_size=100, verbose=1, callbacks=[checkpoint])

# 6. result
result = model.evaluate(x, t, verbose=0)
print(f'\n(Loss, Accuracy)=({result[0]}, {result[1]})')

# 7. predict
data = np.array([[8.5, 0.21, 0.26, 9.25, 0.034, 73, 142, 0.9945, 3.05, 0.37, 11.4, 6]])
predict = model.predict(data)
index = np.argmax(predict)
wines = ['Red Wine', 'White Wine']
print(wines[index])
Example #18
class NeuralNetwork(object):
    def __init__(self):
        self.model = None

    def createModel(self):
        """Create and compile the keras model. See layers-18pct.cfg and 
           layers-params-18pct.cfg for the network model, 
           and https://code.google.com/archive/p/cuda-convnet/wikis/LayerParams.wiki 
           for documentation on the layer format.
        """
        self.model = Sequential()
        self.model.add(
            keras.layers.Conv2D(filters=32,
                                kernel_size=5,
                                strides=(1, 1),
                                padding='same',
                                input_shape=(32, 32, 3),
                                data_format="channels_last",
                                dilation_rate=(1, 1),
                                activation=tf.nn.relu))
        self.model.add(
            keras.layers.MaxPooling2D(pool_size=(3, 3),
                                      strides=(2, 2),
                                      padding='same'))
        self.model.add(
            keras.layers.BatchNormalization(
                axis=1,
                momentum=0.99,
                epsilon=0.001,
            ))
        self.model.add(
            keras.layers.Conv2D(filters=32,
                                kernel_size=5,
                                strides=(1, 1),
                                padding='same',
                                dilation_rate=(1, 1),
                                activation=tf.nn.relu))
        self.model.add(
            keras.layers.AveragePooling2D(pool_size=(3, 3),
                                          strides=(2, 2),
                                          padding='same'))
        self.model.add(
            keras.layers.BatchNormalization(axis=-1,
                                            momentum=0.99,
                                            epsilon=0.001))
        self.model.add(
            keras.layers.Conv2D(filters=32,
                                kernel_size=5,
                                strides=(1, 1),
                                padding='same',
                                dilation_rate=(1, 1),
                                activation=tf.nn.relu))
        self.model.add(
            keras.layers.AveragePooling2D(pool_size=(3, 3),
                                          strides=(2, 2),
                                          padding='same'))
        self.model.add(keras.layers.Flatten())
        self.model.add(keras.layers.Dense(10, activation=tf.nn.softmax))

        self.model.compile(optimizer=keras.optimizers.Adam(),
                           loss='sparse_categorical_crossentropy',
                           metrics=['accuracy'])

    def train(self, train_data, train_labels, epochs):
        """Train the keras model
        
        Arguments:
            train_data {np.array} -- The training image data
            train_labels {np.array} -- The training labels
            epochs {int} -- The number of epochs to train for
        """

        self.model.fit(train_data, train_labels, epochs=epochs, batch_size=128)

    def evaluate(self, eval_data, eval_labels):
        """Calculate the accuracy of the model
        
        Arguments:
            eval_data {np.array} -- The evaluation images
            eval_labels {np.array} -- The labels for the evaluation images
        """

        return self.model.evaluate(eval_data, eval_labels)[1]

    def test(self, test_data):
        """Make predictions for a list of images and display the results
        
        Arguments:
            test_data {np.array} -- The test images
        """

        return self.model.predict(test_data)

    ## Exercise 7 Save and load a model using the keras.models API
    def saveModel(self, saveFile="model.h5"):
        """Save a model using the keras.models API
        
        Keyword Arguments:
            saveFile {str} -- The name of the model file (default: {"model.h5"})
        """

        keras.models.save_model(self.model, saveFile)

    def loadModel(self, saveFile="model.h5"):
        """Load a model using the keras.models API
        
        Keyword Arguments:
            saveFile {str} -- The name of the model file (default: {"model.h5"})
        """

        self.model = keras.models.load_model(saveFile)

Example #19
# Variant 1: separate Activation layers
'''
xor_model = Sequential()
xor_model.add(Dense(1024, input_dim=2,name="First_Layer"))
xor_model.add(Activation('relu',name="Relu_Activation"))
xor_model.add(Dense(1,name="Dense_Layer"))
xor_model.add(Activation('sigmoid',name="Sigmoid_Activation"))
'''

# Variant 2: activation passed as an argument
xor_model = Sequential()
xor_model.add(Dense(1024, input_dim=2, activation="relu"))
xor_model.add(Dense(1, activation="sigmoid"))

# Variant 3: as an array of layers
'''
xor_model = Sequential([
    Dense(1024, input_dim=2),
    Activation('relu'),
    Dense(1),
    Activation('sigmoid')
])
'''
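The snippet assumes XOR truth-table arrays that the listing does not show; a minimal sketch of plausible `input_data` and `output_data` (an assumption, not the original data):

# Assumed XOR training data (not shown in the original listing):
input_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
output_data = np.array([[0], [1], [1], [0]])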

xor_model.summary()
sgd = SGD(lr=0.01)

# the model is compiled and trained
xor_model.compile(loss="mean_squared_error", optimizer=sgd, metrics=["mae"])
xor_model.fit(input_data, output_data, batch_size=1, epochs=3000, verbose=1)
pprint(xor_model.predict(input_data))
Example #20
activities_no = int(input("Is the activities_no::"))
activities_yes = int(input("Is the activities_yes::"))
nursery_no = int(input("Is the nursery_no::"))
nursery_yes = int(input("Is the nursery_yes::"))
higher_no = int(input("Is the higher_no::"))
higher_yes = int(input("Is the higher_yes::"))
internet_no = int(input("Is internet_no::"))
internet_yes = int(input("Is internet_yes::"))
romantic_no = int(input("Is romantic_no::"))
romantic_yes = int(input("Is romantic_yes::"))

# calculate predictions
sample = [[
    age, Medu, Fedu, traveltime, studytime, failures, famrel, freetime, goout,
    Dalc, Walc, health, absences, school_GP, school_MS, sex_F, sex_M,
    address_R, address_U, famsize_GT3, famsize_LE3, Pstatus_A, Pstatus_T,
    Mjob_at_home, Mjob_health, Mjob_other, Mjob_services, Mjob_teacher,
    Fjob_at_home, Fjob_health, Fjob_other, Fjob_services, Fjob_teacher,
    reason_course, reason_home, reason_other, reason_reputation,
    guardian_father, guardian_mother, guardian_other, schoolsup_no,
    schoolsup_yes, famsup_no, famsup_yes, paid_no, paid_yes, activities_no,
    activities_yes, nursery_no, nursery_yes, higher_no, higher_yes,
    internet_no, internet_yes, romantic_no, romantic_yes
]]
predict = model.predict(sample)
print(" ")
if predict < 0.5:
    print("The student may suffer in the examination")
else:
    print("The student is likely to pass the examination")
Example #21
epochs = 1000
batch_size = 5
# GRU parameters: with return_sequences=True the GRU outputs the full sequence;
# the default is False, which outputs only the final value.
# input_dim: dimensionality of a single sample's feature vector
# input_length: number of time steps in the input
model = Sequential()
model.add(
    GRU(units=10,
        return_sequences=True,
        input_dim=train_x.shape[-1],
        input_length=train_x.shape[1]))
model.add(GRU(units=50))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size, verbose=1)
y_pred = model.predict(test_x)

rms = np.sqrt(np.mean(np.power((test_y - y_pred), 2)))
print(rms)
print(y_pred.shape)
print(test_y.shape)

x_axis = np.arange(1, np.shape(test_y)[0] + 1)
plt.xlabel('Time')
plt.ylabel('Price')
plt.title('Stock Price epoch:%d batch_size:%d rms:%f' %
          (epochs, batch_size, rms))
plt.plot(x_axis, test_y, label='True value')
plt.plot(x_axis, y_pred.reshape(1, -1)[0], label='Predict value')
plt.legend()
plt.show()
Example #22
def engine():
    if request.method == 'GET':
        algo = request.args.get('algo')
        ptype = request.args.get('ptype')
        tick = request.args.get('tick')
        daysx = request.args.get('daysx')
        if algo == "Delta":
            algo = "adadelta"
        elif algo == "Meta":
            algo = "adam"
        elif algo == "Gradient":
            algo = "adagrad"
        #importing the packages
        #part 1
        daysx = int(daysx)
        import datetime as dt
        import urllib.request, json
        import pandas as pd  #3
        import numpy as np  #3
        #        import matplotlib.pyplot as plt
        #        from matplotlib.pylab import rcParams
        from sklearn.preprocessing import MinMaxScaler
        #used for setting the output figure size
        #        rcParams['figure.figsize'] = 20,10
        #to normalize the given input data
        scaler = MinMaxScaler(feature_range=(0, 1))
        #to read input data set (place the file name inside  ' ') as shown below
        ticker = tick

        api_key = '3T9YAWICQG9J42JM'

        url_string = "https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=%s&outputsize=compact&apikey=%s" % (
            ticker, api_key)

        todataframe = pd.DataFrame()

        with urllib.request.urlopen(url_string) as url:
            datax = json.loads(url.read().decode())

            datax = datax['Time Series (Daily)']
            df = pd.DataFrame(
                columns=['Date', 'Open', 'High', 'Low', 'Close', 'Volume'])
            for k, v in datax.items():
                date = dt.datetime.strptime(k, '%Y-%m-%d')
                # values ordered to match the DataFrame columns:
                # Open, High, Low, Close, Volume
                data_row = [
                    date.date(),
                    float(v['1. open']),
                    float(v['2. high']),
                    float(v['3. low']),
                    float(v['4. close']),
                    float(v['5. volume'])
                ]
                #                print(data_row)
                df.loc[-1, :] = data_row
                df.index = df.index + 1

        todataframe = df
        #todataframe.head()
        #print(todataframe)

        #part 2

        #todataframe['Date'] = pd.to_datetime(todataframe.Date,format='%Y-%m-%d')
        #todataframe.index = todataframe['Date']
        #plt.figure(figsize=(16,8))
        #plt.plot(todataframe['Close'], label='Closing Price')

        #part 3

        from sklearn.preprocessing import MinMaxScaler
        from tensorflow.python.keras.layers import Dense, Dropout, LSTM
        from tensorflow.python.keras import Sequential
        #dataframe creation
        seriesdata = todataframe.sort_index(ascending=True, axis=0)
        new_seriesdata = pd.DataFrame(index=range(0, len(todataframe)),
                                      columns=['Date', ptype])
        length_of_data = len(seriesdata)
        for i in range(0, length_of_data):
            new_seriesdata['Date'][i] = seriesdata['Date'][i]
            new_seriesdata[ptype][i] = seriesdata[ptype][i]
        #setting the index again
        new_seriesdata.index = new_seriesdata.Date
        new_seriesdata.drop('Date', axis=1, inplace=True)
        #creating train and test sets; this uses all of the data present in the dataset
        myseriesdataset = new_seriesdata.values
        totrain = myseriesdataset[0:new_seriesdata.size, :]
        tovalid = myseriesdataset[new_seriesdata.size:, :]

        #print(len(totrain))
        #print(len(tovalid))

        #part 4

        scalerdata = MinMaxScaler(feature_range=(0, 1))
        scale_data = scalerdata.fit_transform(myseriesdataset)
        x_totrain, y_totrain = [], []
        length_of_totrain = len(totrain)
        for i in range(60, length_of_totrain):
            x_totrain.append(scale_data[i - 60:i, 0])
            y_totrain.append(scale_data[i, 0])
        x_totrain, y_totrain = np.array(x_totrain), np.array(y_totrain)
        x_totrain = np.reshape(x_totrain,
                               (x_totrain.shape[0], x_totrain.shape[1], 1))
        #LSTM neural network
        lstm_model = Sequential()
        lstm_model.add(
            LSTM(units=50,
                 return_sequences=True,
                 input_shape=(x_totrain.shape[1], 1)))
        lstm_model.add(LSTM(units=50))
        lstm_model.add(Dense(1))
        lstm_model.compile(loss='mean_squared_error', optimizer=algo)
        lstm_model.fit(x_totrain, y_totrain, epochs=3, batch_size=1, verbose=2)
        #predicting next data stock price
        myinputs = new_seriesdata[len(new_seriesdata) -
                                  (len(tovalid) + daysx) - 60:].values
        myinputs = myinputs.reshape(-1, 1)
        myinputs = scalerdata.transform(myinputs)
        tostore_test_result = []
        for i in range(60, myinputs.shape[0]):
            tostore_test_result.append(myinputs[i - 60:i, 0])
        tostore_test_result = np.array(tostore_test_result)
        tostore_test_result = np.reshape(
            tostore_test_result,
            (tostore_test_result.shape[0], tostore_test_result.shape[1], 1))
        myclosing_priceresult = lstm_model.predict(tostore_test_result)
        myclosing_priceresult = scalerdata.inverse_transform(
            myclosing_priceresult)

        #part 5

        #print(len(tostore_test_result));
        #        print(myclosing_priceresult);
        xm = myclosing_priceresult.tolist()

        return jsonify(xm)
Example #23
class Predictioner:
    def __init__(self):
        self.model = Sequential()
        self.setup_model()
        self.compile_model()

    def update_input(self, train_x, train_y):
        self.push_train_sets(train_x, train_y)
        self.y_scaler = MinMaxScaler()
        self.x_scaler = MinMaxScaler()
        self.reshape_train_sets()
        self.adjust_scalers()

    def push_train_sets(self, train_x, train_y):
        self.train_x = train_x
        self.train_y = train_y

    def reshape_train_sets(self):
        self.train_x = reshaper(self.train_x)
        self.train_y = reshaper(self.train_y)

    def adjust_scalers(self):
        self.train_x = self.x_scaler.fit_transform(self.train_x)
        self.train_y = self.y_scaler.fit_transform(self.train_y)

    def setup_model(self):
        self.model.add(
            Dense(99,
                  input_dim=1,
                  activation='softmax',
                  kernel_initializer='he_uniform'))
        self.model.add(
            Dense(120, activation='tanh', kernel_initializer='he_uniform'))
        self.model.add(
            Dense(256, activation='tanh', kernel_initializer='he_uniform'))
        self.model.add(
            Dense(90, activation='relu', kernel_initializer='he_uniform'))
        self.model.add(
            Dense(20, activation='tanh', kernel_initializer='he_uniform'))
        self.model.add(
            Dense(10, activation='tanh', kernel_initializer='he_uniform'))
        self.model.add(Dense(1))

    def compile_model(self):
        self.model.compile(loss='mse', optimizer='adam')

    def fit_model(self):
        self.model.fit(self.train_x,
                       self.train_y,
                       epochs=300,
                       batch_size=10,
                       verbose=0)

    def predict(self, prediction_interval_x):
        prediction_interval_x = reshaper(prediction_interval_x)
        prediction_interval_x = self.x_scaler.transform(prediction_interval_x)

        predicted_y = self.model.predict(prediction_interval_x)

        self.x_plot = self.x_scaler.inverse_transform(self.train_x)
        self.y_plot = self.y_scaler.inverse_transform(self.train_y)
        self.x_pred_plot = self.x_scaler.inverse_transform(
            prediction_interval_x)
        self.y_pred_plot = self.y_scaler.inverse_transform(predicted_y)
        return self.y_pred_plot

    def visualize(self):
        pyplot.scatter(self.x_pred_plot, self.y_pred_plot, label='Predicted')
        pyplot.scatter(self.x_plot, self.y_plot, label='Actual', s=0.1)
        pyplot.title('Input (x) versus Output (y)')
        pyplot.xlabel('Input Variable (x)')
        pyplot.ylabel('Output Variable (y)')
        pyplot.legend()
        pyplot.show()
Example #24
model.add(Dense(units=10, activation='relu', input_shape=(n_cols-1,)))
# note: input_shape is only honored on the first layer; Keras ignores it on the rest
model.add(Dense(units=10, activation='relu', input_shape=(n_cols-1,)))
model.add(Dense(units=10, activation='relu', input_shape=(n_cols-1,)))
model.add(Dense(units=10, activation='relu', input_shape=(n_cols-1,)))
model.add(Dense(units=10, activation='relu', input_shape=(n_cols-1,)))

model.add(Dense(units=1))

model.compile(loss='mean_squared_error', optimizer='adam')
y = data.quality
x = data.drop('quality', axis=1)
y = y.values
x = x.values

msn = []
scaler = StandardScaler()

for items in range(0,51):
  xTrain, xTest, yTrain, yTest = train_test_split(x, y, test_size = 0.3)
  scaler.fit(xTrain)
  xTrain = scaler.transform(xTrain)
  xTest = scaler.transform(xTest)
  model.fit(xTrain, yTrain, epochs=100)
  predicted_y = model.predict(xTest)
  msn.append(mean_squared_error(yTest, predicted_y))

msn_arr = np.array(msn)
print('mean is:{} and standard deviation is:{}'.format(np.mean(msn_arr), np.std(msn_arr)))


Example #25
'''
How to adjust the learning rate.
The default is lr=0.01. First import SGD:
from keras.optimizers import SGD
then define an sgd optimizer:
sgd=SGD(lr=0.1)
'''
model = Sequential()

# define the optimization algorithm and set the learning rate
sgd = SGD(lr=0.1)

# build a network with a 1-10-1 structure
model.add(Dense(units=10, input_dim=1, name='fc_1'))
model.add(Activation('tanh'))
model.add(Dense(units=1, input_dim=10, name='fc_2'))
model.add(Activation('tanh'))

# compile the model and print its structure
model.compile(optimizer=sgd, loss='mse')
model.summary()

for step in range(10001):
    cost = model.train_on_batch(x_data, y_data)
    if step % 500 == 0:
        print("cost", cost)

y_pred = model.predict(x_data)
plt.scatter(x_data, y_data)
plt.plot(x_data, y_pred, 'r-', lw=3)
plt.show()
Example #26
def evaluate_model(trainX, trainy, testX, testy):
    verbose, epochs, batch_size = 0, 10, 32
    n_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[2], trainy.shape[1]
    # model = Sequential()
    # model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(n_timesteps, n_features)))
    # model.add(Conv1D(filters=64, kernel_size=3, activation='relu'))
    # model.add(Dropout(0.5))
    # model.add(MaxPooling1D(pool_size=2))
    # model.add(Flatten())
    # model.add(Dense(100, activation='relu'))
    # model.add(Dense(n_outputs, activation='softmax'))
    # model.summary()
    model = Sequential()
    model.add(LSTM(100, input_shape=(n_timesteps, n_features)))
    model.add(Dropout(0.5))
    model.add(Dense(100, activation='relu'))
    model.add(Dense(n_outputs, activation='softmax'))
    # model.summary()
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # fit network
    model.fit(trainX,
              trainy,
              epochs=epochs,
              batch_size=batch_size,
              verbose=verbose)
    # evaluate model
    # _, accuracy = model.evaluate(testX, testy, batch_size=batch_size, verbose=0)
    # return accuracy
    # save model
    model.save('./models/model_test.h5')
    keras_model = load_model('./models/model_test.h5')
    # onnx_model = onnxmltools.convert_keras(keras_model)
    # onnxmltools.utils.save_model(onnx_model, './models/model_test.onnx')
    tf.keras.utils.plot_model(
        model,
        to_file="CNN.png",
        show_shapes=False,
        show_dtype=False,
        show_layer_names=True,
        rankdir="TB",
        expand_nested=False,
        dpi=96,
    )

    # load model
    model = load_model('./models/model_test.h5')

    y_predict = model.predict(testX, batch_size=batch_size, verbose=verbose)
    # print('y_predict:', y_predict)
    y_predict = np.argmax(y_predict, axis=1)
    testy = np.argmax(testy, axis=1)
    y_true = np.reshape(testy, [-1])
    y_pred = np.reshape(y_predict, [-1])

    # evaluation
    accuracy = accuracy_score(y_true, y_pred)
    precision = precision_score(y_true, y_pred, average='macro')
    recall = recall_score(y_true, y_pred, average='macro')
    f1score = f1_score(y_true, y_pred, average='macro')
    return [accuracy, precision, recall, f1score]
Example #27
class Predictioner:
    numpy.random.seed(1234)
    set_seed(1234)

    def __init__(self):
        self.model = Sequential()
        self.setup_default_model()
        self.compile_model()

    def save_model(self, path):
        self.model.save(path)

    def load_model(self, path):
        self.model = keras.models.load_model(path)

    def update_input(self, train_x, train_y):
        self.push_train_sets(train_x, train_y)
        self.y_scaler = MinMaxScaler()
        self.x_scaler = MinMaxScaler()
        self.reshape_train_sets()
        self.adjust_scalers()

    def push_train_sets(self, train_x, train_y):
        self.train_x = train_x
        self.train_y = train_y

    def reshape_train_sets(self):
        self.train_x = reshaper(self.train_x, self.train_x.shape[1])
        self.train_y = reshaper(self.train_y, 1)

    def adjust_scalers(self):
        self.train_x = self.x_scaler.fit_transform(self.train_x)
        self.train_y = self.y_scaler.fit_transform(self.train_y)

    def setup_default_model(self):
        self.model.add(Dense(30))
        self.model.add(Dense(90, activation='relu'))
        self.model.add(Dense(45, activation='relu'))
        self.model.add(Dense(20, activation='relu'))
        self.model.add(Dense(10, activation='relu'))
        self.model.add(Dense(1))

    def compile_model(self):
        self.model.compile(
            optimizer=keras.optimizers.Adam(),
            loss=keras.losses.mean_squared_error,
            metrics=[
                keras.metrics.mean_squared_error,
                keras.metrics.mean_squared_logarithmic_error,
                keras.metrics.mean_absolute_percentage_error,
                keras.metrics.mean_absolute_error,
            ]
        )

    def fit_model(self, verbose=0):
        self.model.fit(
            self.train_x,  # [:int(len(self.train_x) * 0.66)],
            self.train_y,  # [:int(len(self.train_y) * 0.66)],
            epochs=300,
            batch_size=10,
            verbose=verbose,
            # validation_data=(self.train_y[int(len(self.train_x) * 0.66):],
            #                 self.train_x[int(len(self.train_x) * 0.66):])
        )

    def evaluate(self, x_test, y_test):
        return self.model.evaluate(x_test, y_test, batch_size=12, verbose=1)

    def predict(self, prediction_interval_x):
        prediction_interval_x = self.x_scaler.transform(prediction_interval_x)
        predicted_y = self.model.predict(prediction_interval_x)

        self.x_plot = self.x_scaler.inverse_transform(self.train_x)
        self.y_plot = self.y_scaler.inverse_transform(self.train_y)
        self.x_pred_plot = self.x_scaler.inverse_transform(prediction_interval_x)
        self.y_pred_plot = self.y_scaler.inverse_transform(predicted_y)
        return self.y_pred_plot

    def visualize(self):
        pyplot.scatter(self.x_pred_plot, self.y_pred_plot, label='Predicted')
        pyplot.scatter(self.x_plot, self.y_plot, label='Actual')
        pyplot.title('Input (x) versus Output (y)')
        pyplot.xlabel('Input Variable (x)')
        pyplot.ylabel('Output Variable (y)')
        pyplot.legend()
        pyplot.show()
Example #28
    train_labels.append(0)

train_labels = np.array(train_labels)
train_samples = np.array(train_samples)
test_labels = np.array(test_labels)
test_samples = np.array(test_samples)
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_train_samples = scaler.fit_transform((train_samples).reshape(-1, 1))
scaled_test_samples = scaler.fit_transform((test_samples).reshape(-1, 1))
model = Sequential([
    Dense(16, input_shape=(1, ), activation='relu'),
    Dense(32, activation='relu'),
    Dense(2, activation='softmax')
])
model.summary()
tf.losses.MeanSquaredError  # stray no-op expression; the loss actually used is set in compile() below
model.compile(Adam(lr=0.0001),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(scaled_train_samples,
          train_labels,
          batch_size=20,
          epochs=10,
          validation_split=0.2,
          shuffle=True,
          verbose=2)
predictions = model.predict(scaled_test_samples, batch_size=10, verbose=0)
predictions = model.predict_classes(scaled_test_samples,
                                    batch_size=10,
                                    verbose=0)
print(predictions)
Example #29
)  # the 4th column is extracted and converted into an array of 1D arrays
output_data = to_categorical(output_data, 3)

# building the model with Keras
iris_model = Sequential()
iris_model.add(Dense(5, input_shape=(4, ), activation="tanh"))
iris_model.add(Dense(24, activation="relu"))
iris_model.add(Dense(3, activation="softmax"))

sgd = SGD(lr=0.001)

iris_model.compile(loss="categorical_crossentropy",
                   optimizer=sgd,
                   metrics=["accuracy"])
iris_model.fit(x=input_data,
               y=output_data,
               batch_size=10,
               epochs=500,
               verbose=1)

# Single test
test = np.array([[5.1, 3.5, 1.4, 0.2], [5.9, 3., 5.1, 1.8],
                 [4.9, 3., 1.4, 0.2], [5.8, 2.7, 4.1, 1.]])
predictions = iris_model.predict(test)
index_max_predictions = np.argmax(predictions, axis=1)
print(index_max_predictions)

for i, class_idx in enumerate(index_max_predictions):
    print("Iris with the properties {} belongs to class: {}".format(
        test[i], iris_label_array[class_idx]))
Example #30
#getting an integer index below voc_size (10e3) from the vocabulary for every word
onehot_repr = [one_hot(words, voc_size) for words in sent]
print(onehot_repr)

#word embedding representation
from tensorflow.python.keras.layers.embeddings import Embedding
from keras.preprocessing.sequence import pad_sequences  #making sure the sentences are of equal size

from tensorflow.python.keras import Sequential  #needed for the embedding
import numpy as np

sent_length = 8  #set the max sent length
embedded_docs = pad_sequences(onehot_repr, padding='pre', maxlen=sent_length)
print(embedded_docs)

dim = 10  # how many features

#adding embedding layer to the sequential model
model = Sequential()
model.add(Embedding(voc_size, 10, input_length=sent_length))
model.compile()
model.summary()

#see how the words got converted
print(model.predict(embedded_docs).shape)
print(embedded_docs[10])

#the 8 words; for each word, a vector of 10 floats
print(model.predict(embedded_docs)[0])