Example #1
model.fit(X_train, y_train, epochs=200, verbose=False)

# Results - Accuracy
scores = model.evaluate(X_train, y_train, verbose=False)
print("Training Accuracy: %.2f%%\n" % (scores[1] * 100))
scores = model.evaluate(X_test, y_test, verbose=False)
print("Testing Accuracy: %.2f%%\n" % (scores[1] * 100))

y_test_pred = (model.predict(X_test) > 0.5).astype("int32")  # predict_classes was removed in recent Keras
c_matrix = confusion_matrix(y_test, y_test_pred)
ax = sns.heatmap(c_matrix,
                 annot=True,
                 xticklabels=['No Diabetes', 'Diabetes'],
                 yticklabels=['No Diabetes', 'Diabetes'],
                 cbar=False,
                 cmap='Blues')
ax.set_xlabel('Prediction')
ax.set_ylabel('Actual')
plt.show()
plt.clf()

# roc curve
y_test_pred_probs = model.predict(X_test)
FPR, TPR, _ = roc_curve(y_test, y_test_pred_probs)
plt.plot(FPR, TPR)
plt.plot([0, 1], [0, 1], '--', color='black')  # diagonal reference line
plt.title('ROC Curve')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.show()
plt.clf()
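Example #1 uses a `model` that the excerpt never defines, along with the usual metric and plotting imports; a minimal sketch of what it assumes (layer sizes are guesses, not the original architecture):

import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, roc_curve
from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
model.add(Dense(16, activation='relu', input_dim=X_train.shape[1]))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])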
Example #2
def train_next_model(df):
    Y = df[['short-term_memorability', 'long-term_memorability']].values
    X = df[['inception', 'short_capt_pred', 'long_capt_pred']].values
    # X = df[['inception']].values
    print("X shape: ", X.shape)
    # X.shape = (6000,5191)
    # print("new X shape: ", X.shape)
    print("inception: ", X[0, 0])
    X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                        Y,
                                                        test_size=0.2,
                                                        random_state=1)
    nb_iterations = 8
    ## divided by 10 all regularizers values
    ## TODO: change activations params
    model = Sequential()
    ## TODO: add inception feature, change input_shape ?
    # model.add(Flatten(input_shape=(5191)))
    model.add(
        layers.Dense(12,
                     activation='relu',
                     kernel_regularizer=regularizers.l2(0.0001),
                     input_dim=3))
    model.add(layers.Dropout(0.6))
    model.add(
        layers.Dense(12,
                     activation='relu',
                     kernel_regularizer=regularizers.l2(0.0001)))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(2, activation='sigmoid'))

    # compile the model, changed from rmsprop to adagrad
    #Todo: try to change loss & metrics options (and some others ?)
    model.compile(optimizer='adagrad', loss='mse', metrics=['accuracy'])
    history = model.fit(X_train,
                        Y_train,
                        epochs=nb_iterations,
                        validation_data=(X_test, Y_test))
    # visualizing the model
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    ##len(loss) == nb_iteration ?
    epochs = range(1, len(loss) + 1)

    plt.plot(epochs, loss, 'bo', label='Training loss')
    plt.plot(epochs, val_loss, 'b', label='Validation loss')
    plt.title('Training and validation loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    plt.show()

    plt.figure()
    # note: older Keras versions log these keys as 'acc'/'val_acc'; newer ones use 'accuracy'/'val_accuracy'
    acc = history.history['acc']
    val_acc = history.history['val_acc']
    plt.plot(epochs, acc, 'bo', label='Training acc')
    plt.plot(epochs, val_acc, 'b', label='Validation acc')
    plt.title('Training and validation accuracy')
    plt.xlabel('Epochs')
    plt.ylabel('Acc')
    plt.legend()
    plt.show()
    predictions = model.predict(X_test)
    print('Short term Spearman\'s correlation coefficient is: %.3f' %
          getSpearmanCorScore(predictions[:, 0], Y_test[:, 0]))
    print('Long term Spearman\'s correlation coefficient is: %.3f' %
          getSpearmanCorScore(predictions[:, 1], Y_test[:, 1]))
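The function relies on a `getSpearmanCorScore` helper it never defines; a plausible one built on scipy (only the name comes from the original, the body is an assumption):

from scipy.stats import spearmanr

def getSpearmanCorScore(predicted, actual):
    # Spearman rank correlation between predictions and ground truth
    return spearmanr(predicted, actual).correlation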
Example #3
model = Sequential()
model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dense(1))
model.compile(loss='mae', optimizer='adam')
history = model.fit(train_X,
                    train_y,
                    epochs=10,
                    batch_size=72,
                    validation_data=(test_X, test_y),
                    verbose=2)
'''
    Plot the data
'''
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.show()

# make the prediction; to compute the loss on the original scale, invert the scaling back to the original range first
yHat = model.predict(test_X)
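# The original omits flattening the 3-D LSTM test input back to 2-D before the
# concatenations below; the usual step in this tutorial pattern (an assumption):
test_X = test_X.reshape((test_X.shape[0], test_X.shape[2]))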

inv_yHat = concatenate((yHat, test_X[:, 1:]), axis=1)  # array concatenation
inv_yHat = inv_yHat[:, 0]

test_y = test_y.reshape((len(test_y), 1))
inv_y = concatenate((test_y, test_X[:, 1:]), axis=1)
inv_y = inv_y[:, 0]

rmse = sqrt(mean_squared_error(inv_yHat, inv_y))
print('Test RMSE: %.3f' % rmse)
Example #4
import os
if not os.path.exists("SavedModels/classifier"):
    os.makedirs("SavedModels/classifier")
classifier.save("SavedModels/classifier/model.h5")

#############################################
# Part 3 - Making new prediction
#############################################
from keras.models import load_model
classifier = load_model("SavedModels/classifier/model.h5")
import numpy as np
from keras.preprocessing import image
# load image
test_image = image.load_img('dataset/single_prediction/cat_or_dog_2.jpg',
                            target_size=(64, 64))
# convert into array
# dimension = 64, 64, 3
test_image = image.img_to_array(test_image)
# add batch size as 4th dimension
test_image = np.expand_dims(test_image, axis=0)
# predict
result = classifier.predict(test_image)
# get class attributes
print(training_set.class_indices)  # mapping from class name to index

if result[0][0] >= 0.5:  # threshold the sigmoid output instead of testing exact equality
    prediction = 'dog'
else:
    prediction = 'cat'
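`training_set` is assumed to come from a Keras ImageDataGenerator; a sketch of the conventional setup (directory layout and parameters are assumptions):

from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1. / 255)
training_set = train_datagen.flow_from_directory('dataset/training_set',
                                                 target_size=(64, 64),
                                                 batch_size=32,
                                                 class_mode='binary')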
Example #5
print("Evaluation result on Test Data : Loss = {}, accuracy = {}".format(
    test_loss, test_acc))

# Task-1 : Plotting the graph for accuracy & loss values respectively of both train and test data.
print(history.history.keys())

# For accuracy values
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()

# For loss values
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()

# Task-2 : Printing an image and then predicting the digit of the image from the built model.
plt.imshow(test_data[3].reshape(28, 28), cmap='Greys')
plt.title("Digit from the test data")
plt.show()
pred = model.predict(test_data[3].reshape(1, 784))
print("Predicted digit:", pred.argmax())
Example #6
def models():
    data = request.get_json()

    if data['model'] == 'svm':
        training = pd.read_csv('training.csv')
        testing = pd.read_csv('testing.csv')

        x_train = training.drop(columns=['target'])
        y_train = training['target']

        x_test = testing.drop(columns=['target'])
        y_test_actual = testing['target']

        classifier = SVC(kernel=data['kernel'])
        classifier.fit(x_train, y_train)

        y_test_obtained = classifier.predict(x_test)

        testing['obtained'] = y_test_obtained
        testing.to_csv('result.csv', index=False)

        plot_confusion_matrix(y_test_actual, y_test_obtained)
        plt.savefig('figure.png')

    if data['model'] == 'knn':
        training = pd.read_csv('training.csv')
        testing = pd.read_csv('testing.csv')

        x_train = training.drop(columns=['target'])
        y_train = training['target']

        x_test = testing.drop(columns=['target'])
        y_test_actual = testing['target']

        classifier = KNeighborsClassifier(n_neighbors=data['k'])
        classifier.fit(x_train, y_train)

        y_test_obtained = classifier.predict(x_test)

        testing['obtained'] = y_test_obtained
        testing.to_csv('result.csv', index=False)

        plot_confusion_matrix(y_test_actual, y_test_obtained)
        plt.savefig('figure.png')

    if data['model'] == 'logistic_regression':
        training = pd.read_csv('training.csv')
        testing = pd.read_csv('testing.csv')

        x_train = training.drop(columns=['target'])
        y_train = training['target']

        x_test = testing.drop(columns=['target'])
        y_test_actual = testing['target']

        classifier = LogisticRegression(random_state=0,
                                        solver="liblinear",
                                        penalty=data['penalty'])
        classifier.fit(x_train, y_train)

        y_test_obtained = classifier.predict(x_test)

        testing['obtained'] = y_test_obtained
        testing.to_csv('result.csv', index=False)

        plot_confusion_matrix(y_test_actual, y_test_obtained)
        plt.savefig('figure.png')

    if data['model'] == 'linear_regression':
        training = pd.read_csv('training.csv')
        testing = pd.read_csv('testing.csv')

        x_train = training.drop(columns=['target'])
        y_train = training['target']

        x_test = testing.drop(columns=['target'])
        y_test_actual = testing['target']

        classifier = LinearRegression()
        classifier.fit(x_train, y_train)

        y_test_obtained = classifier.predict(x_test)

        testing['obtained'] = y_test_obtained
        testing.to_csv('result.csv', index=False)

        # note: these scatter/line plots assume a single feature column
        plt.scatter(x_train, y_train, color='blue')
        plt.scatter(x_test, y_test_actual, color='green')
        plt.plot(x_test, y_test_obtained, color='red')
        plt.title(
            "MSE: " +
            str(metrics.mean_squared_error(y_test_actual, y_test_obtained)))
        plt.legend(['Training Data', 'Actual Testing Data', 'Fitted Line'])
        plt.savefig('figure.png')

    if data['model'] == 'nn':
        training = pd.read_csv('training.csv')
        testing = pd.read_csv('testing.csv')

        x_train = training.drop(columns=['target'])
        y_train = training['target']

        x_test = testing.drop(columns=['target'])
        y_test_actual = testing['target']

        model = Sequential()
        model.add(
            Dense(data['layers_dims'][0],
                  activation=data['layers_activation'][0],
                  input_shape=(x_train.shape[1], )))
        for i in range(1, len(data['layers_dims'])):
            model.add(
                Dense(data['layers_dims'][i],
                      activation=data['layers_activation'][i]))
        model.add(Dense(1))
        model.compile(optimizer='adam', loss='mean_squared_error')
        model.fit(x_train, y_train, validation_split=0.2, epochs=30)

        y_test_obtained = model.predict(x_test)

        testing['obtained'] = y_test_obtained
        testing.to_csv('result.csv', index=False)

    return jsonify([]), 200
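The `models` view is assumed to be registered on a Flask app; one hypothetical wiring (route name and HTTP method are assumptions):

from flask import Flask, request, jsonify

app = Flask(__name__)
app.add_url_rule('/models', view_func=models, methods=['POST'])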
Example #7
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(train_data,
                    train_labels_one_hot,
                    batch_size=256,
                    epochs=20,
                    verbose=1,
                    validation_data=(test_data, test_labels_one_hot))

[test_loss, test_acc] = model.evaluate(test_data, test_labels_one_hot)
print("Evaluation result on Test Data : Loss = {}, accuracy = {}".format(
    test_loss, test_acc))

#graph
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
n = 100
plt.imshow(test_images[n, :, :])
single_test = test_images[n, :, :]
single_test = single_test.reshape(1, dimData)

predicted = model.predict(single_test)
print(predicted)
plt.imshow(predicted)  # visualizes the 1x10 class-probability vector as an image strip
Example #8
history = model.fit(X_train,  # reconstructed opening; the excerpt starts mid-call (cf. the commented fit below)
                    Y_train,
                    epochs=400,
                    callbacks=[earlyStopping],  # Keras expects a list of callbacks
                    validation_data=(X_val, Y_val))
#history = model.fit(X_train, Y_train, batch_size=50, epochs=400, validation_data=(X_val, Y_val)) #without early stopping

#print(history.history.keys())
plt.figure(1)
plt.plot(history.history['loss'], label="Training Loss")
plt.plot(history.history['val_loss'], label="Validation Loss")
plt.legend()
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.show()

y_predicted = model.predict(x_test)
#print(y_predicted.shape)
for i in range(test_instances):
    index = y_predicted[i].argmax()
    y_predicted[i] = [0, 0]
    y_predicted[i, index] = 1

accuracy = accuracy_score(y_test_matrix, y_predicted)
print("accuracy = ", accuracy)
print(
    'Balanced accuracy: ',
    balanced_accuracy_score(y_test_matrix.argmax(axis=1),
                            y_predicted.argmax(axis=1)))
print('f measure = ',
      f1_score(y_test_matrix.argmax(axis=1), y_predicted.argmax(axis=1)))
conf_matrix = confusion_matrix(y_test_matrix.argmax(axis=1),
                               y_predicted.argmax(axis=1))
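If preferred, the per-row argmax/one-hot loop above collapses to a single vectorized line:

# build one-hot rows directly from the argmax indices
y_predicted = np.eye(2)[y_predicted.argmax(axis=1)]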
Example #9
class DNN(Model):
    """
    This class is parent class for all Deep neural network models. Any class
    inheriting this class should implement `make_default_model` method which
    creates a model with a set of hyper parameters.
    """
    def __init__(self, input_shape, num_classes, **params):
        """
        Constructor to initialize the deep neural network model. Takes the input
        shape and number of classes and other parameters required for the
        abstract class `Model` as parameters.

        Args:
            input_shape (tuple): shape of the input
            num_classes (int): number of different classes ( labels ) in the data.
            **params: Additional parameters required by the underlying abstract
                class `Model`.

        """
        super(DNN, self).__init__(**params)
        self.input_shape = input_shape
        self.model = Sequential()
        self.make_default_model()
        self.model.add(Dense(num_classes, activation='softmax'))
        self.model.compile(loss='binary_crossentropy',
                           optimizer='adam',
                           metrics=['accuracy'])
        print(self.model.summary(), file=sys.stderr)
        self.save_path = self.save_path or self.name + '_best_model.h5'

    def load_model(self, to_load):
        """
        Load the model weights from the given path.

        Args:
            to_load (str): path to the saved model file in h5 format.

        """
        try:
            self.model.load_weights(to_load)
        except Exception:
            sys.stderr.write("Invalid saved file provided")
            sys.exit(-1)

    def save_model(self):
        """
        Save the model weights to `save_path` provided while creating the model.
        """
        self.model.save_weights(self.save_path)

    def train(self, x_train, y_train, x_val=None, y_val=None, n_epochs=50):
        """
        Train the model on the given training data.


        Args:
            x_train (numpy.ndarray): samples of training data.
            y_train (numpy.ndarray): labels for training data.
            x_val (numpy.ndarray): Optional, samples in the validation data.
            y_val (numpy.ndarray): Optional, labels of the validation data.
            n_epochs (int): Number of epochs to be trained.

        """
        best_acc = 0
        if x_val is None or y_val is None:
            x_val, y_val = x_train, y_train
        for i in range(n_epochs):
            # Shuffle the data for each epoch in unison inspired
            # from https://stackoverflow.com/a/4602224
            p = np.random.permutation(len(x_train))
            x_train = x_train[p]
            y_train = y_train[p]
            self.model.fit(x_train, y_train, batch_size=32, epochs=1)
            loss, acc = self.model.evaluate(x_val, y_val)
            if acc > best_acc:
                best_acc = acc  # note: the best weights are not checkpointed here
        self.trained = True

    def predict_classes(self, x, batch_size=32, verbose=1) -> Tuple:
        # predict_classes was removed from recent Keras; argmax over the softmax output is equivalent
        return np.argmax(self.model.predict(x, batch_size=batch_size, verbose=verbose), axis=1)

    def predict_one(self, sample):
        if not self.trained:
            sys.stderr.write(
                "Model should be trained or loaded before doing predict\n")
            sys.exit(-1)
            #print out array, see the thresholds. should normalize it (turn it to probabilities) and set our own thresholds (can adjust).
            #if neutral is a certain threshold, make it neutral? or if the others are low, then neutral?
        predicted_array = self.model.predict(np.array([sample]))
        print(predicted_array, file=sys.stderr)
        return np.argmax(predicted_array)
        # Thresholded variant kept from the original, unreachable after the return above:
        # if np.amax(predicted_array) > .98:
        #     return np.argmax(predicted_array)
        # return 0

    def make_default_model(self) -> None:
        """
        Make the model with default hyper parameters
        """
        # This has to be implemented by child classes. The reason is that the
        # hyper parameters depends on the model.
        raise NotImplementedError()
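Since `make_default_model` is abstract, a child class supplies the hidden layers; a minimal hypothetical subclass (layer sizes are assumptions):

class SimpleDNN(DNN):
    def make_default_model(self) -> None:
        # hidden layers only; DNN.__init__ appends the softmax output layer
        self.model.add(Dense(64, activation='relu', input_shape=self.input_shape))
        self.model.add(Dense(64, activation='relu'))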
Example #10
LSTM_model.add(Dense(12, activation='sigmoid'))
LSTM_model.compile(loss="binary_crossentropy",
                   optimizer='adam',
                   metrics=['accuracy'])

# Training and validation of model is shown along with loss and accuracy per epoch
history = LSTM_model.fit(x_train,
                         y_train,
                         epochs=18,
                         batch_size=64,
                         validation_split=0.15,
                         callbacks=[
                             EarlyStopping(monitor='val_loss',
                                           patience=3,
                                           min_delta=0.0001)
                         ])

# Accuracy of model is calculated and printed
accuracy = LSTM_model.evaluate(x_test, y_test)
print('Accuracy: ', accuracy)

# Test data read in
testData = pd.read_json('nlp_test.json')
testData = testData.transpose()

# Test data cleaned and model used for prediction
testData['body'] = testData['body'].apply(clean)
test = tokenizer.texts_to_sequences(testData['body'].values)
test = pad_sequences(test, maxlen=100)  # pad the tokenized test sequences
predictions = LSTM_model.predict(test)
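`clean` and `tokenizer` are assumed to have been set up earlier in the script; a sketch of the tokenizer side (the vocabulary size and training frame name are assumptions):

from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences

tokenizer = Tokenizer(num_words=5000)
tokenizer.fit_on_texts(trainData['body'].values)  # trainData is a hypothetical name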
Example #11
print(scaled.shape)

sequenceLength = 20
n_features = len(data[0])

# note: `input` shadows the Python builtin; a name like `X_seq` would be clearer
input = numpy.zeros(
    (len(scaled) - sequenceLength, sequenceLength, len(scaled[0])))
output = numpy.zeros((len(scaled) - sequenceLength, len(scaled[0])))
for i in range(0, len(scaled) - sequenceLength):
    for j in range(sequenceLength):
        input[i, j] = scaled[i + j]
    output[i] = scaled[i + sequenceLength]
print(input.shape)
print(output.shape)

model = Sequential()
model.add(
    LSTM(n_features,
         input_shape=(input.shape[1], input.shape[2]),
         return_sequences=True))
model.add(Dropout(0.1))
model.add(Dense(n_features))
#model.add(Activation('softmax'))

# note: with a linear output layer this is effectively regression, where 'mse'
# would be the conventional loss; categorical_crossentropy assumes probabilities
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

fitted = model.fit(input, output, epochs=400, batch_size=20)

print(output[70])
print(scaler.inverse_transform([model.predict(input)[70]]))
Example #12
input_data = np.array(inputs, dtype="float")
results = np.array(results)

(trainX, testX, trainY, testY) = train_test_split(input_data, results, test_size=0.25, random_state=42)
window = input_data.shape[1]
model = Sequential()
model.add(Dense(365*24, input_shape=(window,), kernel_initializer='normal', activation="relu"))
model.add(Dense(365, kernel_initializer='normal', activation="relu"))
model.add(Dense(12*4, kernel_initializer='normal', activation="relu"))
model.add(Dense(12, kernel_initializer='normal', activation="relu"))
model.add(Dense(1, kernel_initializer='normal'))


print("[INFO] training network...")
opt = SGD(lr=learning_rate)  # note: defined but unused; the compile call below uses 'adam'
model.compile(loss="mean_squared_error", optimizer='adam',
              metrics=["mse", "mae", "mape", "cosine"])
# train the neural network
H = model.fit(trainX, trainY, validation_data=(testX, testY),
              epochs=epochs, batch_size=32)

# save the model and label binarizer to disk
print("[INFO] serializing network...")
model.save(filename)

# evaluate the network
print("[INFO] evaluating network...")
predictions = model.predict(testX, batch_size=32)
print("Mean squared log error:", mean_squared_log_error(testY, predictions[..., 0]))
Example #13
class NeuralPlayer(object):
    def __init__(self, name, start_balance):
        self.money = start_balance
        self.hands = []
        self.bets = []
        self.name = name
        self.lose = 0

        self.features = []
        self.rewards = []

        self.model = Sequential()
        self.model.add(Dense(32, input_shape=(4, ), activation='relu'))
        self.model.add(Dense(64, activation='relu'))
        self.model.add(Dense(32, activation='relu'))
        self.model.add(Dense(1, activation='sigmoid'))
        self.model.compile(loss='binary_crossentropy',
                           optimizer='adam',
                           metrics=['accuracy'])
        self.model.load_weights('model/%s.h5' % self.name)

        self.recognizer_model = load_model(HAND_RECOGNIZER_MODEL_DIR)
        print(
            "%s: Hi, im %s, a neural network based player and im ready to play"
            % (name, name))

    def make_bet(self, cards, min_pot, monies, enemy_hands):
        t = np.array([cards])
        hand = self.recognizer_model.predict(t)
        self.hands.append(np.argmax(hand))
        m = np.append(monies, enemy_hands)
        features = np.append(np.argmax(hand), m)
        self.features.append(features)
        ff = np.array([features])
        prediction = self.model.predict(ff)[0][0] * 100
        bet = min(int(prediction), self.money)
        # Clamp the bet between the minimum pot and the money available
        # (equivalent to the original chain of if/elif branches).
        bet = min(max(bet, min_pot), self.money)
        self.bets.append(bet)
        self.money -= bet
        print("%s: My bet is %s" % (self.name, bet))
        return bet

    def update(self, reward):
        self.money += reward
        print("%s: My balance is %s" % (self.name, self.money))

        self.rewards.append(np.array([0 if reward == 0 else reward / 200.0]))
        # res = self.model.fit(np.array(self.features), np.array(self.rewards), batch_size=32, epochs=10)
        x = np.array([self.features[-1]])
        y = np.array([self.rewards[-1]])
        self.model.train_on_batch(x, y)
        self.model.save_weights('model/%s.h5' % self.name)

        if self.money == 0:
            self.lose += 1
            return False
        else:
            return True
Example #14
from keras.layers import Dense, Embedding, Input, GlobalAveragePooling1D
from keras import Sequential, Model
import numpy as np
embed = (np.arange(0, 20) / 20.0).reshape(10, 2)
model = Sequential()
model.add(
    Embedding(input_dim=10,
              output_dim=2,
              input_length=3,
              weights=[embed],
              trainable=False))
model.add(GlobalAveragePooling1D())
model.add(Dense(1, activation='sigmoid'))
X = model.predict(np.array([1, 2, 3]).reshape(1, 3))
print(X)
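Because the embedding table is frozen, the pooled value can be checked by hand: GlobalAveragePooling1D simply averages the looked-up rows.

# expected pooled vector for input [1, 2, 3]: the mean of embedding rows 1..3
expected = embed[[1, 2, 3]].mean(axis=0)
print(expected)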
Example #15
plot_model(model,  # reconstructed opening; the original excerpt starts mid-call
           show_shapes=True,
           show_layer_names=True)
# fit network
history = model.fit(train_X,
                    train_y,
                    epochs=epochs,
                    batch_size=batch_size,
                    callbacks=[learning_rate_reduction],
                    verbose=verbose,
                    validation_data=(test_X, test_y),
                    shuffle=False)

#################### Test with the model ####################

# make a prediction
yhat = model.predict(test_X)

# inverse scaler
yhat = yhat * scaler
y_true = test_y * scaler

yhat = process_Y(yhat)
y_true = process_Y(y_true)

Acc_test = (yhat == y_true).astype(int).sum().sum() / yhat.size

# make a prediction
yhat1 = model.predict(train_X)
# inverse scaler
yhat1 = yhat1 * scaler
y_true1 = train_y * scaler
Example #16
# %% Load model
model = Sequential()
model.add(Dense(7, input_dim=6, activation='relu'))
model.add(Dense(14, activation='relu'))
model.add(Dense(28, activation='relu'))
model.add(Dense(56, activation='relu'))
model.add(Dense(112, activation='relu'))
model.add(Dense(128, activation='softmax'))

model.load_weights('models/NN_train-7-middle-item-as-label.h5')
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# %% Predicting
predictions = model.predict(queryData)
# sum_of_exps = np.sum

replacements = []
for prediction in predictions:
    # index of the highest-scoring class (equivalent to the np.where/np.max pattern)
    replacements.append(int(np.argmax(prediction)))

# for i, prediction in enumerate(replacements):
#     print(prediction, ' ', np.where(label[i] == 1)[0][0])

# %%
queries = []
for i in range(len(queryData)):
    queries.append(
Example #17
class NeuralNetwork(object):
    def __init__(self, input_nodes, hidden_nodes, output_nodes, lr=None):
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes
        self.lr = lr
        self.scales_x = []
        self.scales_y = []

        input_kernel_range = np.sqrt(6) / (np.sqrt(input_nodes) + np.sqrt(hidden_nodes))
        input_kernel_initializer = RandomUniform(minval=-input_kernel_range, maxval=input_kernel_range)
        input_layer = Dense(input_nodes,
                            kernel_initializer=input_kernel_initializer,
                            name='input')

        hidden_kernel_range = np.sqrt(6) / (np.sqrt(hidden_nodes) + np.sqrt(output_nodes))
        hidden_kernel_initializer = RandomUniform(minval=-hidden_kernel_range, maxval=hidden_kernel_range)
        hidden_layer = Dense(hidden_nodes,
                             kernel_initializer=hidden_kernel_initializer,
                             name='hidden')

        output_layer = Dense(output_nodes,
                             name='output')

        self.model = Sequential()
        self.model.add(input_layer)
        self.model.add(hidden_layer)
        self.model.add(output_layer)

    def train(self, x_train, y_train):
        self.set_normalize_scales(x_train, y_train)
        x_train = self.normalize(x_train, self.scales_x)
        y_train = self.normalize(y_train, self.scales_y)

        optimizer = SGD(lr=self.lr)
        self.model.compile(loss='mse', optimizer=optimizer)
        self.model.fit(x_train, y_train, batch_size=20, epochs=500)

    def evaluate(self, x_test, y_test):
        x_test = self.normalize(x_test, self.scales_x)
        y_test = self.normalize(y_test, self.scales_y)
        return self.model.evaluate(x_test, y_test)

    def predict(self, x):
        x = self.normalize(x, self.scales_x)
        y = self.model.predict(x)
        return self.unnormalize(y, self.scales_y)

    def set_normalize_scales(self, x, y):
        for i in range(x.shape[1]):
            mean, std = x[:, i].mean(), x[:, i].std()
            self.scales_x.append([mean, std])
        for i in range(y.shape[1]):
            mean, std = y[:, i].mean(), y[:, i].std()
            self.scales_y.append([mean, std])

    @staticmethod
    def normalize(data, scales):
        for i in range(0, len(scales)):
            mean, std = scales[i]
            data[:, i] = (data[:, i] - mean) / std
        return data

    @staticmethod
    def unnormalize(data, scales):
        for i in range(0, len(scales)):
            mean, std = scales[i]
            data[:, i] = data[:, i] * std + mean
        return data
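Hypothetical usage of the class above, assuming a tf.keras-style deferred build (shapes and learning rate are assumptions):

import numpy as np

x = np.random.rand(100, 3)
y = np.random.rand(100, 1)
nn = NeuralNetwork(input_nodes=3, hidden_nodes=8, output_nodes=1, lr=0.01)
nn.train(x, y)  # note: normalize() mutates its argument in place
print(nn.predict(np.random.rand(5, 3)))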
Example #18
class Model:
    def __init__(self):
        # The model
        self.model = Sequential()
        self.model.add(Dense(16, input_shape=[4,], activation='relu'))
        self.model.add(Dense(16, activation='relu'))
        self.model.add(Dense(2, activation='linear'))

        self.model.compile(optimizer=Adam(0.001), 
                           loss='mse')

        # Load the model if available
        try:
            self.model.load_weights('pole.h5')
        except:
            pass

        # The memory
        self.mem_size = 10000
        self.batch_size = 32
        self.index = 0

        self.states = np.zeros([self.mem_size, 4])
        self.states_ = np.zeros([self.mem_size, 4])
        self.actions = np.zeros([self.mem_size])
        self.rewards = np.zeros([self.mem_size])
        self.done = np.zeros([self.mem_size])

        # Greedy behavior
        self.e = 1
        self.e_decay = 0.0001

    def enough_mem(self):
        return self.index > self.batch_size

    def remember(self, state, reward, action, done, state_):
        self.states[self.index%self.mem_size] = state
        self.states_[self.index%self.mem_size] = state_
        self.actions[self.index%self.mem_size] = action
        self.rewards[self.index%self.mem_size] = reward
        self.done[self.index%self.mem_size] = done
        self.index += 1

    def save(self):
        self.model.save('pole.h5')
    
    def predict(self, state):
        if np.random.random() < self.e:
            return int(np.random.choice([0, 1]))
        
        out = self.model.predict(np.array([state]))
        return 1 if out[0][1] > out[0][0] else 0

    def train(self):
        if not self.enough_mem():
            return

        self.e -= self.e_decay
        self.e = 0.01 if self.e < 0.01 else self.e
        
        # sample only from the filled portion of the memory
        indexs = np.random.randint(0, min(self.index, self.mem_size), size=[self.batch_size])

        sample_s = [ np.array([self.states[i]]) for i in indexs ]
        sample_s_ = [ np.array([self.states_[i]]) for i in indexs ]
        sample_act = [ int(self.actions[i]) for i in indexs ]
        sample_r = [ self.rewards[i] for i in indexs ]
        sample_d = [ int(self.done[i]) for i in indexs ]

        X = [ s[0] for s in sample_s ]
        Y = []

        for state, state_, action, reward, done in zip(sample_s, sample_s_, sample_act, sample_r, sample_d):
            current_q = self.model.predict(state)[0]
            next_q = self.model.predict(state_)[0]
            max_q = np.amax(next_q)

            current_q[action] = reward + max_q * .99 * ( 1 - done )

            Y.append(current_q)
            
        X, Y = np.array(X), np.array(Y)

        self.model.fit(X, Y, epochs=1, verbose=0)
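A hypothetical loop wiring the class to CartPole, assuming the classic gym API in which step() returns four values:

import gym

env = gym.make('CartPole-v1')
agent = Model()
state = env.reset()
for _ in range(10000):
    action = agent.predict(state)
    state_, reward, done, _ = env.step(action)
    agent.remember(state, reward, action, done, state_)
    agent.train()
    state = env.reset() if done else state_
agent.save()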
Example #19
def f1(y_true, y_pred):
    # Reconstructed opening: the original excerpt was truncated. These are the
    # standard Keras-backend batch-wise recall/precision helpers that the
    # surviving lines below rely on (an assumption consistent with them).
    def recall(y_true, y_pred):
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        recall = true_positives / (possible_positives + K.epsilon())
        return recall

    def precision(y_true, y_pred):
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        precision = true_positives / (predicted_positives + K.epsilon())
        return precision

    precision = precision(y_true, y_pred)
    recall = recall(y_true, y_pred)
    return 2*((precision*recall)/(precision+recall+K.epsilon()))

classifier.compile(loss='binary_crossentropy',
                   optimizer="adam",
                   metrics=[f1])

classifier.fit(X_train, y_train, batch_size=1, epochs=300)

eval_model = classifier.evaluate(X_train, y_train)
print("\n\t eval_model=", eval_model)

y_pred = classifier.predict(X_test)


#print("\n\t y_test=",type(y_test),"\t y_pred=",type(y_pred))
y_pred = (y_pred > 0.5)
#y_pred=pd.Series(y_pred)

'''
    saving model
'''

model_json = classifier.to_json()

with open("/home/kapitsa/PycharmProjects/upwork/models/model.json", "w") as json_file:
    json_file.write(model_json)
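The counterpart loader for the JSON written above would look like this (weights would be restored separately with load_weights):

from keras.models import model_from_json

with open("/home/kapitsa/PycharmProjects/upwork/models/model.json", "r") as json_file:
    classifier = model_from_json(json_file.read())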
Example #20
accuracy = tumor_model_train_dropout.history['acc']  # reconstructed opening line (truncated in the original)
val_accuracy = tumor_model_train_dropout.history['val_acc']
loss = tumor_model_train_dropout.history['loss']
val_loss = tumor_model_train_dropout.history['val_loss']
epochs = range(len(accuracy))
plt.plot(epochs, accuracy, 'bo', label='Training accuracy')
plt.plot(epochs, val_accuracy, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()

predicted_classes = tumor_model.predict(X_test)

predicted_classes = np.argmax(np.round(predicted_classes), axis=1)

correct = np.where(predicted_classes == y_test)[0]
print("Found %d correct labels" % len(correct))
for i, correct in enumerate(correct[:9]):
    plt.subplot(3, 3, i + 1)
    plt.imshow(X_test[correct].reshape(28, 28),
               cmap='gray',
               interpolation='none')
    plt.title("Predicted {}, Class {}".format(predicted_classes[correct],
                                              y_test[correct]))
    plt.tight_layout()

incorrect = np.where(predicted_classes != y_test)[0]
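The excerpt breaks off after collecting the misclassified indices; a reconstruction of the symmetric loop, mirroring the correct-label plot above:

print("Found %d incorrect labels" % len(incorrect))
for i, incorrect in enumerate(incorrect[:9]):
    plt.subplot(3, 3, i + 1)
    plt.imshow(X_test[incorrect].reshape(28, 28),
               cmap='gray',
               interpolation='none')
    plt.title("Predicted {}, Class {}".format(predicted_classes[incorrect],
                                              y_test[incorrect]))
    plt.tight_layout()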
Example #21
model.compile(optimizer='adam',  # reconstructed opening; the optimizer choice is an assumption
              loss='binary_crossentropy',
              metrics=['accuracy'])

callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)

# fit the model
model.fit(X_train,
          y_train,
          epochs=20,
          batch_size=5000,
          verbose=1,
          callbacks=[callback],  # wire in the EarlyStopping callback defined above
          validation_data=(X_test, y_test),
          class_weight=class_weights)

res = model.predict(X_test)
#print res[:100]
bkg_acc = 0
bkg_trials = 0
sig_acc = 0
sig_trials = 0
for r, t in zip(res, y_test):
    if t == 1:
        sig_trials += 1
        sig_acc += int(r[0] > 0.5)
    else:
        bkg_trials += 1
        bkg_acc += int(r[0] <= 0.5)
print('sig_acc:', sig_acc, '/', sig_trials)
print('bkg_acc:', bkg_acc, '/', bkg_trials)
Example #22
model.compile(loss='mean_squared_error', optimizer=SGD(lr=lr), metrics=['accuracy'])

# batch_size define speed of studying
history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, callbacks=[stopper], verbose=verbose)

score = model.evaluate(x_test, y_test, verbose=1)

print("Accuracy on train data\t %.f%%" % (history.history['acc'][stopper.stopped_epoch] * 100))
print("Accuracy on testing data %.f%%" % (score[1] * 100))
print("Loss on train data %.f%%" % (history.history['loss'][stopper.stopped_epoch] * 100))
gr.plot_history_separte(history, save_path_acc="ACC.png", save_path_loss="LOSS.png",
                        save=False, show=True)

# Visualization after training
# containers for the points predicted as class 0 and class 1 respectively
zeros = np.zeros(int(train_size * 0.4)).reshape(x_test.shape)
ones = np.zeros(int(train_size * 0.4)).reshape(x_test.shape)

for i, el in enumerate(model.predict(x_test)):
    if el >= 0.5:
        ones[i] = x_test[i]
    else:
        zeros[i] = x_test[i]

plt.xlim(0, 1.3)
plt.ylim(0, 1)
plt.title("After training data")
plt.plot(zeros.transpose()[0], zeros.transpose()[1], '.')
plt.plot(ones.transpose()[0], ones.transpose()[1], '.')
plt.show()
Example #23
# Training of the features !!
# Using Keras Neural Network !!
# Keras
from keras import Sequential
from keras.layers import Dense

classifier = Sequential()
#First Hidden Layer
classifier.add(Dense(100, activation='relu', kernel_initializer='random_normal', input_dim = 784))
#Second  Hidden Layer
#classifier.add(Dense(100, activation='relu', kernel_initializer='random_normal'))
classifier.add(Dense(100, activation='relu', kernel_initializer='random_normal'))
#Output Layer
classifier.add(Dense(10, activation='softmax', kernel_initializer='random_normal'))
#Compiling the neural network
classifier.compile(optimizer='RMSprop', loss='sparse_categorical_crossentropy', metrics=['accuracy'])

#Fitting the data to the training dataset
classifier.fit(X_train, y_train, batch_size=20, epochs=100)


#y_hat_xgb = model_xgb.predict(X_test)
y_hat_keras = classifier.predict(X_test)
sample_submission = []
for i in range(len(y_hat_keras)):
    sample_submission.append((i + 1, np.argmax(y_hat_keras[i])))

df = pd.DataFrame(columns=['ImageId', 'Label'], data=sample_submission)

df.to_csv(directory + 'sample_submission_Keras_sequential.csv', index=None)
Example #24
train_data = train_data.dropna()
# print(train_data.info())
X = train_data.drop(columns=['Y']).values  # 7 feature columns (as_matrix() has been removed from pandas)
y = train_data['Y'].values  # 1 target column
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)

model=Sequential()
model.add(Dense(14,input_shape=(7,)))
model.add(Activation('relu'))
model.add(Dense(1))
model.add(Dropout(0.3))  # note: dropout after the output layer is unusual; it normally goes between hidden layers
model.compile(loss='mean_squared_error', optimizer='adam')
model.summary()

model.fit(X_train,y_train,epochs=10000,batch_size=16)
t=model.predict(X_test)

rate=0

# note: exact equality between continuous predictions and targets will almost
# never hold; a tolerance check (e.g. abs(t[i] - y_test[i]) < eps) is more usual
for i in range(len(t)):
    if t[i] == y_test[i]:
        rate += 1
rate = 1.0 * rate / len(t)

print(rate)


# test_data=pd.read_csv('D:\sufe\A\contest_basic_test.tsv',sep='\t')
# test_data=test_data.dropna()
Example #25
METRICS = [  # reconstructed list opening; the excerpt starts mid-list and `METRICS` is referenced below
      keras.metrics.Precision(name='precision'),
      keras.metrics.Recall(name='recall'),
      keras.metrics.AUC(name='auc'),
]

# compile the model using binary cross-entropy rather than
# categorical cross-entropy -- this may seem counterintuitive for
# multi-label classification, but keep in mind that the goal here
# is to treat each output label as an independent Bernoulli
# distribution
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=METRICS)

## Fit model for multiple labels and print accuracy
history = model.fit(X_train, Y_train, validation_data=(X_test, Y_test), batch_size=64, epochs=200)

pred = model.predict(X_test, verbose=2)
pred_proba = model.predict(X_test)  # predict_proba was removed from tf.keras; predict already returns probabilities
pred[pred >= 0.5] = 1
pred[pred < 0.5] = 0
print('pred: ', pred)
print('Y_test: ', Y_test)

conf_mat = multilabel_confusion_matrix(Y_test,pred)
print('conf mat: ')
print(conf_mat)

# summarize history for accuracy
ExtraSensoryHelperFunctions.PlotEpochVsAcc(plt,history)

# summarize history for loss
ExtraSensoryHelperFunctions.PlotEpochVsLoss(plt,history)
Example #26
plt.plot(trained.history['loss'])
plt.plot(trained.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
plt.plot(trained.history['sensitivity'])
plt.plot(trained.history['val_sensitivity'])
plt.title('Model sensitivity')
plt.ylabel('sensitivity')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()

plt.plot(trained.history['specificity'])
plt.plot(trained.history['val_specificity'])
plt.title('Model specificity')
plt.ylabel('specificity')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()

cnn_Y_pred = model.predict(X_valid)
cnn_Y_pred = np.argmax(cnn_Y_pred, axis=1)

# to display prediction images in subplots
display_prediction(5, 2, X_pred, cnn_Y_pred)

Example #27
model = Sequential([Dense(na, input_shape=(ns, ))])

model.set_weights([np.array([[0, 0, 0], [1, 2, 3]]), np.array([0, 0, 0])])

test_size = 200

rewards = 0
x = []
y = []
for _ in range(test_size):
    s = env.reset()
    x.append(s[0])
    i = 0
    while True:
        # env.render()
        q_values = model.predict(np.array([s]))
        s, r, done, _ = env.step(np.argmax(q_values))  # argmax already picks the first maximal index
        rewards += r
        i += 1
        if done:
            print("solved for {} iterations".format(i))
            break
    y.append(-i)
print("The average reward of {} episodes is {}".format(test_size,
                                                       rewards / test_size))
plt.title("MountainCar Designed Linear Model")
plt.ylabel("Episode reward")
plt.xlabel("Initial location")
plt.scatter(x, y)
plt.show()
Example #28
class QReplayNetworkModel(AbstractModel):
    """ Prediction model which uses Q-learning and a neural network which replays past moves.

        The network learns by replaying a batch of training moves. The training algorithm ensures that
        the game is started from every possible cell. Training ends after a fixed number of games, or
        earlier if a stopping criterion is reached (here: a 100% win rate).

        :param class Maze game: Maze game object.
    """

    def __init__(self, game, **kwargs):
        super().__init__(game, **kwargs)

        if kwargs.get("load", False) is False:
            self.model = Sequential()
            self.model.add(Dense(game.maze.size, input_shape=(2,), activation="relu"))
            self.model.add(Dense(game.maze.size, activation="relu"))
            self.model.add(Dense(len(actions)))
        else:
            self.load(self.name)

        self.model.compile(optimizer="adam", loss="mse")

    def save(self, filename):
        with open(filename + ".json", "w") as outfile:
            outfile.write(self.model.to_json())
        self.model.save_weights(filename + ".h5", overwrite=True)

    def load(self, filename):
        with open(filename + ".json", "r") as infile:
            self.model = model_from_json(infile.read())
        self.model.load_weights(filename + ".h5")

    def train(self, stop_at_convergence=False, **kwargs):
        """ Hyperparameters:

            :keyword float discount: (gamma) preference for future rewards (0 = not at all, 1 = only)
            :keyword float exploration_rate: (epsilon) preference for exploring (0 = not at all, 1 = only)
            :keyword float exploration_decay: exploration rate reduction after each random step (<= 1, 1 = no decay)
            :keyword int episodes: number of training games to play
            :keyword int sample_size: number of samples to replay for training
            :return int, datetime: number of training episodes, total time spent
        """
        discount = kwargs.get("discount", 0.90)
        exploration_rate = kwargs.get("exploration_rate", 0.10)
        exploration_decay = kwargs.get("exploration_decay", 0.995)  # % reduction per step = 100 - exploration decay
        episodes = kwargs.get("episodes", 10000)
        sample_size = kwargs.get("sample_size", 32)

        experience = ExperienceReplay(self.model, discount=discount)

        # variables for reporting purposes
        cumulative_reward = 0
        cumulative_reward_history = []
        win_history = []

        start_list = list()  # starting cells not yet used for training
        start_time = datetime.now()

        for episode in range(1, episodes + 1):
            if not start_list:
                start_list = self.environment.empty.copy()
            start_cell = random.choice(start_list)
            start_list.remove(start_cell)

            state = self.environment.reset(start_cell)

            loss = 0.0

            while True:
                if np.random.random() < exploration_rate:
                    action = random.choice(self.environment.actions)
                else:
                    # q = experience.predict(state)
                    # action = random.choice(np.nonzero(q == np.max(q))[0])
                    action = self.predict(state)

                next_state, reward, status = self.environment.step(action)

                cumulative_reward += reward

                experience.remember([state, action, reward, next_state, status])

                if status in ("win", "lose"):  # terminal state reached, stop episode
                    break

                inputs, targets = experience.get_samples(sample_size=sample_size)

                self.model.fit(inputs,
                               targets,
                               epochs=4,
                               batch_size=16,
                               verbose=0)
                loss += self.model.evaluate(inputs, targets, verbose=0)

                state = next_state

                self.environment.render_q(self)

            cumulative_reward_history.append(cumulative_reward)

            logging.info("episode: {:d}/{:d} | status: {:4s} | loss: {:.4f} | e: {:.5f}"
                         .format(episode, episodes, status, loss, exploration_rate))

            if episode % 5 == 0:
                # check if the current model wins from all starting cells
                # can only do this if there is a finite number of starting states
                w_all, win_rate = self.environment.win_all(self)
                win_history.append((episode, win_rate))
                if w_all is True and stop_at_convergence is True:
                    logging.info("won from all start cells, stop learning")
                    break

            exploration_rate *= exploration_decay  # explore less as training progresses

        self.save(self.name)  # save the trained model's weights and architecture

        logging.info("episodes: {:d} | time spent: {}".format(episode, datetime.now() - start_time))

        return cumulative_reward_history, win_history, episode, datetime.now() - start_time

    def q(self, state):
        """ Get q values for all actions for a certain state. """
        return self.model.predict(state)[0]

    def predict(self, state):
        """ Policy: choose the action with the highest value from the Q-table.
            Random choice if multiple actions have the same (max) value.

            :param np.ndarray state: Game state.
            :return int: Chosen action.
        """
        q = self.q(state)

        logging.debug("q[] = {}".format(q))

        actions = np.nonzero(q == np.max(q))[0]  # get index of the action(s) with the max value
        return random.choice(actions)
Example #29
import numpy
import matplotlib.pyplot as plt
import WildCardLib

data = []
for i in range(300):
    data.append([numpy.sin(i * 0.1), 0, numpy.cos(i * 0.1)])

print(data)
look_back = 20
trainX, trainY = WildCardLib.create_dataset(data, look_back)

print(trainX)
model = Sequential()
model.add(LSTM(20, input_shape=(look_back, 3)))
# model.add(Dense(100, input_dim=3, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(3))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=2, batch_size=1, verbose=2)
trainPredict = model.predict(trainX)
new_len = int(len(trainX) * 1)
Y = WildCardLib.extended_this(model=model,
                              trainX=trainX[0:new_len],
                              trainY=trainY[0:new_len],
                              look_back=look_back,
                              multi=1.5)
# plt.plot(trainPredict)

plt.plot(Y)
plt.plot(trainY)
plt.show()
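`WildCardLib.create_dataset` is not shown; a typical sliding-window helper it presumably resembles (an assumption):

def create_dataset(data, look_back):
    # windows of look_back consecutive samples predict the following sample
    X, Y = [], []
    for i in range(len(data) - look_back):
        X.append(data[i:i + look_back])
        Y.append(data[i + look_back])
    return numpy.array(X), numpy.array(Y)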
Example #30
class DNN_Model(Common_Model):
    '''
    __init__(): initialize the neural network

    Input:
        input_shape: feature dimensions
        num_classes(int): number of label classes
    '''
    def __init__(self, input_shape, num_classes, **params):
        super(DNN_Model, self).__init__(**params)
        self.input_shape = input_shape
        self.model = Sequential()
        self.make_model()
        self.model.add(Dense(num_classes, activation='softmax'))
        self.model.compile(loss='binary_crossentropy',
                           optimizer='adam',
                           metrics=['accuracy'])
        print(self.model.summary(), file=sys.stderr)

    '''
    save_model(): store the model weights as model_name.h5 and model_name.json under /Models
    '''

    def save_model(self, model_name):
        h5_save_path = 'Models/' + model_name + '.h5'
        self.model.save_weights(h5_save_path)

        save_json_path = 'Models/' + model_name + '.json'
        with open(save_json_path, "w") as json_file:
            json_file.write(self.model.to_json())

    '''
    train(): train the model on the given training set

    Input:
        x_train (numpy.ndarray): training-set samples
        y_train (numpy.ndarray): training-set labels
        x_val (numpy.ndarray): validation-set samples
        y_val (numpy.ndarray): validation-set labels
        n_epochs (int): number of epochs

    '''

    def train(self, x_train, y_train, x_val=None, y_val=None, n_epochs=50):
        acc = []
        loss = []
        val_acc = []
        val_loss = []

        if x_val is None or y_val is None:
            x_val, y_val = x_train, y_train
        for i in range(n_epochs):
            # shuffle the training data each epoch
            p = np.random.permutation(len(x_train))
            x_train = x_train[p]
            y_train = y_train[p]

            history = self.model.fit(x_train, y_train, batch_size=32, epochs=1)
            # loss and accuracy on the training set
            acc.append(history.history['acc'])
            loss.append(history.history['loss'])
            # loss and accuracy on the validation set
            val_loss_single, val_acc_single = self.model.evaluate(x_val, y_val)
            val_acc.append(val_acc_single)
            val_loss.append(val_loss_single)

        plotCurve(acc, val_acc, 'LSTM Accuracy', 'acc')
        plotCurve(loss, val_loss, 'LSTM Loss', 'loss')
        self.trained = True

    '''
    predict(): recognize the emotion of an audio sample

    Input:
        samples: audio features to classify

    Output:
        list: recognition results
    '''

    def predict(self, sample):
        # the model has been neither trained nor loaded
        if not self.trained:
            sys.stderr.write("No Model.")
            sys.exit(-1)

        return np.argmax(self.model.predict(sample), axis=1)

    def make_model(self):
        raise NotImplementedError()
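As in Example #9, a concrete child class fills in the hidden layers; a minimal hypothetical subclass (layer sizes are assumptions):

class MLP_Model(DNN_Model):
    def make_model(self):
        # hidden layers only; DNN_Model.__init__ appends the softmax output
        self.model.add(Dense(64, activation='relu', input_shape=self.input_shape))
        self.model.add(Dense(64, activation='relu'))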
Example #31
model.compile(loss='categorical_crossentropy',  # reconstructed opening; loss and optimizer are assumptions
              optimizer='adam',
              metrics=['accuracy'])
# Create validation set
history = model.fit(train_data,
                    train_labels_one_hot,
                    batch_size=256,
                    epochs=8,
                    verbose=2,
                    validation_data=(test_data, test_labels_one_hot))
# print((history.history.keys()))

# evaluate model's error
score = model.evaluate(test_data, test_labels_one_hot, verbose=0)
print("Baseline error: %.2f%%" % (100 - score[1] * 100))

# Plot a single image, its label, and inference (model prediction for label)
infer = str(np.argmax(model.predict(test_data[0].reshape(1, 784))))
labelsAndTrainingImages = list(zip(test_images, test_labels))

# Plot single image and model estimation
plot4 = plt.figure(4)
plt.axis('off')
plt.imshow(test_images[0],
           cmap='gray',
           interpolation='nearest',
           aspect='equal')
plt.title("Label " + str(test_labels[0]) + "\n" + "Infer " + infer)

# Plot first twenty images, labels, and inferences
plot1 = plt.figure(figsize=(10, 10))
for index, (image, label) in enumerate(labelsAndTrainingImages[:20]):
    plt.subplot(4, 5, index + 1)
    # reconstructed loop body; the original excerpt breaks off after the subplot call
    plt.axis('off')
    plt.imshow(image, cmap='gray')
    plt.title(str(label))
Example #32
from keras import Sequential
from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D

lstm_out = 50

# create and fit the LSTM network
model = Sequential()
model.add(LSTM(lstm_out, input_shape=(100, nparam)))
model.add(Dense(nout))  # the output_dim= keyword is long deprecated; pass units positionally
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10, batch_size=100, verbose=2)


# Predict train values and calculate the accuracy
pred_train = normalizer.inverse_transform(model.predict(x_train))
pred_train = np.round(pred_train)
y_train_real = normalizer.inverse_transform(y_train)
acc_train = sum(sum(y_train_real == pred_train)) / (ntrain * nout) * 100

# Predict test values and calculate the accuracy
pred_test = normalizer.inverse_transform(model.predict(x_test))
pred_test = np.round(pred_test)
y_test_real = normalizer.inverse_transform(y_test)
acc_test = sum(sum(y_test_real == pred_test)) / (ntest * nout) * 100

print("Train accuracy: "+str(acc_train))
print("Test accuracy: "+str(acc_test))

# Export the model to json
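The closing comment announces a JSON export that never appears; following the pattern of Example #19, it would presumably be:

model_json = model.to_json()
with open("model.json", "w") as json_file:  # the output path is an assumption
    json_file.write(model_json)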