Example #1
Output = Test.iloc[:, 2].values

# Setting up a 90/10 train/test split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(NN_Data, Output, test_size = 0.1,random_state = 0)

# feature scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

# NEURAL NETWORK TIME!

import keras
from keras.models import Sequential
from keras.layers import Dense


NN = Sequential()
# Input and 1st hidden layer

NN.add(Dense(units = 2, kernel_initializer = 'uniform', activation = 'relu', input_dim = 3))

# Second hidden layer
NN.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'relu'))

# training the NN (note: 'accuracy' is not an informative metric for an MSE regression loss)
NN.compile(optimizer = 'adam', loss = 'mean_squared_error', metrics = ['accuracy'])
NN.fit(X_train, y_train, batch_size = 10, epochs = 100)
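# The snippet trains but never scores the held-out data. A minimal evaluation
# sketch, assuming the variables above are still in scope:
test_loss, test_acc = NN.evaluate(X_test, y_test)
print("Test MSE:", test_loss)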
Example #2
################### part eleven -- define optimizer ###############################
from keras import optimizers
rmsprop = optimizers.RMSprop(learning_rate=0.001) # can tune the learning rate here

################## part twelve -- compile model #################################
classifier.compile(optimizer = rmsprop, loss = 'mean_squared_error', metrics = ['accuracy']) # can add more metrics; change the loss to suit the task

################# part thirteen -- set up early stopping and prepare for history recording ##################################
from keras.callbacks import EarlyStopping, TensorBoard
early_stopping = EarlyStopping(monitor='val_loss', patience = 100)
tensorBoard = TensorBoard(log_dir={path to store log file}, histogram_freq=0, batch_size=128)
'''
Do we need to keep the batch_size of the TensorBoard callback and of the
training call the same? No: the callback's batch_size only controls how inputs
are batched when computing histograms, so it need not match the training value.
'''

#################### part fourteen -- fitting the model to training data set ################################################
hist = classifier.fit(X_train, y_train, batch_size=512, epochs=5000, validation_split=0.05,
                      callbacks=[early_stopping, tensorBoard])  # pass the callbacks so they actually take effect

# keep track of ending time (assumes `from datetime import datetime` and a matching `start` set earlier)
end = datetime.now()

################## part fifteen -- evaluation ###########################################
y_pred = classifier.predict(X_test)
y_pred_df = pd.DataFrame(y_pred)

################## part sixteen -- editing history file ############################
text_file = open({path of txt file you want to store}, 'w')
text_file.write("Processing time is " + str(end-start) + "\n")
text_file.write(str(hist.history))
text_file.close()

################## part seventeen -- store weight/output to a .h5 file ####################
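# This part has no body in the original. A minimal sketch using the standard
# Keras save API (the file names here are placeholders):
classifier.save('my_model.h5')            # full model: architecture + weights + optimizer state
classifier.save_weights('my_weights.h5')  # weights only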
Example #3
# binary classes
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])

# mean squared error
model.compile(optimizer='rmsprop', loss='mse')

# user-defined metrics
from keras import backend as K

def mean_pred(y_true, y_pred):
    return K.mean(y_pred)

model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy', mean_pred])

# train
# generate dataset
# binary
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense
data = np.random.random((1000, 100))
labels = np.random.randint(2, size=(1000, 1))

model = Sequential()
model.add(Dense(32, activation='relu', input_dim=100))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss = 'binary_crossentropy', metrics=['accuracy'])
model.fit(data, labels, epochs=10, batch_size=32)

# multi-class: the binary model above has a 1-unit sigmoid head, so rebuild
# the output layer with 10 softmax units and a categorical loss
labels = np.random.randint(10, size=(1000, 1))
one_hot_labels = keras.utils.to_categorical(labels, num_classes=10)

model = Sequential()
model.add(Dense(32, activation='relu', input_dim=100))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(data, one_hot_labels, epochs=10, batch_size=32)
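# Sketch: wiring in the custom metric defined above. evaluate() returns values
# in compile order, so the result here is [loss, accuracy, mean_pred].
model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
              metrics=['accuracy', mean_pred])
loss, acc, mp = model.evaluate(data, one_hot_labels, batch_size=32)
print(loss, acc, mp)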
Example #4
# next-step prediction: each value is used to predict the following one
x = trainDataset[:-1, :]

y = trainDataset[1:, :]

# the LSTM expects 3D input shaped (samples, timesteps, features)
x = x.reshape((x.shape[0], 1, 1))

from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM

model = Sequential()

model.add(LSTM(units=32, activation='sigmoid', input_shape=(None, 1)))

model.add(Dense(units=1))

model.compile(optimizer='adam', loss='mean_squared_error')

model.fit(x, y, epochs=100, batch_size=32)

import pandas as pd
import matplotlib.pyplot as plt

realStock = pd.read_csv('Google_Stock_Price_Test.csv')

realStock = realStock.iloc[:, 4:5].values

plt.plot(realStock, 'r')
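# Sketch of the missing prediction step (assumes realStock holds the same price
# column the network was trained on, preprocessed the same way):
predicted = model.predict(realStock.reshape((-1, 1, 1)))
plt.plot(predicted, 'b')
plt.legend(['real', 'predicted'])
plt.show()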
Example #5
from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
model.add(Dense(100, input_dim=128, kernel_initializer='uniform', activation='relu'))
model.add(Dense(100, kernel_initializer='uniform', activation='relu'))
model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(X_train,
          Y_train,
          validation_data=(X_test, Y_test),
          epochs=500,
          batch_size=10,
          verbose=2)

#training accuracy
scores = model.evaluate(X_train, Y_train)
print("Accuracy: %.2f%%" % (scores[1] * 100))

#testing accuracy
scores2 = model.evaluate(X_test, Y_test)
print("Accuracy: %.2f%%" % (scores2[1] * 100))
Example #6



# scale the pixels from the 0-255 range down to 0-1 by dividing by 255
new_train_data = train_data/255
new_test_data = test_data/255




# note: the second argument to fit() must be the training *labels*; the original
# passed the test images here by mistake (the label variable name is assumed)
model.fit(new_train_data, train_labels, epochs=20)




import matplotlib.pyplot as plt

plt.plot(model.history.history['loss'])
plt.xlabel('# epochs')
plt.ylabel('loss')
plt.show()
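# Sketch: the same fit() can also track validation loss via validation_split
# (train_labels is the assumed label variable, as above)
hist = model.fit(new_train_data, train_labels, epochs=20, validation_split=0.1)
plt.plot(hist.history['loss'], label='train')
plt.plot(hist.history['val_loss'], label='validation')
plt.xlabel('# epochs')
plt.ylabel('loss')
plt.legend()
plt.show()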


Example #7
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam, SGD

%matplotlib inline

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

df  = pd.read_csv('../data/weight-height.csv')
X = df[['Height']].values
y_true = df[['Weight']].values

model = Sequential()

# model.add(Dense(1, input_shape=(1,)))  # single-layer (pure linear regression) alternative

model.add(Dense(4, input_shape=(1,)))
model.add(Dense(4))  # input_shape is only needed on the first layer
model.add(Dense(1))

model.compile(Adam(learning_rate=0.8), 'mean_squared_error')

model.fit(X, y_true, epochs=40)
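# Sketch: quantifying the regression fit with R^2 (scikit-learn assumed available)
from sklearn.metrics import r2_score
y_pred = model.predict(X)
print("R^2:", r2_score(y_true, y_pred))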


''' performance of the model:
    this is a regression task (predicting weight from height), so judge it
    with regression metrics such as MSE or R^2 rather than classification
    accuracy. '''
Example #8
# (the opening of this compile call is truncated in the original; the optimizer shown is assumed)
siamese.compile(optimizer='adam',
                loss='hinge')

siamese.set_weights(siamese_theano)

import theano
get_feature100 = theano.function([siamese.layers[0].input],  # inputs must be given as a list
                                 siamese.layers[-2].output)

feat_x = get_feature100(train_x)

from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(feat_x, train_y)  # fit() needs the labels as well as the features

import numpy as np
feat_neighbor = np.empty((n_samples, 6, feat_x.shape[1]))
for sample in range(n_samples):
    # kneighbors returns (distances, indices); use the indices to fetch neighbor features
    _, idx = knn.kneighbors(feat_x[sample:sample + 1])
    feat_neighbor[sample, :5, :] = feat_x[idx[0]]
    feat_neighbor[sample, 5, :] = feat_x[sample]

from keras.models import Sequential
from keras.layers import LSTM, Dense

rnn = Sequential()
rnn.add(LSTM(100, input_shape=(6, 100), activation='tanh'))  # 6 timesteps: 5 neighbors plus the sample itself
rnn.add(Dense(n_labels, activation='softmax'))
rnn.compile(optimizer='adam',
            loss='categorical_crossentropy',
            metrics=['accuracy'])

rnn.fit(feat_neighbor, train_y,
        batch_size=5,
        epochs=10,
        verbose=1)

Example #9
x_train = np.array(images)
y_train = np.array(measurements)

from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda

model = Sequential()
# lambda layer for normalization
model.add(Lambda(lambda x: x / 255.0, input_shape=(160, 320, 3)))
model.add(Flatten())
model.add(Dense(1))

model.compile(loss="mse", optimizer="adam")
model.fit(x_train, y_train, validation_split=0.2, shuffle=True, epochs=5)
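# Sketch: persist the trained network with the standard Keras save API
# ('model.h5' is a placeholder file name)
model.save('model.h5')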
Example #10
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import SGD

model = Sequential()
model.add(Dense(64, input_dim=20, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

model.fit(x_train, y_train, epochs=20, batch_size=128)
score = model.evaluate(x_test, y_test, batch_size=128)
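# Sketch: evaluate() returns values in compile order, so score is [loss, accuracy]
print("test loss: %.4f, test accuracy: %.4f" % (score[0], score[1]))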
'''
model = Sequential()
# Dense(64) is a fully-connected layer with 64 hidden units.
# in the first layer, you must specify the expected input data shape:
# here, 20-dimensional vectors.
model.add(Dense(64, activation='relu', input_dim=20))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))

sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])
'''
Example #11
#Making sure that the values are float so that we can get decimal points after division
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')

#Normalizing the RGB codes by dividing it to the max RGB values
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print('Number of images in x_train', x_train.shape[0])
print('Number of images in x_test', x_test.shape[0])

#Import the required Keras modules containing model and layers
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D
#Creating a sequential model and adding the layers
model = Sequential()
model.add(Conv2D(28, kernel_size=(3, 3), input_shape=input_shape))  # input_shape is defined earlier, e.g. (28, 28, 1) for MNIST
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())  #Flatten the 2D arrays for the fully connected layers
model.add(Dense(128, activation=tf.nn.relu))
model.add(Dropout(0.2))
model.add(Dense(10, activation=tf.nn.softmax))

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x=x_train, y=y_train, epochs=10)

model.evaluate(x_test, y_test)
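# Sketch: capture what evaluate() returns ([loss, accuracy]) and print it
test_loss, test_acc = model.evaluate(x_test, y_test)
print('test accuracy: %.4f' % test_acc)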