Example no. 1
def main():
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    # Transform data to a list of 1D arrays
    dim_product = x_train.shape[1] * x_train.shape[2]
    x_train = x_train.reshape(x_train.shape[0], dim_product)
    x_test = x_test.reshape(x_test.shape[0], dim_product)

    # Normalize data so that every point is between 0 and 1
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    # Turn labels to categories
    y_train = np_utils.to_categorical(y_train, 10)
    y_test = np_utils.to_categorical(y_test, 10)

    model = Sequential()
    model.add(Dense(1200, input_dim=dim_product, init="normal",
                    activation='tanh'))
    # model.add(Dense(400, init="normal", activation="relu"))
    model.add(Dense(10, init="normal", activation="softmax"))

    model.compile(loss="categorical_crossentropy", optimizer="SGD",
                  metrics=['accuracy'])

    print(f"Models summary: {model.summary()}")

    model.fit(x_train, y_train, batch_size=200, nb_epoch=60,
              validation_split=0.3, verbose=1)

    score = model.evaluate(x_test, y_test, verbose=0)
    print(f"Final score: {score[1]*100}")
    model.save('simple-mnist.h5')
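The saved simple-mnist.h5 file can later be reloaded for inference. A minimal sketch, assuming the same flattened, 0-1 normalized input used above:

from keras.datasets import mnist
from keras.models import load_model
import numpy as np

# Reload the trained network and score a few test digits.
model = load_model('simple-mnist.h5')
(_, _), (x_test, y_test) = mnist.load_data()
x_test = x_test.reshape(x_test.shape[0], 784).astype('float32') / 255
predictions = np.argmax(model.predict(x_test[:10]), axis=1)
print(predictions, y_test[:10])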
Example no. 2
def train():
    # load the dataset but only keep the top n words, zero the rest
    (X_train, Y_train), (X_test, Y_test) = imdb.load_data(nb_words=top_words)
    # truncate and pad input sequences
    X_train = sequence.pad_sequences(X_train, maxlen=max_review_length)
    X_test = sequence.pad_sequences(X_test, maxlen=max_review_length)

    # create the model
    embedding_vector_length = 32
    model = Sequential()
    model.add(Embedding(top_words, embedding_vector_length, input_length=max_review_length))
    model.add(Dropout(0.2))
    model.add(Convolution1D(nb_filter=32, filter_length=3, border_mode='same', activation='relu'))
    model.add(MaxPooling1D(pool_length=2))
    model.add(LSTM(100))
    model.add(Dropout(0.2))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    print(model.summary())
    model.fit(X_train, Y_train, validation_data=(X_test, Y_test), nb_epoch=2, batch_size=64)

    # Final evaluation of the model
    scores = model.evaluate(X_test, Y_test, verbose=0)
    print("Accuracy: %.2f%%" % (scores[1]*100))
    model.save("imdb_%0.2f.pkl" % scores[1])
Example no. 3
def trainNN():
    # POSITIVE training data
    posPX, posSX = getAllWindowedMinMaxPositiveTrainingData('./sample/example30', preSize=10, postSize=20)
    posPY = np.array([[1]] * len(posPX))
    posSY = np.array([[1]] * len(posSX))

    # NEGATIVE training data
    negX = getSomeWindowedMinMaxNegativeTrainingData('./sample/example30/', size=30, num=200)
    negY = np.array([[0]] * 200)

    # ALL training data
    X = np.concatenate([posPX, posSX, negX])
    Y = np.concatenate([posPY, posSY, negY])

    # Build the neural network with Keras
    # Sequential is a stack of layers, one after another
    # Dense is a fully connected layer
    # Define the model
    model = Sequential()
    model.add(Dense(50, input_dim=30, activation='sigmoid'))
    model.add(Dense(50, activation='sigmoid'))
    model.add(Dense(10, activation='sigmoid'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    # model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
    model.fit(X, Y, epochs=200, batch_size=32)
    model.save('model.h5')
    return model
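The trained network can then be applied to new 30-point windows; a small sketch with placeholder input (real windows would come from the same getAllWindowedMinMax... helpers used above):

from keras.models import load_model
import numpy as np

model = load_model('model.h5')
windows = np.random.rand(5, 30).astype('float32')   # placeholder: five 30-dimensional windows
probs = model.predict(windows)
labels = (probs > 0.5).astype(int)                   # threshold the sigmoid output
print(labels.ravel())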
Example no. 4
def create_model(x_train, y_train, x_test, y_test):
    """
    Create your model...
    """
    layer_1_size = {{quniform(12, 256, 4)}}
    l1_dropout = {{uniform(0.001, 0.7)}}
    params = {
        'l1_size': layer_1_size,
        'l1_dropout': l1_dropout
    }
    num_classes = 10
    model = Sequential()
    model.add(Dense(int(layer_1_size), activation='relu'))
    model.add(Dropout(l1_dropout))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSprop(),
                  metrics=['accuracy'])
    model.fit(x_train, y_train, batch_size=128, epochs=10, validation_data=(x_test, y_test))
    score, acc = model.evaluate(x_test, y_test, verbose=0)
    out = {
        'loss': -acc,
        'score': score,
        'status': STATUS_OK,
        'model_params': params,
    }
    # optionally store a dump of your model here so you can get it from the database later
    temp_name = tempfile.gettempdir()+'/'+next(tempfile._get_candidate_names()) + '.h5'
    model.save(temp_name)
    with open(temp_name, 'rb') as infile:
        model_bytes = infile.read()
    out['model_serial'] = model_bytes
    return out
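The double-brace placeholders ({{quniform(...)}}, {{uniform(...)}}) are hyperas template syntax, so this function is meant to be driven by a hyperas search. A hedged driver sketch, assuming a companion data() function that returns x_train, y_train, x_test, y_test (the max_evals value is arbitrary):

from hyperopt import Trials, tpe
from hyperas import optim

# Run the hyperparameter search; hyperas rewrites the {{...}} placeholders into a hyperopt space.
best_run, best_model = optim.minimize(model=create_model,
                                      data=data,
                                      algo=tpe.suggest,
                                      max_evals=20,
                                      trials=Trials())
print(best_run)

Note that the result dict above stores the serialized weights under 'model_serial', so they would typically be recovered from the trials/result rather than from best_model.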
Example no. 5
def train_model(filename, epochs):
    '''Train the model. When finished, save the result to a file.
       Return the model.
    '''
    # Create the model.
    model = Sequential()

    # Build a layer.
    # A dense layer is a bunch of neurons densely connected to the neurons
    # in the previous layer. That's in contrast to convolutional
    # (there's apparently no "sparse" layer type).
    model.add(Dense(200, input_shape=(784,)))

    # Add a dropout layer, which will randomly drop out some data.
    # That helps keep the model from memorizing the dataset.
    # The dropout will happen after the first layer.
    # .2 is kind of small as a dropout fraction, but we're just making
    # a small test model of 200 neurons so we don't have a lot to spare.
    model.add(Dropout(0.2))

    # Add an activation.
    # Sigmoid isn't actually the right model to use for this problem.
    # RELU, rectified linear units, might be better.
    model.add(Activation('sigmoid'))

    # Add another dense layer. No need to define the input shape
    # this time, since it'll get that from the previous layer.
    # 100 is the output size.
    model.add(Dense(100))

    model.add(Activation('sigmoid'))

    # Another layer the size of our output.
    model.add(Dense(10))

    # A softmax activation layer will give us a list of probabilities
    # that add to 1, so we can see the distribution of probabilities
    # that an image is a particular digit.
    model.add(Activation('softmax'))

    model.summary()

    # Compile the model, giving it an optimizer and a loss function.
    # categorical_crossentropy will output a number indicating how sure
    # it is about the match.
    model.compile(optimizer='adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # Run the model.
    hist = model.fit(x_train, y_train, epochs=epochs, batch_size=100,
                     validation_data=(x_test, y_test))

    print("History:", hist)

    # You can train a model on a fast machine, then save it and load it
    # on something like a Pi.
    model.save(filename)

    return model
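train_model assumes x_train, y_train, x_test, y_test already exist at module level. A minimal sketch of that preprocessing for MNIST (flattened to 784 features, one-hot labels), matching the (784,) input shape and categorical_crossentropy loss above:

from keras.datasets import mnist
from keras.utils import to_categorical

(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 784).astype('float32') / 255
x_test = x_test.reshape(-1, 784).astype('float32') / 255
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)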
Example no. 6
    def train(self):
        # input image dimensions
        img_rows, img_cols = 28, 28

        batch_size = 128
        num_classes = 10

        epochs = self.epochs

        (x_train, y_train), (x_test, y_test) = self.load_data(self.data_file)
        
        if K.image_data_format() == 'channels_first':
            x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
            x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
            input_shape = (1, img_rows, img_cols)
        else:
            x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
            x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
            input_shape = (img_rows, img_cols, 1)
        
        x_train = x_train.astype('float32')
        x_test = x_test.astype('float32')
        x_train /= 255
        x_test /= 255
        print('x_train shape:', x_train.shape)
        print(x_train.shape[0], 'train samples')
        print(x_test.shape[0], 'test samples')
        
        # convert class vectors to binary class matrices
        y_train = keras.utils.to_categorical(y_train, num_classes)
        y_test = keras.utils.to_categorical(y_test, num_classes)
        
        model = Sequential()
        model.add(Conv2D(32, kernel_size=(3, 3),
                         activation='relu',
                         input_shape=input_shape))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(128, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(num_classes, activation='softmax'))
        
        model.compile(loss=keras.losses.categorical_crossentropy,
                      optimizer=keras.optimizers.Adadelta(),
                      metrics=['accuracy'])
        
        model.fit(x_train, y_train,
                  batch_size=batch_size,
                  epochs=epochs,
                  verbose=1,
                  validation_data=(x_test, y_test))
        score = model.evaluate(x_test, y_test, verbose=0)
        
        model.save(self.model_file)

        print('Test loss:', score[0])
        print('Test accuracy:', score[1])
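Once saved, the model can be reloaded and applied to a single 28x28 image. A sketch that keeps the same channels_first / channels_last branching used above (the file path and input image are placeholders):

from keras import backend as K
from keras.models import load_model
import numpy as np

model = load_model('mnist_cnn.h5')                 # hypothetical path passed as self.model_file
img = np.zeros((28, 28), dtype='float32')          # placeholder image, already scaled to [0, 1]
if K.image_data_format() == 'channels_first':
    batch = img.reshape(1, 1, 28, 28)
else:
    batch = img.reshape(1, 28, 28, 1)
print(np.argmax(model.predict(batch), axis=1))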
Example no. 7
class Perceptron:
    def __init__(self):
        self.classifier = Sequential()

    def initialize(self, input_shape=(1,)):
        # Adding the Single Perceptron or Shallow network
        self.classifier.add(Dense(units=1,
                                  input_shape=input_shape,
                                  activation="relu",
                                  kernel_initializer="uniform"))
        # Adding dropout to prevent overfitting
        self.classifier.add(Dropout(rate=0.1))

        # Adding the output layer
        self.classifier.add(Dense(units = 1,
                                  activation='sigmoid',
                                  kernel_initializer='uniform'))

        # criterion loss and optimizer 
        self.classifier.compile(optimizer='adam',
                                loss='binary_crossentropy',
                                metrics=['accuracy'])

    def fit(self, X_train, Y_train, batch_size, nb_epoch):
        self.classifier.fit(X_train, Y_train, batch_size, nb_epoch)

    def predict(self, X_test):
        y_pred = self.classifier.predict(X_test)
        # threshold the sigmoid output at 0.5 to get 0/1 class labels
        return (y_pred > 0.5).astype(int)

    @staticmethod
    def confusion_matrix(y_test, y_pred):
        # Making the Confusion Matrix
        from sklearn.metrics import confusion_matrix
        cm = confusion_matrix(y_test, y_pred)
        return cm

    def save(self, file_name):
        self.classifier.save(file_name)  # creates a HDF5 file 'my_model.h5'

    def load(self, file_name):
        from keras.models import load_model
        self.classifier = load_model(file_name)

    def summary(self):
        weights, biases = self.classifier.layers[0].get_weights()
        print("weights", weights.size, weights, "biases", biases)

    def get_model(self):
        return self.classifier

    def __del__(self):
        del self.classifier
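A hedged usage sketch for the Perceptron wrapper on a toy binary dataset (the feature count and data below are illustrative, not from the original project):

import numpy as np

# Toy data: 200 samples, 4 features; label is whether the feature sum is positive.
X = np.random.randn(200, 4)
y = (X.sum(axis=1) > 0).astype(int)

p = Perceptron()
p.initialize(input_shape=(4,))
p.fit(X[:150], y[:150], batch_size=16, nb_epoch=20)
y_pred = p.predict(X[150:]).ravel()
print(Perceptron.confusion_matrix(y[150:], y_pred))
p.save('perceptron.h5')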
class CNNModel(object):
    def __init__(self, model_config=None):
        self.model_config = model_config
        self.cnn = None

    def create(self):
        n_class = self.model_config['n_class']

        self.cnn = Sequential()

        self.cnn.add(Convolution2D(8, 3, 3, border_mode='same', input_shape=(1, 28, 28)))
        self.cnn.add(Activation('relu'))

        self.cnn.add(Convolution2D(16, 3, 3))
        self.cnn.add(Activation('relu'))
        self.cnn.add(MaxPooling2D(pool_size=(2, 2)))
        self.cnn.add(Dropout(0.25))

        self.cnn.add(Convolution2D(32, 3, 3, border_mode='same'))
        self.cnn.add(Activation('relu'))
        self.cnn.add(Convolution2D(64, 3, 3))
        self.cnn.add(Activation('relu'))
        self.cnn.add(MaxPooling2D(pool_size=(2, 2)))
        self.cnn.add(Dropout(0.25))

        self.cnn.add(Flatten())
        self.cnn.add(Dense(512))
        self.cnn.add(Activation('relu'))
        self.cnn.add(Dropout(0.5))
        self.cnn.add(Dense(n_class))
        self.cnn.add(Activation('softmax'))

    def compile(self):
        self.cnn.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
        self.cnn.summary()

    def load_trained_model(self):
        self.cnn = keras.models.load_model(self.model_config['trained_model_path'])

    def train(self, x_train, y_train):
        self.create()
        self.compile()
        self.cnn.fit(x_train, y_train,
                     batch_size=self.model_config['batch_size'],
                     nb_epoch=self.model_config['n_epoch'],
                     validation_split=0.01,
                     shuffle=self.model_config['shuffle_train_data'])
        if self.model_config['save_trained_model'] is True:
            self.cnn.save(self.model_config['save_trained_model_path'])

    def predict(self, x_predict):
        y_predict = self.cnn.predict_classes(x_predict)
        return y_predict
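CNNModel reads all of its settings from model_config. A sketch of the dictionary it expects, based on the keys referenced above (the values are placeholders):

model_config = {
    'n_class': 10,
    'batch_size': 128,
    'n_epoch': 12,
    'shuffle_train_data': True,
    'save_trained_model': True,
    'save_trained_model_path': 'cnn_mnist.h5',
    'trained_model_path': 'cnn_mnist.h5',
}

cnn_model = CNNModel(model_config)
# cnn_model.train(x_train, y_train)   # x_train shaped (N, 1, 28, 28) for this channels-first network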
Example no. 9
def train_model(genre, dir_model, MP):
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=True)) #check gpu is being used
    
    batch_size = MP['bs']
    lstm_size = MP['lstm_size']
    seq_length = MP['seq_length']
    drop = MP['dropout']
    lr = MP['lr']
    epochs = MP['epochs']
    
    text_to_int, int_to_text, n_chars = np.load('playlists/%s/ancillary_char.npy'%genre)
    vocab_size = len(text_to_int)
    X = np.load('playlists/%s/X_sl%d_char.npy'%(genre, seq_length))
    y = np.load('playlists/%s/y_sl%d_char.npy'%(genre, seq_length))

    # randomly shuffle samples before test/valid split
    np.random.seed(40)
    ran = [i for i in range(len(X))]
    np.random.shuffle(ran)
    
    X_train, X_valid, y_train, y_valid = train_test_split(X[ran], y[ran], test_size=0.2, random_state=42)

    try:
        model = load_model(dir_model)
        print("successfully loaded previous model, continuing to train")
    except:
        print("generating new model")
        model = Sequential()
        model.add(GRU(lstm_size, dropout=drop, recurrent_dropout=drop, return_sequences=True,
                      input_shape=(seq_length, vocab_size)))
        for i in range(MP['n_layers'] - 1):
            model.add(GRU(lstm_size, dropout=drop, recurrent_dropout=drop, return_sequences=True))
        model.add(TimeDistributed(Dense(vocab_size, activation='softmax'))) #output shape=(bs, sl, vocab)

        decay = 0.5*lr/epochs
        optimizer = Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=decay, clipvalue=1)
        #optimizer = RMSprop(lr=lr, decay=decay)
        model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['categorical_accuracy'])
    print(model.summary())

    # callbacks
    checkpoint = ModelCheckpoint(dir_model, monitor='loss', save_best_only=True, mode='min')
    #earlystop = EarlyStopping(monitor='val_loss', min_delta=0.01, patience=3)
    callbacks_list = [checkpoint]

    # train
    model.fit_generator(one_hot_gen(X_train, y_train, vocab_size, seq_length, batch_size),
                        steps_per_epoch=len(X_train)/batch_size, epochs=epochs, callbacks=callbacks_list,
                        validation_data=one_hot_gen(X_valid, y_valid, vocab_size, seq_length, batch_size),
                        validation_steps=len(X_valid)/batch_size)
    model.save(dir_model)
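one_hot_gen is not shown in this example. A plausible sketch, assuming X and y hold integer character indices of shape (samples, seq_length), which matches the (seq_length, vocab_size) input and TimeDistributed softmax output above:

import numpy as np

def one_hot_gen(X, y, vocab_size, seq_length, batch_size):
    # Yield endless one-hot encoded batches for fit_generator (assumed interface).
    eye = np.eye(vocab_size, dtype='float32')
    while True:
        for i in range(0, len(X) - batch_size + 1, batch_size):
            xb = eye[X[i:i + batch_size]]   # (batch_size, seq_length, vocab_size)
            yb = eye[y[i:i + batch_size]]   # one target character per timestep
            yield xb, yb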
def useCNN():

	# Load data
	(train_images, train_labels), (test_images, test_labels) = load_data()


	# Change the labels from integer to categorical data
	nClasses = len(np.unique(train_labels))
	train_labels_one_hot = to_categorical(train_labels)
	test_labels_one_hot = to_categorical(test_labels)


	img_rows, img_cols = train_images.shape[1], train_images.shape[2]
	input_shape = (img_rows, img_cols, 2)


	# Build the CNN neural network 
	model = Sequential()
	model.add(Conv2D(16, kernel_size=(3, 3),
						 padding='valid', 
						 activation='relu', 
						 input_shape=input_shape))
	model.add(Conv2D(32, (3, 3), padding='valid', activation='relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))
	model.add(Dropout(0.25))
	model.add(Flatten())
	model.add(Dense(32, activation='relu'))
	model.add(Dropout(0.5))
	model.add(Dense(nClasses, activation='softmax'))


	model.compile(optimizer=keras.optimizers.Adadelta(), loss=keras.losses.categorical_crossentropy, metrics=['accuracy'])


	# Train it
	history = model.fit(train_images, train_labels_one_hot, batch_size=256, epochs=10, verbose=1, validation_data=(test_images, test_labels_one_hot))


	# Evaluate it
	[test_loss, test_acc] = model.evaluate(test_images, test_labels_one_hot)
	print("Evaluation result on Test Data : Loss = {}, accuracy = {}".format(test_loss, test_acc))


	# Save the model so it can be used later on
	model.save('./keras_model.h5', overwrite=True)


	return history
Example no. 11
def train_model_from_dir(root, vocabulary, vectors):
    word_dimension = 300
    context_size = 4
    hidden_layer_size = 100
    vocabulary_size = 30000
    log_file_name = "C:\\corpora\\MSCC\\log_mscc.txt"
    model_save_file_name = "C:\\corpora\\models\\model.h5"

    start_time = time.time()
    print("Creating the model object")
    model = Sequential()
    model.add(Dense(hidden_layer_size, input_dim=context_size * word_dimension, init='uniform', activation='tanh'))
    model.add(Dense(vocabulary_size, init='normal', activation='softmax'))  # can be also sigmoid (for a multiclass)
    print("compiling...")
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    print("compiled!")

    count = 0
    for path, subdirs, files in os.walk(root):
        for name in files:
            current_filename = os.path.join(path, name)
            if current_filename.endswith("TXT"):
                if find_word_index(current_filename, log_file_name) == -1:  # the file is not logged
                    count = count + 1
                    print("file number", count)
                    print(current_filename)
                    data = get_tokenized_file_to_vectors(vocabulary, current_filename, vectors)
                    print("got data")
                    dataset = np.genfromtxt(StringIO(data), delimiter=",")
                    print("got dataset")
                    X = dataset[:, 0:word_dimension * context_size]
                    Y = dataset[:, word_dimension * context_size:]
                    arrX = np.array(X)
                    arrY = np.array(Y)
                    model.fit(arrX, arrY, nb_epoch=50, batch_size=dataset.shape[0])  # check the batch size
                    log_train_file(current_filename, dataset.shape[0])
                    if count % 10 == 0:
                        print("Saving model...", count)
                        model.save(model_save_file_name)
                else:
                    print("file already trained:", current_filename)

    end_time = time.time()
    print("elapsed time", end_time - start_time)
    model.save(model_save_file_name)
def train_func(keras_training_X, keras_training_Y, training_size, classif_num, queue_size, channels = 3):
    # CNN LSTM Network
    filter_size = 3
    cnn = Sequential()
    cnn.add(Conv1D(filters=1, kernel_size=3, activation='relu', padding='same', input_shape=(channels, filter_size)))
    cnn.add(MaxPooling1D(pool_size=channels))
    cnn.add(Flatten())
    model = Sequential()
    model.add(TimeDistributed(cnn, input_shape=(queue_size-filter_size+1, channels, filter_size)))
    model.add(LSTM(100))
    model.add(Dense(classif_num, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.fit(keras_training_X, keras_training_Y, epochs = 5, batch_size = 64)

    # save model
    model.save('model.h5')

Example no. 13
def fit_lstm(X, y):
    """
        训练LSTM模型

        参数:
            - X:    数据特征
            - y:    数据标签

        返回:
            - model:    训练好的模型
    """
    n_sample = X.shape[0]       # 样本个数
    n_feat_dim = X.shape[1]     # 特征维度

    # shape: (样本个数, time step, 特征维度)
    X = X.reshape(n_sample, config.timestep, n_feat_dim)
    # X = X.reshape(n_sample, n_feat_dim, 1)
    print(X.shape)
    # 构建模型
    model = Sequential()
    model.add(LSTM(config.nodes,
                   batch_input_shape=(config.batch_size, config.timestep, n_feat_dim),
                   stateful=True))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')

    print('Start training...')

    model.fit(X, y, epochs=10, batch_size=config.batch_size, verbose=0, shuffle=False)
    '''
    for i in range(config.nb_epoch):
        print('Finished epoch {} of {}'.format(i + 1, config.nb_epoch))
        model.fit(X, y, epochs=1, batch_size=config.batch_size, verbose=0, shuffle=False)
        model.reset_states()
    '''
    print('Training finished...')
    # Run once over all the training samples to build up the cell state
    # why?
    model.predict(X, batch_size=config.batch_size)

    # Save the model
    model.save(config.model_file)

    return model
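Because the LSTM is stateful with a fixed batch_input_shape, any later prediction must use the same batch size. A hedged sketch of one-step-ahead use of the returned model (X and config as above):

# The stateful network only accepts batches of exactly config.batch_size samples.
model.reset_states()
model.predict(X, batch_size=config.batch_size)     # replay the history to rebuild the cell state

X_recent = X[-config.batch_size:]                  # hypothetical: the most recent windows
y_hat = model.predict(X_recent, batch_size=config.batch_size)
print(y_hat.ravel()[-1])                           # estimate for the latest window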
Example no. 14
def train(train_generator,train_size,input_num,dims_num):
    print("Start Train Job! ")
    start=time.time()
    inputs=InputLayer(input_shape=(input_num,dims_num),batch_size=batch_size)
    layer1=LSTM(128)
    output=Dense(2,activation="softmax",name="Output")
    optimizer=Adam()
    model=Sequential()
    model.add(inputs)
    model.add(layer1)
    model.add(Dropout(0.5))
    model.add(output)
    call=TensorBoard(log_dir=log_dir,write_grads=True,histogram_freq=1)
    model.compile(optimizer,loss="categorical_crossentropy",metrics=["accuracy"])
    model.fit_generator(train_generator,steps_per_epoch=train_size//batch_size,epochs=epochs_num,callbacks=[call])
#    model.fit_generator(train_generator, steps_per_epoch=5, epochs=5, callbacks=[call])
    model.save(model_dir)
    end=time.time()
    print("Over train job in %f s"%(end-start))
Example no. 15
def Dense_network(path, x, y):
	x = np.reshape(x, (x.shape[0], 22, 1))
	X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)

	model = Sequential()
	model.add(Dense(22, input_shape=(22,1)))
	model.add(Activation('relu'))
	model.add(Dense(100))
	model.add(Activation('relu'))
	model.add(Dense(50))
	model.add(Activation('relu'))
	model.add(Flatten())
	model.add(Dense(3, activation='sigmoid'))

	model.compile(loss='mean_squared_error', optimizer='adam')
	model.fit(X_train, y_train, epochs=100, batch_size=32, verbose=0)

	print(model.evaluate(X_train, y_train))
	print(model.evaluate(X_test, y_test))

	model.save(path)	
Example no. 16
def train_model_from_dir(root, vocabulary_filename):

    start_time = time.time()
    print("Creating the model object")
    model = Sequential()
    model.add(Dense(10, input_dim=10000, init="uniform", activation="relu"))
    model.add(Dense(10000, init="normal", activation="softmax"))  # can be also sigmoid (for a multiclass)
    print("compiling...")
    model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
    print("compiled!")
    count = 0
    for path, subdirs, files in os.walk(root):
        for name in files:
            current_filename = os.path.join(path, name)
            if current_filename.endswith("txt"):
                count = count + 1
                print("file number", count)
                file_start = time.time()
                data = get_tokenized_file_to_vectors2(vocabulary_filename, current_filename)
                #   print("read file:", count)
                dataset = np.genfromtxt(StringIO(data), delimiter=",")
                #   print("shape", dataset.shape)
                if len(dataset.shape) == 2 and dataset.shape[1] == 20000:
                    X = dataset[:, 0:10000]
                    Y = dataset[:, 10000:]
                    arrX = np.array(X)
                    arrY = np.array(Y)
                    model.fit(arrX, arrY, nb_epoch=50, batch_size=dataset.shape[0])  # check the batch size
                    log_train_file(current_filename, dataset.shape[0])
                else:
                    log_fail_file(current_filename)
                file_end = time.time()
                print("file time:", file_end - file_start)
                if count % 10 == 0:
                    print("Saving model...", count)
                    model.save("C:\corpora\\model.h5")
    end_time = time.time()
    print("elapsed time", end_time - start_time)
    model.save("C:\corpora\\model.h5")
Example no. 17
def run_model(filename, train_X, train_y, test_X, test_y):
    train_X, train_y = shuffle_data(train_X, train_y)
    
    
    
    from sklearn import preprocessing
    #train_X = preprocessing.scale(train_X)
    #test_X = preprocessing.scale(test_X)
    scaler = preprocessing.StandardScaler().fit(train_X)
    train_X = scaler.transform(train_X)
    test_X = scaler.transform(test_X)
    
    from sklearn import svm
    model = svm.SVC()
    y = np.argmax(train_y, axis=1)
    model.fit(train_X, y)
    p = model.predict(test_X)
    print('svm f1 =', f1_score(np.argmax(test_y, axis=1), p)) # for comparison purposes
    
    model = Sequential()
    model.add(Dense(70, input_dim=204, activation='relu', kernel_regularizer=regularizers.l2(0.00)))	# change input dim as necessary, it is kept this way here to showcase the dimensionality of best presented model in the paper
    model.add(Dense(60, activation='relu', kernel_regularizer=regularizers.l2(0.00)))
    model.add(Dense(50, activation='relu', kernel_regularizer=regularizers.l2(0.00)))
    model.add(Dense(40, activation='relu', kernel_regularizer=regularizers.l2(0.00)))
    
    model.add(Dense(2, activation='softmax'))
    
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    
    model.fit(x=train_X, y=train_y, batch_size=32, epochs=15, shuffle=True)
    
    os.chdir('D:/Python/TAR/system/models')
    model.save(filename + '.h5')	# manually move to folder based on neural network type produced
                                                 #
    
    p = model.predict(test_X)
    print('f1', filename, ':', f1_score(np.argmax(test_y, axis=1), np.argmax(p, axis=1)))
Example no. 18
def train_model_from_dir_batch(root, vocabulary, vectors):
    word_dimension = 300
    context_size = 4
    hidden_layer_size = 100
    vocabulary_size = 30000
    log_file_name = "C:\\corpora\\MSCC\\log_mscc.txt"
    model_save_file_name = "C:\\corpora\\models\\model.h5"

    start_time = time.time()
    print("Creating the model object")
    model = Sequential()
    model.add(Dense(hidden_layer_size, input_dim=context_size * word_dimension, init='uniform', activation='tanh'))
    model.add(Dense(vocabulary_size, init='normal', activation='softmax'))  # can be also sigmoid (for a multiclass)
    print("compiling...")
    model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    print("compiled!")

    count = 0
    for path, subdirs, files in os.walk(root):
        for name in files:
            current_filename = os.path.join(path, name)
            if current_filename.endswith("TXT"):
                if find_word_index(current_filename, log_file_name) == -1:  # the file is not logged
                    count = count + 1
                    print("file number", count)
                    print(current_filename)
                    model.fit_generator(n_gram_generator(vocabulary,current_filename, vectors), samples_per_epoch=100,
                                        nb_epoch=50)
                    log_train_file(current_filename, 666)
                    print("Saving model...", count)
                    model.save(model_save_file_name)
                else:
                    print("file already trained:", current_filename)

    end_time = time.time()
    print("elapsed time", end_time - start_time)
    model.save(model_save_file_name)
Example no. 19
def train_keras_model( train_x, train_y, dnn_model_path ):
	print(train_x.shape, train_y.shape)
	if os.path.exists(dnn_model_path): os.remove(dnn_model_path)  # delete any previous model so training always starts fresh
	if os.path.exists(dnn_model_path): model = load_model(dnn_model_path)
	else:
		model = Sequential()
		model.add( Dense(2, activation='sigmoid', input_shape=(train_x.shape[1],))  )
		#model.add( Dropout(0.25) )
		model.add( Dense(1, activation='sigmoid') )
		model.compile( loss='binary_crossentropy', optimizer='adam', metrics=['acc'] )
		model.fit(
			train_x, 
			train_y, 
			batch_size=256,
			epochs=50,
			verbose=1, 
			validation_split = 0.05,
			#class_weight = {1:1, 0:pos_nag_ratio},
			callbacks = [
				EarlyStopping(monitor='val_loss', patience=2, verbose=0, mode='auto'),
			],
		)
		model.save(dnn_model_path)
	return model
# Part 2 - Fitting the CNN to the images

from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   rotation_range=30,
                                   horizontal_flip=False,
                                   vertical_flip=False)

test_datagen = ImageDataGenerator(rescale=1. / 255)

training_set = train_datagen.flow_from_directory(
    r'G:\Internship\dataset\train',
    target_size=(96, 96),
    batch_size=32,
    class_mode='binary')

test_set = test_datagen.flow_from_directory(r'G:\Internship\dataset\test',
                                            target_size=(96, 96),
                                            batch_size=32,
                                            class_mode='binary')

classifier.fit_generator(training_set,
                         steps_per_epoch=163,
                         epochs=5,
                         validation_data=test_set,
                         validation_steps=20)
classifier.save('Model_pneumonia6.h5')
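The fragment above fits and saves a classifier that was built in "Part 1", which is not shown. A minimal sketch of such a binary CNN sized for the 96x96 RGB inputs used here (layer sizes are illustrative, not the original author's):

from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout

classifier = Sequential()
classifier.add(Conv2D(32, (3, 3), activation='relu', input_shape=(96, 96, 3)))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Conv2D(64, (3, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Flatten())
classifier.add(Dense(128, activation='relu'))
classifier.add(Dropout(0.5))
classifier.add(Dense(1, activation='sigmoid'))
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])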
        print "R2 improvement: ", r2_diff
        print "-----------------------"
        if r2_diff < min_delta:
            print "Improvement Does Not Meet Minimum, Adding One to Patience Score"
            print "-----------------------"
            current_patience += 1
        if current_patience >= patience:
            print "Stopping training, patience threshold met"
            break
        last_r2 = current_r2


print 'Training model!'

# Calling the function we just wrote.  May need to tune batch size, min_delta, as well as the model.
early_stopping_cont_nn(model,
                       X=np.array(X_train),
                       y=np.array(y_train).ravel(),
                       val_data=(np.array(X_test), np.array(y_test).ravel()),
                       epochs_per_iter=1,
                       batch_size=500,
                       verbose=1,
                       max_iterations=1000,
                       min_delta=.0005,
                       patience=2)

print(r2_score(y_test, model.predict(np.array(X_test))))

model.save('./neural_net_models/nn_2_wide.h5')
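The fragment above shows only the tail of early_stopping_cont_nn plus the call site. A full reconstruction consistent with that patience logic is sketched below; treat it as an assumption, not the original code:

import numpy as np
from sklearn.metrics import r2_score

def early_stopping_cont_nn(model, X, y, val_data, epochs_per_iter=1, batch_size=500,
                           verbose=1, max_iterations=1000, min_delta=.0005, patience=2):
    # Keep fitting in short bursts until validation R2 stops improving.
    X_val, y_val = val_data
    last_r2 = -np.inf
    current_patience = 0
    for _ in range(max_iterations):
        model.fit(X, y, epochs=epochs_per_iter, batch_size=batch_size, verbose=verbose)
        current_r2 = r2_score(y_val, model.predict(X_val))
        r2_diff = current_r2 - last_r2
        print("R2 improvement: ", r2_diff)
        print("-----------------------")
        if r2_diff < min_delta:
            print("Improvement Does Not Meet Minimum, Adding One to Patience Score")
            print("-----------------------")
            current_patience += 1
        if current_patience >= patience:
            print("Stopping training, patience threshold met")
            break
        last_r2 = current_r2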
model.add(Dense(62))

### End of network ###


# Using a generator to help the model use less data
# Channel shifts help with shadows slightly
datagen = ImageDataGenerator(channel_shift_range=0.2)
datagen.fit(X_train)

# Compiling and training the model
model.compile(optimizer='Adam', loss='sparse_categorical_crossentropy')
model.summary()

y_train = y_train.reshape((-1,1))
y_val = y_val.reshape((-1,1))


gene = datagen.flow(X_train, y_train, batch_size=batch_size)
#pdb.set_trace()

model.fit_generator(gene, steps_per_epoch=len(X_train)/batch_size, epochs=epochs, verbose=1, validation_data=(X_val, y_val))

# Freeze layers since training is done
model.trainable = False
model.compile(optimizer='Adam', loss='mean_squared_error')

# Save model architecture and weights
model.save('full_CNN_model.h5')
Example no. 23
model.add(Dropout(0.1))
model.add(Dense(8))
model.add(Activation('softmax'))

# try using different optimizers and different optimizer configs
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
checkpointer = callbacks.ModelCheckpoint(
    filepath="results/simpleRNN3results/checkpoint-{epoch:02d}.hdf5",
    verbose=1,
    save_best_only=True,
    monitor='val_acc',
    mode='max')
csv_logger = CSVLogger(
    'results/simpleRNN3results/training_set_iranalysis3.csv',
    separator=',',
    append=False)
model.fit(X_train,
          y_train,
          batch_size=batch_size,
          nb_epoch=20,
          validation_data=(X_test, y_test),
          callbacks=[checkpointer, csv_logger])
model.save("results/simpleRNN3results/lstm4layer_model.hdf5")

loss, accuracy = model.evaluate(X_test, y_test)
print("\nLoss: %.2f, Accuracy: %.2f%%" % (loss, accuracy * 100))
y_pred = model.predict_classes(X_test)
#np.savetxt('results/simpleRNN3results/lstm4predicted.txt', np.transpose([y_test,y_pred]), fmt='%01d')
Example no. 24
history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=2, shuffle=True, validation_data=(x_test, y_test),
                    callbacks=[tbCallBack])


score = model.evaluate(x_test, y_test)

print("Test loss: ", score[0])
print("Test accuracy: ", score[1])

fig = plt.figure()
plt.subplot(2, 1, 1)
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='lower right')

plt.subplot(2, 1, 2)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')

plt.tight_layout()

plt.show(fig)
model.save('anmodel.h5')
Example no. 25
#model.compile(optimizer='nadam', loss='mean_squared_error')

#set early stopping monitor so the model stops training when it won't improve anymore
early_stopping_monitor = EarlyStopping(patience=50, monitor='loss')

#train model
model.fit(x_train,
          y_train,
          batch_size=my_batch,
          epochs=num_epochs,
          shuffle=False,
          validation_data=(x_test, y_test),
          callbacks=[early_stopping_monitor])

# save model
model.save(model_path)

#make predictions
y_train_pred = model.predict(x_train)
y_test_pred = model.predict(x_test)
pd.set_option("display.max_rows", 10)
print(abs(y_train_pred - y_train).sort_values(by='B/A') * BA_scaleMeV)
print(abs(y_test_pred - y_test).sort_values(by='B/A') * BA_scaleMeV)

print('MSQE_train (MeV)',
      mean_squared_error(y_train, y_train_pred) * BA_scaleMeV)
print('MSQE_test  (MeV)',
      mean_squared_error(y_test, y_test_pred) * BA_scaleMeV)

K.clear_session()
Example no. 26
model.add(Dense(n_outputs, activation='softmax'))
model.summary()

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
weight_path="{}_weights.best.hdf5".format('sentiment')
checkpoint = ModelCheckpoint(weight_path, monitor='val_acc', verbose=1, save_best_only=True, mode='max', save_weights_only=True)

train_steps = len(train_set) // BATCH_SIZE
validation_steps = len(validation_set) // BATCH_SIZE
model.fit_generator(training_data_generator(train_set), steps_per_epoch=train_steps, epochs=n_epochs, validation_data=training_data_generator(validation_set), validation_steps=validation_steps, verbose=1, callbacks=[checkpoint])

print('End training')


model.load_weights(weight_path)
model.save('sentiment_model_004.h5')

model = models.load_model('sentiment_model_004.h5', compile=False)

output = []
for index, value in test_df.iterrows():
    phrase_id = value['PhraseId']
    phrase = value['Phrase']
    word_list = preprocess_phrase(phrase)
    word_list = remove_stop_words(word_list)
    num_list = word_to_num(word_list)
    num_arr = np.array(num_list)
    num_arr = np.expand_dims(num_arr, 0)
    pad_num_arr = sequence.pad_sequences(num_arr, maxlen=longest_phrase_size)
    predict = model.predict_classes(pad_num_arr)
    output += [[phrase_id, predict[0]]]
Example no. 27
model.add(Dense(128))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

Y_test = np.reshape(Y_test, (len(Y_test), 1, 15))
Y_train = np.reshape(Y_train, (len(Y_train), 1, 15))

# nb_epoch=5;
import operator
batch_size = 5
model.fit(x_train,
          Y_train,
          batch_size=batch_size,
          nb_epoch=250,
          verbose=1,
          validation_data=(x_test, Y_test))
model.save('gradCNN.h5')
counter = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])

for i in range(len(x_test)):
    result = model.predict(np.array([x_test[i]]))
    index, value = max(enumerate(result[0][0]), key=operator.itemgetter(1))
    index1, value1 = max(enumerate(Y_test[i][0]), key=operator.itemgetter(1))
    if index == index1:
        counter[index] += 1

print((counter.sum() / len(Y_test)) * 100)
Example no. 28
def train(args):
    if args.d == "mnist":
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        x_train = x_train.reshape(-1, 28, 28, 1)
        x_test = x_test.reshape(-1, 28, 28, 1)

        layers = [
            Conv2D(64, (3, 3), padding="valid", input_shape=(28, 28, 1)),
            Activation("relu"),
            Conv2D(64, (3, 3)),
            Activation("relu"),
            MaxPooling2D(pool_size=(2, 2)),
            Dropout(0.5),
            Flatten(),
            Dense(128),
            Activation("relu"),
            Dropout(0.5),
            Dense(10),
        ]

    elif args.d == "cifar":
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()

        layers = [
            Conv2D(32, (3, 3), padding="same", input_shape=(32, 32, 3)),
            Activation("relu"),
            Conv2D(32, (3, 3), padding="same"),
            Activation("relu"),
            MaxPooling2D(pool_size=(2, 2)),
            Conv2D(64, (3, 3), padding="same"),
            Activation("relu"),
            Conv2D(64, (3, 3), padding="same"),
            Activation("relu"),
            MaxPooling2D(pool_size=(2, 2)),
            Conv2D(128, (3, 3), padding="same"),
            Activation("relu"),
            Conv2D(128, (3, 3), padding="same"),
            Activation("relu"),
            MaxPooling2D(pool_size=(2, 2)),
            Flatten(),
            Dropout(0.5),
            Dense(1024, kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01)),
            Activation("relu"),
            Dropout(0.5),
            Dense(512, kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01)),
            Activation("relu"),
            Dropout(0.5),
            Dense(10),
        ]

    x_train = x_train.astype("float32")
    x_test = x_test.astype("float32")
    x_train = (x_train / 255.0) - (1.0 - CLIP_MAX)
    x_test = (x_test / 255.0) - (1.0 - CLIP_MAX)

    y_train = np_utils.to_categorical(y_train, 10)
    y_test = np_utils.to_categorical(y_test, 10)

    model = Sequential()
    for layer in layers:
        model.add(layer)
    model.add(Activation("softmax"))

    print(model.summary())
    model.compile(
        loss="categorical_crossentropy", optimizer="adadelta", metrics=["accuracy"]
    )

    model.fit(
        x_train,
        y_train,
        epochs=50,
        batch_size=128,
        shuffle=True,
        verbose=1,
        validation_data=(x_test, y_test),
    )

    model.save("./model/model_{}.h5".format(args.d))
plt.figure(figsize=(20,20))
for i in range(9):
    for j in range(9):
        fno=i*9 + j
        tempf1 = []
        tempf1.append(filters[1][fno])
        tempf2 = []
        tempf2.append(filters[1][fno])
        tempf3 = []
        tempf3.append(filters[1][fno])
        for k in range(11):
            tempf1.append(filters[0][k][0][fno])
            tempf2.append(filters[0][k][1][fno])
            tempf3.append(filters[0][k][2][fno])
        w1,h1 = freqz(tempf1)
        w2,h2 = freqz(tempf2)
        w3,h3 = freqz(tempf3)
        plt.subplot(9,9,fno+1)
        plt.plot(w1,np.abs(h1),color='r')
        plt.plot(w2,np.abs(h2),color='g')
        plt.plot(w3,np.abs(h3),color='b')
plt.tight_layout()
plt.savefig('FilterMap.png')
print('Done.')

#######################################################

print('Saving model...',end='')
model.save('EqModel.hd5')
print('Done.')
Example no. 30
model.add(Flatten())
model.add(Dense(2048, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))

#compile model
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

#Tensorboard for visualise
tensorboard = TensorBoard(log_dir='Mnist_log/' + tensorboard_name,
                          histogram_freq=30)

#Feed the data
model.fit(x_train,
          y_train,
          epochs=2,
          batch_size=128,
          validation_data=(x_test, y_test),
          callbacks=[tensorboard])

#Save Model
model.save(model_Name + '.model')

#Delete existing model
del model

#load saved model
save_model = keras.models.load_model(model_Name + '.model')
# this is a generator that will read pictures found in
# subfolers of 'data/train', and indefinitely generate
# batches of augmented image data
train_generator = train_datagen.flow_from_directory(
    'train_labeling',  # this is the target directory
    target_size=(500, 500),  # all images will be resized to 500x500
    batch_size=batch_size,
    class_mode='binary'
)  # since we use binary_crossentropy loss, we need binary labels
print(train_generator.batch_size)

# this is a similar generator, for validation data
validation_generator = test_datagen.flow_from_directory('test_labeling',
                                                        target_size=(500, 500),
                                                        batch_size=batch_size,
                                                        class_mode='binary')

print(validation_generator.class_indices)
print(validation_generator.classes)
model.fit_generator(train_generator,
                    steps_per_epoch=2000 // batch_size,
                    epochs=2,
                    validation_data=validation_generator,
                    validation_steps=800 // batch_size)
model.save_weights(
    'first_try.h5'
)  # always save your weights after training or during training

model.save(
    'model.h5')  # always save your weights after training or during training
Example no. 32
model.add(Dense(4, input_dim=4, activation='relu'))

# hidden layer
model.add(Dense(8, activation='relu'))

# output layer
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# start training and print out enough infomation
model.fit(scaled_X_train, y_train, epochs=50, verbose=2)

model.metrics_names

from sklearn.metrics import confusion_matrix, classification_report

# build the confuse matrix
predictions = model.predict_classes(scaled_X_test)

confusion_matrix(y_test, predictions)
print(classification_report(y_test, predictions))

# save the trained model and use it
model.save('mysupermodel.h5')
from keras.models import load_model

newmodel = load_model('mysupermodel.h5')
newmodel.predict_classes(scaled_X_test)
Example no. 33
test_x=np.transpose(test_x,[0,2,1])
test_y = hot_data_y[split_point:]
print("Data processing is finished!")
# design network
model = Sequential()
# model.add(LSTM(30, input_shape=(train_x.shape[1], train_x.shape[2]),kernel_regularizer=regularizers.l2(0.001),activity_regularizer=regularizers.l1(0.001)))
model.add(LSTM(30, input_shape=(train_x.shape[1], train_x.shape[2])))
# model.add(Dropout(0.2))
# model.add(LSTM(6, return_sequences=False))
model.add(Dense(labels, activation='softmax'))
model.summary()  # print a summary of the model
model.compile(loss='categorical_crossentropy', optimizer='adam',metrics=['accuracy'])
# fit network
history = model.fit(train_x,train_y, epochs=epochs, batch_size=batch_size, validation_data=(test_x, test_y), verbose=2, shuffle=True)
# save the model file after training
model.save('./models/lstm_model_label.h5')
# test the model
score = model.evaluate(test_x, test_y, verbose=2)  # evaluate computes the loss/metrics on the given data, batch by batch
print('Test accuracy:', score[1])
score = model.evaluate(train_x, train_y, verbose=2)  # evaluate computes the loss/metrics on the given data, batch by batch
print('Train accuracy:', score[1])
# export the data
prediction_label = model.predict_classes(test_x)
prediction_label=[i+1 for i in prediction_label]
fact_label=np.argmax(test_y,1)
fact_label=[i+1 for i in fact_label]
analysis=[fact_label, prediction_label]
wb = openpyxl.Workbook()
sheet = wb.active
sheet.title = 'analysis_data'
for i in range(0, 2):
Example no. 34
model.add(BatchNormalization())
model.add(Conv2D(rank2, kernel_size=(1, 12), activation=None))
model.add(BatchNormalization())
model.add(Conv2D(128, kernel_size=(1, 1), activation='relu'))

# model.add(Dropout(0.5))
model.add(Conv2D(num_classes, kernel_size = (1, 1), activation=None))
# model.add(Conv2D(num_classes, (1, 1), activation='linear',  padding='same', use_bias=False))
model.add(Flatten())
model.add(Activation('softmax'))

model.summary()


model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(
                  lr=0.001, beta_1=0.9, beta_2=0.999),
              metrics=['accuracy'])

model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))

score=model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

model.save("mnist_cpd2.h5")
Example no. 35
#Fitting the CNN to images
from keras.preprocessing.image import ImageDataGenerator
import scipy.ndimage

train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1. / 255)

training_set = train_datagen.flow_from_directory('dataset/training_set',
                                                 target_size=(64, 64),
                                                 batch_size=32,
                                                 class_mode='binary')

test_set = test_datagen.flow_from_directory('dataset/test_set',
                                            target_size=(64, 64),
                                            batch_size=32,
                                            class_mode='binary')

classifier.fit_generator(training_set,
                         steps_per_epoch=250,
                         epochs=25,
                         validation_data=test_set,
                         validation_steps=62.5)

print("Saving model")
classifier.save('cat_dog_classifier.h5')
print('Done..')
Example no. 36
model.add(Conv2D(64, (3, 3), activation='relu',input_shape=(img_rows,img_cols,1)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(2048, activation='relu'))
#model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
#model.add(Dropout(0.75))
model.add(Dense(nb_classes, activation='softmax'))
model.summary()
#from pdb import set_trace;set_trace()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

datagen = ImageDataGenerator(
    #featurewise_center=True,
    #samplewise_center=True,
    rotation_range=2,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True
)
print(X_train.shape)

model.fit_generator(datagen.flow(X_train, y_train, batch_size=256), class_weight=class_weight,
                    nb_epoch=50, verbose=1, steps_per_epoch=100,
                    callbacks=[WandbKerasCallback()],
                    validation_data=(X_val, y_val))
model.save("smile.h5")
Example no. 37
def TrainingNN(X_trainf,
               X_cv,
               Y_trainf,
               Y_cv,
               epoch=1000,
               O=0,
               A=0,
               D=0):  # This function is where the NeuralNets train

    optims = [
        optimizers.SGD(lr=0.4),
        optimizers.RMSprop(lr=0.4), 'Adagrad', 'Adadelta', 'Adam', 'Adamax',
        'Nadam'
    ]  #7

    activs = ['linear', 'sigmoid', 'tanh', 'softsign', 'relu', 'softplus']  #6
    drops = [0, 0.3, 0.6, 0.9]  #4

    model = Sequential()

    model.add(Dense(50, activation=activs[A], input_dim=10))
    model.add(Dense(70, activation=activs[A]))
    model.add(Dense(80, activation=activs[A]))
    model.add(Dropout(drops[D]))
    model.add(Dense(40, activation=activs[A]))
    model.add(Dense(4, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer=optims[O],
                  metrics=['acc'])

    history = model.fit(X_trainf, Y_trainf, batch_size=64, epochs=epoch)

    model.save('my_model.h5')

    #Train cv split				0.25
    #Layers					50 70 80 0.1D 40 4
    #Optimiser				Nadam
    #Epochs					300
    #Activation fn
    #LR if not Nadam
    #Regularizing
    #model = load_model('my_model.h5')

    #mllist=[]
    #mlpredcv=[]
    #mlpredtr=[]
    #mllist.append(RandomForestClassifier())
    #mllist.append(GaussianNB())
    #for i in range(1):
    #	mllist[i].fit(X_trainf,Y_trainf)
    #	mlpredcv.append(mllist[i].predict(X_cv))
    #	mlpredtr.append(mllist[i].predict(X_trainf))
    #for i in range(1):
    #	print("\nRF Training (Should be 100)				:",accuracy_score(Y_trainf, mlpredtr[i])*100)
    #	print("RF Cross Validation (Hopefully 100)		:",accuracy_score(Y_cv, mlpredcv[i])*100)

    predictioncv = model.predict_classes(X_cv)
    predictrain = model.predict_classes(X_trainf)
    PREDCV = np.argmax(Y_cv, axis=1)
    PREDTR = np.argmax(Y_trainf, axis=1)

    cvaccuracy = accuracy_score(PREDCV, predictioncv) * 100
    print("\n\nTraining 			:", accuracy_score(PREDTR, predictrain) * 100)
    print("Cross Validation 		:", cvaccuracy)

    optimsname = [
        'SGD', 'RMSprop', 'Adagrad', 'Adadelta', 'Adam', 'Adamax', 'Nadam'
    ]
    #	print(history.history.keys())
    plt.plot(history.history['acc'])
    #	plt.plot(history.history['val_acc'])
    Title = (optimsname[O] + ' Optimiser with ' + activs[A] +
             ' Activation having ' + str(drops[D]) + ' Dropout')
    plt.title(Title)
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend([optimsname[O] + ' Optimiser with ' + activs[A]],
               loc='upper left')
    plt.show()
    #	savefilename='test'+str(O)+str(A)+str(D)+'a.png'
    #	plt.savefig(savefilename, bbox_inches='tight')

    return (predictioncv, predictrain, cvaccuracy)
Example no. 38
    def fun_run(self, datain, dataou, ind_train, ind_test):

        # requires:
        i0 = self.i0
        i1 = self.i1
        lay_neuron = self.lay_neuron

        #normalization
        if standard_val:
            datain = sc.fit_transform(datain)
            dataou = sc.fit_transform(dataou)

        else:
            datain = fun_scale(datain, lb, ub)
            dataou = fun_scale(dataou, lb, ub)

        # get the indices
        ind_train = ind_train[i0 - 1][i1 - 1]
        ind_test = ind_test[i0 - 1][i1 - 1]

        # get the training data
        datain_train = datain[ind_train]
        dataou_train = dataou[ind_train]

        # get the testing data
        datain_test = datain[ind_test]
        dataou_test = dataou[ind_test]

        tail_val = ''
        for jj in range(len(lay_neuron)):
            tail_val = tail_val + str(lay_neuron[jj]) + '_'

        tail_val = tail_val[:-1]

        filename = "model_keras_{}_{}_{}.h5".format(i0, i1, tail_val)
        filename = os.path.join(dir_data_python, filename)

        filename_dir = glob.glob(filename)
        if len(filename_dir) == 0:
            print("Initiate new Sequential network from scratch")

            model = Sequential()
            model.add(
                Dense(lay_neuron[0],
                      input_dim=datain_train.shape[1],
                      activation=activation_1st))
            #model.add(Dense(lay_neuron[0], input_dim=datain_train.shape[1]))
            #model.add(PReLU(alpha_initializer="zeros",alpha_regularizer=None,alpha_constraint=None,shared_axes=None,))

            #model.add(Dropout(dropout))
            for l0 in lay_neuron[1:]:
                print(l0)
                model.add(
                    Dense(l0,
                          activation=activation_hid,
                          kernel_regularizer=regularizers.l2(reg_rate)))
                #model.add(Dense(l0, kernel_regularizer=regularizers.l2(reg_rate)))
                #model.add(PReLU(alpha_initializer="zeros",alpha_regularizer=None,alpha_constraint=None,shared_axes=None,))
                model.add(Dropout(dropout))

            model.add(Dense(dataou_train.shape[1], activation=activation_lst))
            #sgd = optimizers.SGD(lr=0.5, decay=1e-9, momentum=0.9, nesterov=True)
            model.compile(loss=loss, optimizer=optimizer)
        else:
            print("Initiate Sequential transfer learning")
            model = load_model(filename)

        #reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.002,
        #                      patience=5, min_lr=0.000000000000000001)

        # train

        print("-------------------------------")
        print("TR-LEARN-KERAS | rrt {} | rrs {} | Net {}".format(
            i0, i1, tail_val))
        print("-------------------------------")

        history = model.fit(
            datain_train,
            dataou_train,
            epochs=epochs,
            batch_size=batch_size,
            verbose=verbose,
            shuffle=shuffle,
            validation_split=validation_split)  #callbacks=[reduce_lr]
        # save the model to disk
        #pickle.dump(my_neural_network, open(filename, 'wb'))
        model.save(filename)

        loss_vals = history.history['loss']
        val_loss_vals = history.history['val_loss']

        filename = "model_keras_loss_{}_{}_{}.txt".format(i0, i1, tail_val)
        filename = os.path.join(dir_data_python, filename)

        try:
            f = open(filename, 'x')
        except:
            f = open(filename, 'w')

        f.write("{}".format(loss_vals))
        f.close()

        filename = "model_keras_val_loss_{}_{}_{}.txt".format(i0, i1, tail_val)
        filename = os.path.join(dir_data_python, filename)
        try:
            f = open(filename, 'x')
        except:
            f = open(filename, 'w')

        f.write("{}".format(val_loss_vals))
        f.close()

        print("-------------------------------")
        print("TE-LEARN-KERAS | rrt {} | rrs {} | Net {}".format(
            i0, i1, tail_val))
        print("-------------------------------")

        # estimate train and test data
        dataes_train = model.predict(datain_train)
        dataes_test = model.predict(datain_test)

        trou = numpy.reshape(
            dataou_train, (dataou_train.shape[0] * dataou_train.shape[1], 1))
        tres = numpy.reshape(
            dataes_train, (dataes_train.shape[0] * dataes_train.shape[1], 1))
        teou = numpy.reshape(dataou_test,
                             (dataou_test.shape[0] * dataou_test.shape[1], 1))
        tees = numpy.reshape(dataes_test,
                             (dataes_test.shape[0] * dataes_test.shape[1], 1))

        filename = 'trou_{}_{}_{}.csv'.format(i0, i1, tail_val)
        filename = os.path.join(dir_data_python, filename)
        numpy.savetxt(filename, trou, delimiter=",")

        filename = 'tres_{}_{}_{}.csv'.format(i0, i1, tail_val)
        filename = os.path.join(dir_data_python, filename)
        numpy.savetxt(filename, tres, delimiter=",")

        filename = 'teou_{}_{}_{}.csv'.format(i0, i1, tail_val)
        filename = os.path.join(dir_data_python, filename)
        numpy.savetxt(filename, teou, delimiter=",")

        filename = 'tees_{}_{}_{}.csv'.format(i0, i1, tail_val)
        filename = os.path.join(dir_data_python, filename)
        numpy.savetxt(filename, tees, delimiter=",")

        if plot_train_cond:
            print("TR-PLOT-KERAS  | rrt {} | rrs {} | Net {}".format(
                i0, i1, tail_val))

            matplotlib.pyplot.figure(figsize=[10, 10])
            matplotlib.rc('xtick', labelsize=20)
            matplotlib.rc('ytick', labelsize=20)
            matplotlib.rc('font', family='Times New Roman')
            matplotlib.pyplot.plot(trou, tres, '.', markersize=1)
            if standard_val:
                lb_val = -2.5
                ub_val = +2.5
            else:
                lb_val = lb
                ub_val = ub

            matplotlib.pyplot.plot([lb_val, ub_val], [lb_val, ub_val], '-g')
            matplotlib.pyplot.xlabel('Real',
                                     fontsize=20,
                                     fontname='Times New Roman')
            matplotlib.pyplot.ylabel('Estimated',
                                     fontsize=20,
                                     fontname='Times New Roman')
            matplotlib.pyplot.title(
                'Train | RTT = {} | RRS = {} | Net_{}'.format(
                    i0, i1, tail_val),
                fontsize=20,
                fontname='Times New Roman')

            filename = 'tr_keras_{}_{}_{}.png'.format(i0, i1, tail_val)
            filename = os.path.join(dir_data_python, filename)
            matplotlib.pyplot.savefig(filename, dpi=300)

        if plot_test_cond:
            print("TE-PLOT-KERAS  | rrt {} | rrs {} | Net {}".format(
                i0, i1, tail_val))
            matplotlib.pyplot.figure(figsize=[10, 10])
            matplotlib.rc('xtick', labelsize=20)
            matplotlib.rc('ytick', labelsize=20)
            matplotlib.rc('font', family='Times New Roman')
            matplotlib.pyplot.plot(teou, tees, '.', markersize=1)
            if standard_val:
                lb_val = -2.5
                ub_val = +2.5
            else:
                lb_val = lb
                ub_val = ub

            matplotlib.pyplot.plot([lb_val, ub_val], [lb_val, ub_val], '-g')
            matplotlib.pyplot.xlabel('Real',
                                     fontsize=20,
                                     fontname='Times New Roman')
            matplotlib.pyplot.ylabel('Estimated',
                                     fontsize=20,
                                     fontname='Times New Roman')
            matplotlib.pyplot.title(
                'Test | RTT = {} | RRS = {} | Net_{}'.format(i0, i1, tail_val),
                fontsize=20,
                fontname='Times New Roman')

            filename = 'te_keras_{}_{}_{}.png'.format(i0, i1, tail_val)
            filename = os.path.join(dir_data_python, filename)
            matplotlib.pyplot.savefig(filename, dpi=300)
Esempio n. 39
0
#our validation generator
test_datagen = ImageDataGenerator(rescale=1. / 255)
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical')

# printing class weights
print(class_weights)

# tensorboard output
tensorboard = TensorBoard(log_dir="logs/{}".format(time()))

# fitting the model
model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    class_weight=class_weights,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples,
    verbose=1,
    callbacks=[tensorboard],
    initial_epoch=start_epoch
    )

model.save('SecondModel.h5')
model.save_weights('SecondModelWeights.h5')
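# An added aside, not in the original script: the full save above can be restored with
# load_model (architecture + weights + optimizer state in one file), while the
# weights-only file requires the architecture to be rebuilt before calling load_weights.
from keras.models import load_model
restored_model = load_model('SecondModel.h5')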
Esempio n. 40
0
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))
    csv = CSVLogger("epochs1.log")

    # Compile the model
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=0.0001, decay=1e-6),
                  metrics=['accuracy'])

    # Train the model
    model.fit(X_train / 255.0,
              to_categorical(Y_train),
              batch_size=128,
              shuffle=True,
              epochs=63,
              validation_data=(X_test / 255.0, to_categorical(Y_test)),
              callbacks=[csv])

    # Evaluate the model
    scores = model.evaluate(X_test / 255.0, to_categorical(Y_test))

    print('Loss: %.3f' % scores[0])
    print('Accuracy: %.3f' % scores[1])

    import h5py
    model.save('cifar-10.h5')
Esempio n. 41
0
    model.fit(input_train,
              output_train,
              epochs=10000,
              batch_size=4086,
              verbose=1,
              callbacks=[early_stopping])
    print("fit")
    score = model.evaluate(input_train, output_train, batch_size=4086)
    print(model.metrics_names)
    print(score)
    if score[1] > 0.99:  # if accuracy is greater than .99, we have an accurate model
        print("Model settled into accuracy, we're done!")
        break
    print("Model went pathological so we're restarting")

model.save(
    "identity.h5", overwrite=True
)  # it worked, so write the model out to a file. We can load it later with load_model (imported above)

# let's look at our actual values:
for input in [(0), (1)]:
    value = model.predict(x=np.array(
        [input]), batch_size=1)  # ask the model if it thinks it is a 0 or a 1
    print(input, value)  # print the input and the classification of the output

# now let's look at what it thinks of non-integers
for input in np.arange(0, 0.99, 0.01):
    value = model.predict(x=np.array(
        [input]), batch_size=1)  # ask the model if it thinks it is a 0 or a 1
    print(input, value)  # print the input and the classification of the output
         activation='relu'))
lstm_model.add(
    LSTM(60,
         input_shape=(train_X.shape[1], train_X.shape[2]),
         kernel_initializer='normal',
         activation='relu'))
lstm_model.add(Dense(51, kernel_initializer='normal', activation='sigmoid'))
# Compile model
lstm_model.compile(loss='categorical_crossentropy',
                   optimizer='adam',
                   metrics=['accuracy'])

#need to adjust for missing rows
mtst_timeseries = mtst[1:]
bacc_metric = Project_Metrics(mtst_timeseries)
history = lstm_model.fit(train_X,
                         train_Y,
                         epochs=100,
                         batch_size=10000,
                         validation_data=(test_X, test_Y),
                         callbacks=[bacc_metric])
print(bacc_metric.get_data())

lstm_model.save(
    'lstm_step1_prevonly_lstm1.h5')  # creates a HDF5 file 'lstm_basic.h5'
with open('bacc_metric_simple.pkl',
          'wb') as output:  # Overwrites any existing file.
    pickle.dump(bacc_metric.get_data(), output)
with open('bacc_metric_detailed.pkl',
          'wb') as output:  # Overwrites any existing file.
    pickle.dump(bacc_metric.get_detailed_data(), output)
Esempio n. 43
0
              optimizer=adam,
              metrics=['categorical_accuracy'])

# learning schedule callback
history = History()
lrate = LearningRateScheduler(step_decay)
callbacks_list = [lrate, history]

#model Fitting
print "Training..."
model.fit(X_train,
          y_train,
          validation_data=(X_test, y_test),
          nb_epoch=2000,
          batch_size=X_train.shape[0],
          callbacks=callbacks_list,
          verbose=1)
#model.fit(X_train, y_train,validation_data=(X_test,y_test),nb_epoch=550, batch_size=X_train.shape[0],class_weight={0:1, 1:6756.0/271}, callbacks=callbacks_list, verbose=1)

#Model prediction
predicted = model.predict_proba(X_test, batch_size=25)
predicted1 = model.predict_proba(X_val, batch_size=25)
pred = model.predict_classes(X_val, batch_size=25)
y_val = numpy.argmax(y_val, 1)
yt = numpy.argmax(y_test, 1)
print "\n\nROC_AUC Val Data: ", roc_auc_score(y_val, predicted1[:, 1])

numpy.save("Prediction.npy", predicted)
numpy.save("Xtest.npy", X_test)
model.save('H8_student.h5')
Esempio n. 44
0
model.add(Dense(100))
model.add(Dropout(0.5))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))

#Uses Adam optimizer
model.compile(loss='mse', optimizer='adam')

#Model Fit with Generator
history_object = model.fit_generator(train_generator,
                                     samples_per_epoch=len(train_file_path),
                                     validation_data=validation_generator,
                                     nb_val_samples=len(validation_file_path),
                                     nb_epoch=5,
                                     verbose=1)
model.save('model.h5')
print('Model Saved')
#print(history_object.history.keys())

plt.gcf().clear()
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.savefig('training_curve.png')
plt.ion()
plt.show()
training = np.array(training)
# create train and test lists. X - patterns, Y - intents
train_x = list(training[:, 0])
train_y = list(training[:, 1])
print("Training data created")

# Create model - 3 layers. First layer 256 neurons, second layer 128 neurons, and the 3rd (output) layer
# contains one neuron per intent so that softmax can predict the output intent
model = Sequential()
model.add(Dense(256, input_shape=(len(train_x[0]), ), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(train_y[0]), activation='softmax'))

# Compile model. Stochastic gradient descent with Nesterov accelerated gradient gives good results for this model
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])

#fitting and saving the model
hist = model.fit(np.array(train_x),
                 np.array(train_y),
                 epochs=200,
                 batch_size=5,
                 verbose=1)
model.save('chatbot_model.h5')

print("model created")
Esempio n. 46
0
model.add(Dense(256, activation='relu', name='fc1'))
model.add(Dense(16, activation='softmax'))  # one output per label

modelCheckpoint = ModelCheckpoint(FILEPATH, monitor='val_loss', verbose=0, save_best_only=True,
                                  save_weights_only=False, mode='auto', period=1)

reduceLROnPlateau = ReduceLROnPlateau(monitor='val_loss', factor=0.25,
                              patience=5, min_lr=0.0005)

earlyStopping = EarlyStopping(patience=15, monitor='val_loss')

model.compile(loss='categorical_crossentropy',
              optimizer=SGD(lr=LR, momentum=0.9),
              metrics=['accuracy'])

model.fit_generator(train_generator,
                    steps_per_epoch=16 * 800 // BATCHSIZE,  # (Num_cat * pics_cat / batchSize)
                    epochs=100,
                    validation_data=validation_generator,
                    validation_steps=16 * 150 // BATCHSIZE,
                    callbacks=[modelCheckpoint, WandbCallback(), reduceLROnPlateau, earlyStopping])


model.load_weights(FILEPATH, by_name=True)
model.save(os.path.join(wandb.run.dir, "model.h5"))

test_accuracy = model.evaluate_generator(test_generator,
                         steps=16 * 50 // BATCHSIZE)  # (Num_cat * pics_cat / batchSize)

wandb.run.summary["test_accuracy"] = test_accuracy[1]
Esempio n. 47
0
    model = load_model(FILENAME)
    score = model.evaluate(x_test, y_test, verbose=0)
    initial_epochs = pickle.load(open(EPOCHS_FILENAME, "rb"))
    print("Initial network accuracy: %.2f%%, loss: %.4f, epochs: %5d " % (score[1] * 100, score[0], initial_epochs))
else:
    # Create new model
    model = Sequential()
    model.add(Dense(3032, activation="sigmoid", input_dim=90, name="input"))
    model.add(Dense(15, activation='softmax', name="output"))
    model.compile(loss='categorical_crossentropy', optimizer="adadelta", metrics=['accuracy'])
    initial_epochs = 0

model.fit(x_train, y_train,
          batch_size=100000,
          epochs=TOTAL_EPOCHS,
          verbose=0,
          validation_data=(x_test, y_test),
          class_weight = class_weight,
          callbacks=[PLogger(step=1), AutoSaver(model_filename=FILENAME, epochs_filename=EPOCHS_FILENAME, initial_epochs=initial_epochs, every=10)])

score = model.evaluate(x_test, y_test, verbose=0)

final_epochs = initial_epochs + TOTAL_EPOCHS

print("Network accuracy: %.2f%%, loss: %.4f, epochs: %5d " % (score[1] * 100, score[0], final_epochs ))

model.save(FILENAME)
pickle.dump(final_epochs, open( EPOCHS_FILENAME, "wb" ))


            self.dp = keras.layers.Dropout(0.2)

    def call(self, input2):
        x = self.dense1(input2)  # pass the input through the layers in order
        if self.use_dp:
            x = self.dp(x)
        x = self.dense2(x)
        x = self.dense3(x)
        return x

model2 = HousePredict()   # instantiate the subclassed model
model2.compile()          # compile the model
history = model.fit(x_train, y_train, epochs=200, batch_size=200, verbose=2, validation_data=(x_valid, y_valid))

#--------------------------------------- Model visualization ---------------------------------------------------------
import matplotlib.pyplot as plt
plt.plot(history.history['loss'])       # training loss
plt.plot(history.history['val_loss'])   # validation loss
plt.title('Model loss')                 # title
plt.ylabel('Loss')                      # y-axis label
plt.xlabel('Epoch')                     # x-axis label
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()

from keras.utils import plot_model
from keras.models import load_model
model.save('model_MLP.h5')                                      # save the model
plot_model(model, to_file='model_MLP.png', show_shapes=True)    # visualize the model architecture
model = load_model('model_MLP.h5')                              # load the model back
y_new = model.predict(x_valid)                                  # predict on the validation set
min_max_scaler.fit(y_valid_pd)                                  # fit the scaler to invert the label normalization
y_new = min_max_scaler.inverse_transform(y_new)
Esempio n. 49
0
        # apply action, get reward
        x_t, r_t, game_over = game.step(a_t)
        s_t = preprocess_images(x_t)
        # if reward, increment num_wins
        if r_t == 1:
            num_wins += 1
        # store experience
        experience.append((s_tm1, a_t, r_t, s_t, game_over))
        
        if e > NUM_EPOCHS_OBSERVE:
            # finished observing, now start training
            # get next batch
            X, Y = get_next_batch(experience, model, NUM_ACTIONS, 
                                  GAMMA, BATCH_SIZE)
            loss += model.train_on_batch(X, Y)
        
    # reduce epsilon gradually
    if epsilon > FINAL_EPSILON:
        epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / NUM_EPOCHS
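    # Added illustration with hypothetical values (not taken from this script): if
    # INITIAL_EPSILON = 1.0, FINAL_EPSILON = 0.1 and NUM_EPOCHS = 1000, each epoch
    # subtracts (1.0 - 0.1) / 1000 = 0.0009, so exploration decays linearly towards 0.1.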
        
    print("Epoch {:04d}/{:d} | Loss {:.5f} | Win Count: {:d}"
          .format(e + 1, NUM_EPOCHS, loss, num_wins))
    fout.write("{:04d}\t{:.5f}\t{:d}\n"
          .format(e + 1, loss, num_wins))

    if e % 100 == 0:
        model.save(os.path.join(DATA_DIR, "rl-network.h5"), overwrite=True)
        
fout.close()
model.save(os.path.join(DATA_DIR, "rl-network.h5"), overwrite=True)
Esempio n. 50
0
test_labels_one_hot = to_categorical(test_labels)

#determine the number of classes
classes = np.unique(train_labels)
nClasses = len(classes)


#three layers
#activation functions: both tanh and relu
#neurons: 256
model = Sequential()
model.add(Dense(256, activation='tanh', input_shape=(dataDim,)))
model.add(Dropout(0.2))
model.add(Dense(256, activation='tanh'))
model.add(Dropout(0.2))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(nClasses, activation='softmax'))

model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(train_data, train_labels_one_hot, batch_size=256, epochs=50, verbose=1,
                    validation_data=(test_data, test_labels_one_hot))

#test model
[test_loss, test_acc] = model.evaluate(test_data, test_labels_one_hot)
print("Evaluation result on Test Data : Loss = {}, accuracy = {}".format(
    test_loss, test_acc))
#save model
model.save('E:/reitz/3rd year/Project & Professionalism with Computer/shapedetection/data/shapesmodel.h5')
#### Train LSTM network
model = Sequential()
model.add( LSTM( 4, input_dim = look_back ) )
model.add( Dense(1) )
#model.compile( loss = 'mean_absolute_error', optimizer = 'adam' )  # alternative: MAE
model.compile( loss = 'mean_squared_error', optimizer = 'adam' )  # values closer to zero are better
# MSE is used to compare two or more statistical methods. It heavily weights outliers,
# i.e. it penalizes large errors much more than small ones.
# In cases where this is undesired, mean absolute error is used instead.
# REF: available loss functions: https://keras.io/objectives
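# A small numeric aside (added illustration, not from the original script) showing why
# MSE weights an outlier more heavily than MAE does; numpy is assumed to be imported
# as `numpy`, as it is used further below.
example_errors = numpy.array([0.1, 0.1, 3.0])         # hypothetical residuals with one outlier
print('MSE:', numpy.mean(example_errors ** 2))        # ~3.007, dominated by the outlier
print('MAE:', numpy.mean(numpy.abs(example_errors)))  # ~1.067, the outlier counts only linearly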
print('Start : Training model')
model.fit( trainX, trainY, nb_epoch = 100 , batch_size = 1, verbose = 2 )
print('Ends : Training Model')


model.save('PredictionModels/keras_model.h5')
del model

import time
#time.sleep(3)

# return a compiled model, identical to the previous one
model = load_model('PredictionModels/keras_model.h5')

####  Performance Evaluation 
trainScore = model.evaluate( trainX, trainY, verbose = 0 )
trainScore = math.sqrt( trainScore )
trainScore = scaler.inverse_transform( numpy.array( [[trainScore]] ) )
# Test Performance
testScore = model.evaluate( testX, testY, verbose = 0 )
testScore = math.sqrt( testScore )
Esempio n. 52
0
    model.add(Dropout(drop_hid))

for w in dense_widths:
    model.add(Dense(w))
    model.add(Activation('relu'))
    if drop_hid:
        model.add(Dropout(drop_hid))
model.add(Dense(output_length))
model.add(Activation('relu'))

model.summary()

#opt = Adadelta()
#opt = SGD(lr=0.001)
opt = Adam()

model.compile(loss='mse', optimizer=opt, metrics=[])

batch_size = 1536
nb_epoch = 2
history = model.fit(X_train,
                    y_train,
                    batch_size=batch_size,
                    nb_epoch=nb_epoch,
                    verbose=1,
                    validation_data=(X_val, y_val),
                    shuffle=True,
                    callbacks=[early])

model.save('convnet_aws.kerasmodel')
Esempio n. 53
0
def main():
	srcImg = cv2.imread(image_training,cv2.IMREAD_GRAYSCALE)
	tgtImg = cv2.imread(image_expecting,cv2.IMREAD_GRAYSCALE)
	valImg = cv2.imread(image_validating,cv2.IMREAD_GRAYSCALE)
	rows = int(srcImg.shape[0] / img_size)
	columns = int(srcImg.shape[1] / img_size)
	losses = []
	metric = []
	accuracies = []
	num_of_epochs = []
	setTrain = None
	setTarget = None

	# Preparing training data.... 
	print ("Preparing training data....")
	for i in range(0, train_samples):
		r = random.randint(0, rows - 1)
		c = random.randint(0, columns - 1)
		
		y = r * img_size
		x = c * img_size
		h = img_size
		w = img_size
		
		srcTile = srcImg[y:y+h, x:x+w]
		tgtTile = tgtImg[y:y+h, x:x+w]
		
		trainIn = img_to_array(srcTile)    
		trainIn = trainIn.reshape(1,numNeurons)
		trainIn = np.apply_along_axis(prepareInput, 1, trainIn)

		trainOut = img_to_array(tgtTile)
		trainOut = trainOut.reshape(1,numNeurons)
		trainOut = np.apply_along_axis(prepareInput, 1, trainOut)
		
		if setTrain is None:
			setTrain = trainIn
		else:
			setTrain = np.vstack((setTrain, trainIn))
		
		if setTarget is None:
			setTarget = trainOut
		else:
			setTarget = np.vstack((setTarget, trainOut))

	# setting up the dnn model (fully connected feed forward dnn)
	model = Sequential()
	model.add(Dense(numNeurons, activation=activationFunction, input_shape=(numNeurons,), use_bias=True, bias_initializer='zeros', kernel_initializer=initializers.RandomUniform(minval=-0.5, maxval=0.5, seed=42)))
	model.add(Dense(numNeurons, activation=activationFunction, input_shape=(int(numNeurons),), use_bias=True, bias_initializer='zeros', kernel_initializer=initializers.RandomUniform(minval=-0.5, maxval=0.5, seed=42))) 
	model.add(Dense(numNeurons, activation=activationFunction, input_shape=(int(numNeurons),), use_bias=True, bias_initializer='zeros', kernel_initializer=initializers.RandomUniform(minval=-0.5, maxval=0.5, seed=42))) 
	model.add(Dense(numNeurons, activation=activationFunction, input_shape=(numNeurons,), use_bias=True, bias_initializer='zeros', kernel_initializer=initializers.RandomUniform(minval=-0.5, maxval=0.5, seed=42)))
	model.summary()

	sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
	model.compile(loss='mean_squared_error', optimizer=sgd, metrics=['accuracy', metrics.binary_accuracy])

	# initialization magic for the ui plot
	plt.ion()

	ls = DynamicPlot()
	ls()

	#let's train the model
	cnt = 0
	for i in range(0, num_iterations): 
		history = model.fit(setTrain, setTarget,
					batch_size=batch_size,
					epochs=epochs,
					verbose=0,
					validation_data=(setTrain, setTarget))

		score = model.evaluate(setTrain, setTarget, verbose=0)
		cnt = cnt + epochs
		
		customScore = 0
		p = model.predict_on_batch(setTrain)
		
		a = setTrain.flatten()
		b = p.flatten()
		
		for j in range(0, a.size):
			customScore = customScore + (1- abs(a[j] - b[j]))
		
		customAccuracy = float(customScore) / a.size
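		# Added note: the loop above is equivalent to the vectorized expression
		# customAccuracy = float(np.mean(1 - np.abs(a - b)))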
		
		num_of_epochs.append(cnt)
		losses.append(score[0])
		metric.append(score[2])
		accuracies.append(customAccuracy)
		ls.drawPlot(np.asarray(num_of_epochs), np.asarray(losses),  np.asarray(metric), np.asarray(accuracies))
		
		print('Loss:', score[0])
		print('Metrics:', score[2])
		print ('Accuracy', customAccuracy)
		print('evaluating next iteration: ', i)


	#let's run a final prediction on another image for validation purposes

	#  Preparing input data for validation prediction....
	print ("Preparing input data for validation prediction....")

	setResult = None
	rows = int(valImg.shape[0] / img_size)
	columns = int(valImg.shape[1] / img_size)

	print(rows, columns)
	 
	for r in range(0, rows) :
		for c in range(0, columns):
			y = r * img_size
			x = c * img_size
			h = img_size
			w = img_size
			
			srcTile = valImg[y:y+h, x:x+w]
			srcIn = img_to_array(srcTile)    
			srcIn = srcIn.reshape(1,numNeurons)
			srcIn = np.apply_along_axis(prepareInput, 1, srcIn)
			if setResult is None:
				setResult = srcIn
			else:
				setResult = np.vstack((setResult, srcIn))

	print('Predicting....')
	result = model.predict_on_batch(setResult)
	s = np.shape(result)
	print(s)

	# preparing image for display
	print ('Preparing image for display')
	i = 0
	for r in range(0, rows):
		print('processing row: ', r)
		for c in range(0, columns):
			resMat = np.asmatrix(result[i])
			resMat = resMat.reshape(img_size,img_size)
			for x in range(0, img_size):
				for y in range(0, img_size):
					valImg[x + r * img_size,y + c * img_size] = int(255 * resMat[x,y])
			i = i + 1
	print('Calculations complete! Result image might not be visible, see taskbar. Hit enter in image to terminate run.')
			
	cv2.imshow('Result',valImg)
	cv2.waitKey(0) & 0xFF # see https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_gui/py_image_display/py_image_display.html

	st = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
	directory = output_path + st

	# store the parameters of the trained network for later purposes
	if not os.path.exists(directory):
		os.makedirs(directory)

	# save the validation image
	resImage = directory + '\\result.png'
	cv2.imwrite(resImage, valImg)
	cv2.destroyAllWindows()

	modelFile = directory + '\\model.json'

	modelJson =  model.to_json()
	f = open(modelFile, 'w')
	f.write(modelJson)
	f.close()

	modelH5 = directory + '\\model.h5'
	model.save(modelH5)
Esempio n. 54
0
model = Sequential()
model.add(Flatten(input_shape=(28, 28)))
model.add(Dense(config.encoding_dim, activation='relu'))
model.add(Dense(28 * 28, activation='sigmoid'))
model.add(Reshape((28, 28)))
model.compile(optimizer='adam', loss='mse')

model.summary()


class Images(Callback):
    def on_epoch_end(self, epoch, logs):
        indices = np.random.randint(self.validation_data[0].shape[0], size=8)
        test_data = self.validation_data[0][indices]
        pred_data = self.model.predict(test_data)
        run.history.row.update({
            "examples": [
                wandb.Image(np.hstack([data, pred_data[i]]), caption=str(i))
                for i, data in enumerate(test_data)
            ]
        })


model.fit(x_train,
          x_train,
          epochs=config.epochs,
          validation_data=(x_test, x_test),
          callbacks=[Images(), WandbCallback()])

model.save('auto-small.h5')
Esempio n. 55
0
# Create network
model = Sequential()
model.add(Embedding(len(token2idx)+2, 30, input_length=max_len, mask_zero=True))
model.add(LSTM(30, activation='tanh'))
model.add(Dense(outputs.shape[1], activation='softmax'))

# Train network
model.compile(optimizer=optimizers.Adam(lr=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(inputs, outputs, epochs=6, validation_split=0.2, batch_size=128)


## Evaluation
loss, acc = model.evaluate(test_inputs, test_outputs, batch_size=128)
print("Test accuracy:", acc)

model.save("model.keras")

## Apply the model
#from keras.models import load_model
#model = load_model("model.keras")

# Test model with example from test set
#print(' '.join([idx2token[int(x)] for x in test_inputs[17] if x > 0] ))
#model.predict(np.array([test_inputs[17]]))[0][1]


text = """I saw this movie first on the Berlin Film Festival, and I had never seen
Hong Kong cinema before. I felt like sitting in a roller coaster: the action was so
quick, and there wasn't one boring moment throughout the film. It has martial arts,
love, special effects and a fantastic plot. My favorite scene is when the Taoist
drinks, sings and fights for himself - one of the many scenes which stress the
Esempio n. 56
0
               return_sequences=True))  # try using a GRU instead, for fun
model.add(Dropout(0.1))
model.add(LSTM(32, return_sequences=True))  # try using a GRU instead, for fun
model.add(Dropout(0.1))
model.add(LSTM(32, return_sequences=True))  # try using a GRU instead, for fun
model.add(Dropout(0.1))
model.add(LSTM(32, return_sequences=False))  # try using a GRU instead, for fun
model.add(Dropout(0.1))
model.add(Dense(1))
model.add(Activation('sigmoid'))

# try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
checkpointer = callbacks.ModelCheckpoint(
    filepath="logs/lstm7/checkpoint-{epoch:02d}.hdf5",
    verbose=1,
    save_best_only=True,
    monitor='loss')
csv_logger = CSVLogger('logs/lstm7/training_set_iranalysis.csv',
                       separator=',',
                       append=False)
model.fit(X_train,
          y_train,
          batch_size=batch_size,
          nb_epoch=5000,
          validation_data=(X_test, y_test),
          callbacks=[checkpointer, csv_logger])
model.save("logs/lstm7/lstm1layer_model.hdf5")
#plot_confusion_matrix(cnf_matrix, classes=target_names, normalize=True,
#                      title='Normalized confusion matrix')
#plt.figure()
plt.show()

#%%
# Saving and loading model and weights
from keras.models import model_from_json
from keras.models import load_model

# serialize model to JSON
model_json = model.to_json()
with open("model.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("model.h5")
print("Saved model to disk")

# load json and create model
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("model.h5")
print("Loaded model from disk")

model.save('model.hdf5')
loaded_model=load_model('model.hdf5')

Esempio n. 58
0
           activation='relu',
           padding='same',
           kernel_constraint=maxnorm(3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer=SGD(lr=0.01),
              metrics=['accuracy'])
model.fit(new_X_train, new_Y_train, epochs=10, batch_size=500)

import h5py
model.save('Trained_model.h5')

from keras.models import load_model
HOST = '127.0.10.1'
PORT = 65432
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.bind((HOST, PORT))
    s.listen()
    conn, addr = s.accept()
    with conn:
        print('Connected by', addr)
        data = conn.recv(4096)
        data = data.decode()  # decode the received bytes into a file path string
        img = cv2.imread(data)
        image_array = np.array(img)
        image_array = image_array.astype('float32')
Esempio n. 59
0
y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])

model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

model.save(os.path.join(model_path, "Keras_MNIST.h5"))


Esempio n. 60
0
class NN(object):
    def __init__(self,
                 xtrain,
                 ytrain,
                 xval,
                 yval,
                 xtest,
                 ytest,
                 wi=14,
                 dr=0.4,
                 ac='relu',
                 acpar=0.1,
                 bs=2048):

        # INITIALIZE HYPERPARAMETERS ###
        self.width = wi  # Integer
        self.droprate = dr  # Float 0 <= x < 1
        self.activation = ac  # String 'relu' 'elu' 'sigmoid' etc.
        self.activation_par = acpar
        self.batchsize = bs  # Integer
        self.x_train = xtrain
        self.x_validate = xval
        self.x_test = xtest
        self.y_train = ytrain
        self.y_validate = yval
        self.y_test = ytest

        # GENERATE PATHNAME
        self.name = '{}{}{}{}{}'.format(self.activation, self.batchsize,
                                        self.droprate, self.width,
                                        self.activation_par)
        self.path = '{}{}'.format('../Data/Results/AutoEncoder/', self.name)

        # INITIALIZE CHOICE OF KERAS FUNCTIONS #
        self.model = Sequential()

        self.sgd = optimizers.SGD(lr=0.01, momentum=0.001, decay=0.001)
        self.adagrad = optimizers.Adagrad(lr=0.01, epsilon=None, decay=0.0)
        self.adam = optimizers.Adam(lr=0.001,
                                    beta_1=0.9,
                                    beta_2=0.999,
                                    epsilon=10e-8,
                                    decay=0.001,
                                    amsgrad=False)
        self.cb = callbacks.EarlyStopping(monitor='val_loss',
                                          min_delta=0.0001,
                                          patience=50,
                                          verbose=1,
                                          mode='min',
                                          baseline=None,
                                          restore_best_weights=True)
        initializers.VarianceScaling(scale=1.0,
                                     mode='fan_in',
                                     distribution='normal',
                                     seed=None)
        initializers.he_normal(151119)
        initializers.Zeros()

    def setup(self):
        start = 14
        width = 5
        a = np.arange(width, start, 1, dtype=int)
        tempwidth = np.concatenate((np.flip(a[1:]), a))
        tempdepth = tempwidth.shape[0]
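        # Added note: with start = 14 and width = 5, tempwidth becomes
        # [13, 12, ..., 6, 5, 6, ..., 12, 13], i.e. a symmetric encoder/decoder
        # stack that narrows to a bottleneck of 5 units before widening again.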

        # Add layers to model
        self.model.add(Dense(14, activation='linear'))

        if self.activation == 'LeakyReLU':
            for i in range(tempdepth):
                self.model.add(
                    Dense(tempwidth[i],
                          kernel_initializer='he_normal',
                          bias_initializer='zeros',
                          kernel_regularizer=regularizers.l2(0.01),
                          bias_regularizer=None,
                          activity_regularizer=None))
                self.model.add(LeakyReLU(alpha=0.2))
                self.model.add(Dropout(self.droprate))
        else:
            for i in range(tempdepth):
                self.model.add(
                    Dense(tempwidth[i],
                          activation=self.activation,
                          kernel_initializer='he_normal',
                          bias_initializer='zeros',
                          kernel_regularizer=regularizers.l2(0.01)))
                self.model.add(Dropout(self.droprate))

        self.model.add(Dense(14, activation='linear'))

    def run(self):
        NN.setup(self)
        print('Setup Successful')
        self.model.compile(optimizer=self.adam, loss='mean_squared_error')
        # , metrics=[nnu.metr_abs_dif, nnu.metr_rel_dif])
        print('Begin Fit')
        self.hist = self.model.fit(x=self.x_train,
                                   y=self.y_train,
                                   batch_size=self.batchsize,
                                   epochs=10000,
                                   verbose=1,
                                   callbacks=[self.cb],
                                   validation_split=0,
                                   validation_data=(self.x_validate,
                                                    self.y_validate),
                                   shuffle=True,
                                   class_weight=None,
                                   sample_weight=None,
                                   initial_epoch=0,
                                   steps_per_epoch=None,
                                   validation_steps=None)

    # Evaluate Trained Network ###
    def evaluate(self):
        self.testres = self.model.evaluate(self.x_test,
                                           self.y_test,
                                           batch_size=self.batchsize)

    def plot(self):
        Epochs = len(self.hist.history['loss'])
        loss = self.hist.history['loss']
        val_loss = self.hist.history['val_loss']

        Fig = plt.figure()
        plt.plot(loss, label='Training Loss')
        plt.plot(val_loss, label='Validation Loss')
        plt.hlines(self.testres, 0, Epochs, colors='k', label='Test loss')

        plt.text(round(Epochs - 5, -1), 1.1 * loss[-1], '%.3f' % loss[-1])
        plt.text(round(Epochs - 5, -1), 1.1 * val_loss[-1],
                 '%.3f' % val_loss[-1])
        plt.text(round(Epochs - 5, -1), 0.9 * self.testres,
                 '%.3f' % self.testres)

        plt.title(self.name)
        plt.xlabel('Epochs')
        plt.ylabel('Loss')

        plt.legend()

        Fig.savefig('{}{}'.format(self.path, '/Plot.pdf'), format='pdf')
        plt.show()

    def save(self):
        # create the output subdirectory if it does not exist yet

        if not os.path.exists(self.path):
            os.makedirs(self.path)
        # save NN as H5 file
        nameh5 = '{}{}'.format(self.path, '/network.h5')
        self.model.save(nameh5)

        namehist = '{}{}'.format(self.path, '/history')
        with open(namehist, 'wb') as file_pi:
            pickle.dump(self.hist.history, file_pi)

        nametest = '{}{}'.format(self.path, '/TestRes.txt')
        with open(nametest, 'w') as file_pi:
            file_pi.write('%f' % self.testres)

        with open(self.path + '/Report.txt', 'w') as fh:
            # Pass the file handle in as a lambda function to make it callable
            self.model.summary(print_fn=lambda x: fh.write(x + '\n'))