def dnn_class(X_train,y_train,X_test,y_test):
    
    model=Sequential()
    model.add(Dense(1000,input_dim=1583,kernel_initializer="glorot_uniform",activation="relu"))
    model.add(BatchNormalization())
    model.add(Dense(1000,kernel_initializer="glorot_uniform",activation="relu"))
    model.add(BatchNormalization())
# =============================================================================
#     model.add(Dropout(0.6))
# =============================================================================
    model.add(Dense(1,activation="sigmoid",kernel_initializer="glorot_uniform"))
    # alternative output layer: model.add(Dense(units=1,activation="relu",kernel_initializer="glorot_uniform"))
    adam=Adam(lr=0.01)
    # unused alternative optimizers, kept for experimentation:
    sgd=SGD(lr=0.01, momentum=0.01, decay=0.0, nesterov=True)
    rms=RMSprop(lr=0.005)
    model.compile(optimizer=adam,loss="binary_crossentropy",metrics=["accuracy"])
    callbacks=[EarlyStopping(monitor='val_loss', patience=2),
             ModelCheckpoint(filepath='best_model.h5', monitor='val_loss', save_best_only=True)]
    print(model.summary())
    model.fit(X_train,y_train,batch_size=4,epochs=50,verbose=1,callbacks=callbacks,validation_data=(X_test,y_test))
    pkl_filename = "keras_model.joblib"
    with open(pkl_filename, 'wb') as file:
        joblib.dump(model, file)
    y_pred=model.predict_classes(X_test)
    print(y_pred)
    evaluation2(y_test,y_pred,model,X_test,X_train,y_train)
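Note that the ModelCheckpoint callback above already writes the best weights to best_model.h5; since Keras models are not reliably picklable, reloading that file is safer than the joblib dump. A minimal sketch of restoring and re-scoring it:

from keras.models import load_model

best_model = load_model('best_model.h5')  # file written by the ModelCheckpoint above
loss, acc = best_model.evaluate(X_test, y_test, verbose=0)
print('restored best model accuracy:', acc)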
Example #2
def lstm(trainData, trainMark, testData, embedding_dim, embedding_matrix, maxlen, output_len):
    # Pad the data so every sequence has the same length
    trainData = list(sequence.pad_sequences(trainData, maxlen=maxlen,
                                            dtype='float64'))  # pad_sequences returns a numpy array padded/truncated to maxlen: longer sequences are cut, shorter ones are zero-filled (index 0 maps to the zero vector, so this is safe)
    testData = list(sequence.pad_sequences(testData, maxlen=maxlen,
                                           dtype='float64'))  # padded/truncated to maxlen in the same way

    # Build the LSTM model
    model = Sequential()  # a linear stack of layers; build it by passing a list of layers or by calling .add() one layer at a time
    # model.add(Dense(256, input_shape=(train_total_vova_len,)))   # fully connected input layer (unused alternative)
    model.add(Embedding(len(embedding_matrix), embedding_dim, weights=[embedding_matrix], mask_zero=False,
                        input_length=maxlen))  # input layer: maps high-dimensional one-hot indices to a low-dimensional embedding; first arg is the largest input index + 1, second is the embedding dimension
    # LSTM layer, the core of the network
    model.add(LSTM(256))  # 256 hidden units; the input dimension is inferred from the Embedding output
    model.add(Dropout(0.5))  # randomly drop connections on each update to reduce overfitting
    model.add(Dense(output_len))  # fully connected output layer; output_len is its dimension, the input dimension is inferred from the LSTM layer
    model.add(Activation('softmax'))  # softmax activation on the output
    # Compile the model: categorical_crossentropy (a.k.a. log loss) with the chosen optimizer
    model.compile(loss='categorical_crossentropy', optimizer='sgd')

    # Run the model; the padding above matters because the raw arrays have different lengths, which would otherwise raise an error
    X = np.array(list(trainData))  # inputs
    print("X:", X)
    Y = np.array(list(trainMark))  # labels
    print("Y:", Y)
    # batch_size: number of samples in each gradient-descent batch
    # nb_epoch: number of passes over the training data
    model.fit(X, Y, batch_size=200, nb_epoch=10)  # X and Y are single numpy arrays here; for multiple inputs they would be lists of arrays

    # Predict
    A = np.array(list(testData))  # test inputs
    print("A:", A)
    classes = model.predict(A)  # predicted outputs
    return classes
Example #3
def train_model():
    # (X_train, Y_train, X_test, Y_test) = prapare_train()
    X_ = []
    with open('../data/train_matrix.out') as train_file:
        X_train = json.load(train_file)
        for x in X_train:
            a = len(x)
            print(a // 2)
            x1 = x[:a // 2]
            x2 = x[a // 2:]
            x3 = []
            x3.append(x1)
            x3.append(x2)
            X_.append(x3)
    # X_test = pickle.load('../data/test_matrix.out')
    Y_train = [1,0,0]*3
    # Y_test = [1,0,0]*3
    # print len(X_train) - len(Y_train)
    # print len(X_test) - len(Y_test)
    model = Sequential()
    model = get_nn_model()
    model.compile(loss='binary_crossentropy',
                optimizer='adam',
                metrics=['accuracy'])
    # model.fit(X_train, Y_train,
    #       batch_size=batch_size,
    #       nb_epoch=nb_epoch,
    #       validation_data=(X_test, Y_test))
    model.fit(X_, Y_train,
          batch_size=batch_size,
          nb_epoch=nb_epoch,
          validation_split = 0.2)
    print('ok')
Example #4
def main_separatemodels():
    X1, X2, y = generate_data2(TRAINING_SIZE)
    X1_test, X2_test, y_test = generate_data2(TEST_SIZE)

    print('Defining network...', file=sys.stderr)
    firstlstm = Sequential()
    firstlstm.add(Embedding(VOCABULARY_SIZE, EMBEDDING_DIMENSION))
    firstlstm.add(LSTM(EMBEDDING_DIMENSION, HIDDEN_DIMENSION, return_sequences=False))

    secondlstm = Sequential()
    secondlstm.add(Embedding(VOCABULARY_SIZE, EMBEDDING_DIMENSION))
    secondlstm.add(LSTM(EMBEDDING_DIMENSION, HIDDEN_DIMENSION, return_sequences=False))

    model = Sequential()
    model.add(Merge([firstlstm, secondlstm], mode='concat'))
    model.add(Dense(HIDDEN_DIMENSION + HIDDEN_DIMENSION, 1, activation='sigmoid'))
    print('Compiling...', file=sys.stderr)
    model.compile(loss='binary_crossentropy', optimizer='adam', class_mode="binary")

    print('Training...', file=sys.stderr)
    model.fit([X1, X2], y, batch_size=BATCH_SIZE, nb_epoch=EPOCHS,
              validation_split=0.05, show_accuracy=True)

    print("Testing...", file=sys.stderr)
    score, acc = model.evaluate([X1_test, X2_test], y_test, batch_size=BATCH_SIZE,
                                show_accuracy=True)
    print("Testing performance = " + str(score) + ", acc = " + str(acc))
Example #5
def model(X_train, X_test, y_train, y_test, maxlen, max_features):
    embedding_size = 300
    pool_length = 4
    lstm_output_size = 100
    batch_size = 200
    nb_epoch = 1

    model = Sequential()
    model.add(Embedding(max_features, embedding_size, input_length=maxlen))
    model.add(Dropout({{uniform(0, 1)}}))
    # Note that we use unnamed parameters here, which is bad style, but is used here
    # to demonstrate that it works. Always prefer named parameters.
    model.add(Convolution1D({{choice([64, 128])}},
                            {{choice([6, 8])}},
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=pool_length))
    model.add(LSTM(lstm_output_size))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    print('Train...')
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              validation_data=(X_test, y_test))
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)

    print('Test score:', score)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #6
def test_LambdaCallback():
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_class, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    # Start an arbitrary process that should run during model training and be terminated after training has completed.
    def f():
        while True:
            pass

    p = multiprocessing.Process(target=f)
    p.start()
    cleanup_callback = callbacks.LambdaCallback(on_train_end=lambda logs: p.terminate())

    cbks = [cleanup_callback]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=5)
    p.join()
    assert not p.is_alive()
Example #7
def test_TensorBoard_with_ReduceLROnPlateau(tmpdir):
    import shutil
    np.random.seed(np.random.randint(1, 1e7))
    filepath = str(tmpdir / 'logs')

    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_class, activation='softmax'))
    model.compile(loss='binary_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbks = [
        callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.5,
            patience=4,
            verbose=1),
        callbacks.TensorBoard(
            log_dir=filepath)]

    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=2)

    assert os.path.isdir(filepath)
    shutil.rmtree(filepath)
    assert not tmpdir.listdir()
Example #8
	def ann(self):
		#print self.company.X_train.shape[1]
		model = Sequential()
		model.add(Dense(input_dim=self.search_inputs.X_train.shape[1], output_dim=10, init="glorot_uniform"))
		model.add(Activation('tanh'))
		model.add(Dropout(0.1))
		model.add(Dense(input_dim=10, output_dim=10, init="uniform"))
		model.add(Activation('tanh'))
		model.add(Dropout(0.5))
		model.add(Dense(input_dim=10, output_dim=1, init="glorot_uniform"))
		model.add(Activation("linear"))

		sgd = SGD(lr=0.3, decay=1e-6, momentum=0.9, nesterov=True)
		model.compile(loss='mean_squared_error', optimizer=sgd)  # use the SGD instance configured above
		early_stopping = EarlyStopping(monitor='val_loss', patience=25)
		#epoch_score = model.evaluate(X_score, y_score, batch_size = 16) # this doesn't work
		# first model
		print "fitting first model"
		model.fit(self.search_inputs.X_train, self.search_inputs.y_train, nb_epoch=100, validation_split=.1, batch_size=100, verbose = 1, show_accuracy = True, shuffle = True, callbacks=[early_stopping])
		#score = model.evaluate(self.company.X_cv, self.company.y_cv, show_accuracy=True, batch_size=16)
		self.ann_preds = model.predict(self.search_inputs.X_test)
		#just in case (like w/ svr)
		for i in range(len(self.ann_preds)):
			if self.ann_preds[i] < 1:
				self.ann_preds[i] = 1.00
			elif self.ann_preds[i] > 3:
				self.ann_preds[i] = 3.00

		self.search_inputs.fin_df['relevance'] = np.array(self.ann_preds) # easy swap in / out 
		final_file_ann = self.search_inputs.fin_df.to_csv(self.fin_file_name+'_ann.csv', float_format='%.5f', index=False)
Example #9
def test_EarlyStopping():
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_class, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    mode = 'max'
    monitor = 'val_acc'
    patience = 0
    cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]
    history = model.fit(X_train, y_train, batch_size=batch_size,
                        validation_data=(X_test, y_test), callbacks=cbks, epochs=20)

    mode = 'auto'
    monitor = 'val_acc'
    patience = 2
    cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]
    history = model.fit(X_train, y_train, batch_size=batch_size,
                        validation_data=(X_test, y_test), callbacks=cbks, epochs=20)
Example #10
def model(X_train, Y_train, X_test, Y_test):
    '''
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    '''
    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])

    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              nb_epoch=1,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
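As the docstring explains, hyperas expects exactly this kind of function: double-curly template expressions in the body and a {'loss', 'status', 'model'} dictionary returned. A minimal sketch of running the search, assuming a companion data() function that returns (X_train, Y_train, X_test, Y_test):

from hyperas import optim
from hyperopt import Trials, tpe

# data is assumed to be a function returning the four arrays used by model() above
best_run, best_model = optim.minimize(model=model,
                                      data=data,
                                      algo=tpe.suggest,
                                      max_evals=10,
                                      trials=Trials())
print('Best run:', best_run)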
Example #11
def train_rnn(character_corpus, seq_len, train_test_split_ratio):
    model = Sequential()
    model.add(Embedding(character_corpus.char_num(), 256))
    model.add(LSTM(256, 5120, activation='sigmoid', inner_activation='hard_sigmoid', return_sequences=True))
    model.add(Dropout(0.5))
    model.add(TimeDistributedDense(5120, character_corpus.char_num()))
    model.add(Activation('time_distributed_softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    seq_X, seq_Y = character_corpus.make_sequences(seq_len)

    print "Sequences are made"

    train_seq_num = int(train_test_split_ratio * seq_X.shape[0])  # cast to int so it can be used as a slice index
    X_train = seq_X[:train_seq_num]
    Y_train = to_time_distributed_categorical(seq_Y[:train_seq_num], character_corpus.char_num())

    X_test = seq_X[train_seq_num:]
    Y_test = to_time_distributed_categorical(seq_Y[train_seq_num:], character_corpus.char_num())

    print "Begin train model"
    checkpointer = ModelCheckpoint(filepath="model.step", verbose=1, save_best_only=True)
    model.fit(X_train, Y_train, batch_size=256, nb_epoch=100, verbose=2, validation_data=(X_test, Y_test), callbacks=[checkpointer])

    print "Model is trained"

    score = model.evaluate(X_test, Y_test, batch_size=512)

    print "valid score = ", score

    return model
def get_ts_model( trainX, trainY, look_back = 1, nb_epochs = 100 ):
    model = Sequential()
    # takes an input array of shape (*, 1); e.g. a (2, 1) (row, col) array looks like [[23], [43]]
    model.add(LSTM(20, input_shape=(None , look_back) ))
    #model.add(LSTM(20,  batch_input_shape=(None, None, look_back), return_sequences= True ))
    #print(model.summary)
    model.add( Dense(1) )
    model.add(Dense(1))
    model.add(Dense(1))
    model.add(Dense(1))
    model.add(Dense(1))
    model.add(Dense(1))
    #model.add(LSTM(1, return_sequences= True))
    #model.add(LSTM(1))
    # outputs array of shape (*,1)
    #model.add(Dense(1))
    #model.compile(loss='mean_absolute_error', optimizer='SGD')  # mape
    #model.compile(loss='poisson', optimizer='adam')  # mape
    model.compile( loss =  'mean_squared_error', optimizer = 'adam' ) # values closer to zero are better.
    #model.compile(loss='mean_squared_error', optimizer='adagrad')
    # MSE values are used to compare two or more statistical methods. MSE heavily weights outliers, i.e. it weighs large errors more than small ones.
    # "In cases where this is undesired, mean absolute error is used."
    # REF: Available loss functions  https://keras.io/objectives.
    print('Start : Training model')
    # default  configuration
    model.fit(trainX, trainY, nb_epoch=nb_epochs, batch_size=1, verbose=2)
    #model.fit(trainX, trainY, nb_epoch=100, batch_size=1, verbose=2)
    print('Ends : Training Model')
    return model
Example #13
def create_model(x_train, y_train, x_test, y_test):
    """
    Create your model...
    """
    layer_1_size = {{quniform(12, 256, 4)}}
    l1_dropout = {{uniform(0.001, 0.7)}}
    params = {
        'l1_size': layer_1_size,
        'l1_dropout': l1_dropout
    }
    num_classes = 10
    model = Sequential()
    model.add(Dense(int(layer_1_size), activation='relu'))
    model.add(Dropout(l1_dropout))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSprop(),
                  metrics=['accuracy'])
    model.fit(x_train, y_train, batch_size=128, epochs=10, validation_data=(x_test, y_test))
    score, acc = model.evaluate(x_test, y_test, verbose=0)
    out = {
        'loss': -acc,
        'score': score,
        'status': STATUS_OK,
        'model_params': params,
    }
    # optionally store a dump of your model here so you can get it from the database later
    temp_name = tempfile.gettempdir()+'/'+next(tempfile._get_candidate_names()) + '.h5'
    model.save(temp_name)
    with open(temp_name, 'rb') as infile:
        model_bytes = infile.read()
    out['model_serial'] = model_bytes
    return out
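Because out['model_serial'] carries the raw bytes of the saved HDF5 file, restoring the model from the database is just the dump in reverse. A minimal sketch, assuming model_bytes was fetched back (the temp file name here is illustrative):

import tempfile
from keras.models import load_model

restore_path = tempfile.gettempdir() + '/restored_model.h5'  # illustrative name
with open(restore_path, 'wb') as outfile:
    outfile.write(model_bytes)  # bytes read back from the database
restored_model = load_model(restore_path)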
def runNN(train_X, train_y, test_X, test_y=None):
    # sc = preprocessing.StandardScaler()
    # train_X = sc.fit_transform(train_X)
    # test_X = sc.transform(test_X)

    train_y = np_utils.to_categorical(train_y, 2)

    model = Sequential()
    model.add(Dense(train_X.shape[1], 100, init='he_uniform'))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(Dense(100, 50, init='he_uniform'))
    model.add(Activation('relu'))
    model.add(Dropout(0.4))
    model.add(Dense(50, 25, init='he_uniform'))
    model.add(Activation('relu'))
    model.add(Dropout(0.4))
    model.add(Dense(25, 2, init='he_uniform'))
    model.add(Activation('softmax'))

    # sgd_opt = SGD(lr=0.01)
    model.compile(loss='binary_crossentropy', optimizer='adam')

    model.fit(train_X, train_y, batch_size=256, nb_epoch=50, validation_split=0.05, verbose=2)
    preds = model.predict(test_X, verbose=0)[:, 1]
    print(preds[:10])
    print("Test preds shape : ", preds.shape)
    print("ROC AUC score : ", metrics.roc_auc_score(test_y, preds))
Example #15
def train():

    print('Build model...')
    model = Sequential()
    model.add(Embedding(max_features, 128, input_length=maxlen, dropout=0.2))
    model.add(LSTM(128, dropout_W=0.2, dropout_U=0.2))  # try using a GRU instead, for fun
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    # try using different optimizers and different optimizer configs
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    print('Train...')
    print(X_train.shape)
    print(y_train.shape)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=15,
              validation_data=(X_test, y_test))
    score, acc = model.evaluate(X_test, y_test,
                                batch_size=batch_size)
    print('Test score:', score)
    print('Test accuracy:', acc)

    with open("save_weight_lstm.pickle", mode="wb") as f:
        pickle.dump(model.get_weights(),f)
Example #16
def model(X_train, Y_train, X_test, Y_test):
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation
    from keras.optimizers import RMSprop

    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms)

    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              nb_epoch=1,
              show_accuracy=True,
              verbose=2,
              validation_data=(X_test, Y_test))
    score = model.evaluate(X_test, Y_test,
                           show_accuracy=True, verbose=0)
    print('Test accuracy:', score[1])
    return {'loss': -score[1], 'status': STATUS_OK}
Example #17
def main():
	train_X = np.load('train_X.npy')
	train_y = np.load('train_y.npy')
	test_X = np.load('test_X.npy')
	test_y = np.load('test_y.npy')

	model = Sequential()
	model.add(Flatten(input_shape=(15,60,2)))
	model.add(Dense(128))
	model.add(Activation('relu'))
	model.add(Dense(128))
	model.add(Activation('relu'))
	model.add(Dense(128))
	model.add(Activation('relu'))
	model.add(Dense(900))
	model.add(Activation('sigmoid'))

	print(model.summary())

	adam = Adam(0.001)
	#adagrad = Adagrad(lr=0.01)
	model.compile(loss='mse', optimizer=adam)

	model.fit(train_X, train_y, batch_size=batch_size, nb_epoch=nb_epoch,
	          verbose=1, validation_data=(test_X, test_y))
	model.save_weights('model.h5', overwrite=True)
Example #18
def OptKeras(h1, h2, h3, d1, d2, d3, d4, ne):
    model = Sequential()
    model.add(Dense(dims, h1, init='glorot_uniform'))
    model.add(PReLU((h1,)))
    model.add(BatchNormalization((h1,)))
    model.add(Dropout(d1))

    model.add(Dense(h1, h2, init='glorot_uniform'))
    model.add(PReLU((h2,)))
    model.add(BatchNormalization((h2,)))
    model.add(Dropout(d2))

    model.add(Dense(h2, h3, init='glorot_uniform'))
    model.add(PReLU((h3,)))
    model.add(BatchNormalization((h3,)))
    model.add(Dropout(d3))

    model.add(Dense(h3, nb_classes, init='glorot_uniform'))
    model.add(Activation('softmax'))

    sgd = SGD(lr = 0.1, decay = 1e-6, momentum = 0.9, nesterov = True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)

    print("Training model...kkk")

    model.fit(X, y, nb_epoch = ne, batch_size=1024, validation_split=0.2)
Example #19
class MotifScoreRNN(Model):

    def __init__(self, input_shape, gru_size=10, tdd_size=4):
        self.model = Sequential()
        self.model.add(GRU(gru_size, return_sequences=True,
                           input_shape=input_shape))
        if tdd_size is not None:
            self.model.add(TimeDistributedDense(tdd_size))
        self.model.add(Flatten())
        self.model.add(Dense(1))
        self.model.add(Activation('sigmoid'))
        print('Compiling model...')
        self.model.compile(optimizer='adam', loss='binary_crossentropy')

    def train(self, X, y, validation_data):
        print('Training model...')
        multitask = y.shape[1] > 1
        if not multitask:
            num_positives = y.sum()
            num_sequences = len(y)
            num_negatives = num_sequences - num_positives
        self.model.fit(
            X, y, batch_size=128, nb_epoch=100,
            validation_data=validation_data,
            class_weight={True: num_sequences / num_positives,
                          False: num_sequences / num_negatives}
            if not multitask else None,
            callbacks=[EarlyStopping(monitor='val_loss', patience=10)],
            verbose=True)

    def predict(self, X):
        return self.model.predict(X, batch_size=128, verbose=False)
Example #20
def f_nn(params):   
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation
    from keras.optimizers import Adadelta, Adam, rmsprop

    print ('Params testing: ', params)
    model = Sequential()
    model.add(Dense(output_dim=params['units1'], input_dim = X.shape[1])) 
    model.add(Activation(params['activation1']))
    model.add(Dropout(params['dropout1']))

    model.add(Dense(output_dim=params['units2'], init = "glorot_uniform")) 
    model.add(Activation(params['activation2']))
    model.add(Dropout(params['dropout2']))

    if params['choice']['layers']== 'three':
        model.add(Dense(output_dim=params['choice']['units3'], init = "glorot_uniform")) 
        model.add(Activation(params['choice']['activation3']))
        model.add(Dropout(params['choice']['dropout3']))    

    model.add(Dense(2))
    model.add(Activation('sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer=params['optimizer'])

    model.fit(X, y, nb_epoch=params['nb_epochs'], batch_size=params['batch_size'], verbose = 1)

    pred_auc = model.predict_proba(X_val, batch_size=128, verbose=1)
    acc = log_loss(y_val, pred_auc)

    print("\n")
    print('logloss:', acc)
    sys.stdout.flush() 
    return {'loss': acc, 'status': STATUS_OK}
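f_nn is a hyperopt objective, so it gets minimized with fmin over a search space whose keys mirror the params lookups above ('units1', 'dropout1', the nested 'choice' dict, and so on). A minimal sketch of such a space and call; the concrete value ranges are illustrative assumptions:

from hyperopt import fmin, tpe, hp

space = {
    'units1': hp.choice('units1', [64, 128, 256]),
    'units2': hp.choice('units2', [64, 128, 256]),
    'dropout1': hp.uniform('dropout1', 0.25, 0.75),
    'dropout2': hp.uniform('dropout2', 0.25, 0.75),
    'activation1': hp.choice('activation1', ['relu', 'sigmoid']),
    'activation2': hp.choice('activation2', ['relu', 'sigmoid']),
    'optimizer': hp.choice('optimizer', ['adadelta', 'adam', 'rmsprop']),
    'nb_epochs': 100,
    'batch_size': 128,
    'choice': hp.choice('layers', [
        {'layers': 'two'},
        {'layers': 'three',
         'units3': hp.choice('units3', [64, 128, 256]),
         'dropout3': hp.uniform('dropout3', 0.25, 0.75),
         'activation3': hp.choice('activation3', ['relu', 'sigmoid'])}])
}
best = fmin(f_nn, space, algo=tpe.suggest, max_evals=50)
print('best:', best)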
Example #21
def evaluate(lr, pos):
    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    X_train = (X_train.astype("float32")).reshape((60000, 784))
    X_test = (X_test.astype("float32")).reshape((10000, 784))
    X_train /= 255
    X_test /= 255

    Y_train = np_utils.to_categorical(y_train, 10)
    Y_test = np_utils.to_categorical(y_test, 10)

    model = Sequential()
    model.add(Dense(output_dim=layer1, input_dim=784))
    if pos == 0:
        model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(Dense(output_dim=layer2, input_dim=layer1))
    if pos == 1:
        model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(Dense(output_dim=10, input_dim=layer2))
    if pos == 2:
        model.add(BatchNormalization())
    model.add(Activation("softmax"))

    model.compile(
        loss="categorical_crossentropy", optimizer=SGD(lr=lr, momentum=0.9, nesterov=True), metrics=["accuracy"]
    )

    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_data=(X_test, Y_test))
    score = model.evaluate(X_test, Y_test, verbose=0)
    return score[1]
Example #22
def model(X_train, X_test, Y_train, Y_test):
    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([400, 512, 600])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])

    nb_epoch = 10
    batch_size = 128

    model.fit(X_train, Y_train,
              batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=2,
              validation_data=(X_test, Y_test))

    score, acc = model.evaluate(X_test, Y_test, verbose=0)

    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #23
def imdb_lstm():
    max_features = 20000
    maxlen = 80  # cut texts after this number of words (among top max_features most common words)
    batch_size = 32
    (X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features)
    print(type(X_train))
    print(len(X_train), 'train sequences')
    print(len(X_test), 'test sequences')
    print('Pad sequences (samples x time)')
    X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
    X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)

    print('Build model...')
    model = Sequential()
    model.add(Embedding(max_features, 128, dropout=0.2))
    model.add(LSTM(128, dropout_W=0.2, dropout_U=0.2))  # try using a GRU instead, for fun
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    # try using different optimizers and different optimizer configs
    model.compile(loss='binary_crossentropy', optimizer='adam',metrics=['accuracy'])

    print('Train...')
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=15,
                        validation_data=(X_test, y_test))
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)
    print('Test score:', score)
    print('Test accuracy:', acc)
Example #24
class MLP:
    '''
    [(output_dim, input_dim, init, activation, dropout)]
    '''
    def __init__(self,
                 structure,
                 sgd_params_init=sgd_params(0.1, 1e-6, 0.9, True),
                 loss_name='mean_squared_error'):
        
        self.model = Sequential()
        for layers in structure:
            self.model.add(Dense(output_dim=layers.output_dim,
                                 input_dim=layers.input_dim,
                                 init=layers.init,
                                 activation=layers.activation))
            if layers.dropout is not None:
                self.model.add(Dropout(layers.dropout))
        # build the optimizer once, outside the loop, so it is defined even when no layer uses dropout
        sgd = SGD(lr=sgd_params_init.lr,
                  decay=sgd_params_init.decay,
                  momentum=sgd_params_init.momentum,
                  nesterov=sgd_params_init.nesterov)

        self.model.compile(loss = loss_name, optimizer = sgd)

    def train(self, X_train, y_train, nb_epoch=20, batch_size=16):
        self.model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch)

    def test(self, X_test, y_test, batch_size = 16):
        return self.model.evaluate(X_test, y_test, batch_size)   
def train_the_nn(features, label, look_back = 1):
    model = Sequential()
    model.add(Dense(8, input_dim=look_back, activation='relu'))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer = 'adam')
    model.fit(features, label, nb_epoch=200, batch_size=2, verbose=2)
    return model
Example #26
 def generateModel(self,docSeries):
     topics = list(docSeries.topicSeries.keys())  # materialize so topics[j] indexing works
     seriesLength = 50
     sequenceTuples = []
     for j in range(len(topics)):
         topic = topics[j]
         topicLength = len(docSeries.topicSeries[topic])
         for i in range(0,topicLength):
             if i+seriesLength < topicLength:
                 sequenceTuples.append((docSeries.topicSeries[topic][i:i+seriesLength],j))
     random.shuffle(sequenceTuples)
     X = []
     y = []
     for s,l in sequenceTuples:
         X.append(s)
         y.append(l)
     X = np.array(X).astype(np.uint8)
     y = np_utils.to_categorical(np.array(y)).astype(np.bool)
     X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
     print(len(X_train), len(y_train))
     print(X.shape, y.shape)
     model = Sequential()
     model.add(Embedding(50, 64, input_length = seriesLength, mask_zero = True))
     model.add(LSTM(64,init='glorot_uniform',inner_init='orthogonal',activation='tanh',inner_activation='hard_sigmoid',return_sequences=False))
     model.add(Dropout(0.5))
     model.add(Dense(len(topics)))
     model.add(Activation('softmax'))
     model.compile(loss='categorical_crossentropy', optimizer='adam', class_mode='categorical')
     early_stopping = EarlyStopping(patience=5, verbose=1)
     model.fit(X_train, y_train,nb_epoch=20,show_accuracy=True,verbose=1,shuffle=True)
     preds = model.predict_classes(X_test, batch_size=64, verbose=0)
     print('\n')
     print(classification_report(np.argmax(y_test, axis=1), preds, target_names=topics))
Example #27
def trainNN():
    # POSITIVE training data
    posPX, posSX = getAllWindowedMinMaxPositiveTrainingData('./sample/example30', preSize=10, postSize=20)
    posPY = np.array([[1]] * len(posPX))
    posSY = np.array([[1]] * len(posSX))

    # NEGATIVE training data
    negX = getSomeWindowedMinMaxNegativeTrainingData('./sample/example30/', size=30, num=200)
    negY = np.array([[0]] * 200)

    # ALL training data
    X = np.concatenate([posPX, posSX, negX])
    Y = np.concatenate([posPY, posSY, negY])

    # Build the neural network with Keras:
    # Sequential is a linear stack of layers,
    # Dense is a fully connected layer.
    # Define the model
    model = Sequential()
    model.add(Dense(50, input_dim=30, activation='sigmoid'))
    model.add(Dense(50, activation='sigmoid'))
    model.add(Dense(10, activation='sigmoid'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    # model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
    model.fit(X, Y, epochs=200, batch_size=32)
    model.save('model.h5')
    return model
Example #28
def CNN_3_layer(activation):
    Xtrain, ytrain, XCV, yCV, Xtest, ytest = load_data("mnist.pkl.gz")
    Xtrain = Xtrain.reshape(Xtrain.shape[0], 1, 28, 28)
    Xtest = Xtest.reshape(Xtest.shape[0], 1, 28, 28)
    XCV = XCV.reshape(XCV.shape[0], 1, 28, 28)  # reshape the CV set itself, not Xtest
    # 0~9 ten classes
    ytrain = np_utils.to_categorical(ytrain, 10)
    ytest = np_utils.to_categorical(ytest, 10)
    yCV = np_utils.to_categorical(yCV, 10)
    # Build the model
    model = Sequential()
    model.add(Convolution2D(32,3,3,border_mode='valid',input_shape=(1,28,28)))
    model.add(Activation(activation))
    model.add(Convolution2D(32,3,3))
    model.add(Activation(activation))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(16,3,3))
    model.add(Activation(activation))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation(activation))
    model.add(Dropout(0.5))
    model.add(Dense(10))
    model.add(Activation('softmax'))
    # fit the model
    print("fit module")
    model.compile(loss='categorical_crossentropy',optimizer='adadelta',metrics=['accuracy'])
    model.fit(Xtrain,ytrain,batch_size=100,nb_epoch=20,verbose=1,validation_data=(XCV,yCV))
    score = model.evaluate(Xtest,ytest, verbose=0)
    print(score[0])
    print(score[1])
Example #29
def trainModel():
    inputs, correctOutputs = getNNData()

    print("Collected data")

    trainingInputs = inputs[:len(inputs)//2]
    trainingOutputs = correctOutputs[:len(correctOutputs)//2]

    testInputs = inputs[len(inputs)//2:]
    testOutputs = correctOutputs[len(correctOutputs)//2:]

    model = Sequential()
    model.add(Dense(24, input_shape=(24, )))
    model.add(Activation('tanh'))
    model.add(Dense(24))
    model.add(Activation('tanh'))
    model.add(Dense(5))
    model.add(Activation('softmax'))

    model.summary()

    model.compile(loss='mean_squared_error', optimizer=SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True))

    model.fit(trainingInputs, trainingOutputs, validation_data=(testInputs, testOutputs))
    score = model.evaluate(testInputs, testOutputs, verbose=0)
    print(score)

    json_string = model.to_json()
    open('my_model_architecture.json', 'w').write(json_string)
    model.save_weights('my_model_weights.h5', overwrite=True)
Example #30
def model(X_train, X_test, y_train, y_test, max_features, maxlen):
    model = Sequential()
    model.add(Embedding(max_features, 128, input_length=maxlen))
    model.add(LSTM(128))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    early_stopping = EarlyStopping(monitor='val_loss', patience=4)
    checkpointer = ModelCheckpoint(filepath='keras_weights.hdf5',
                                   verbose=1,
                                   save_best_only=True)

    model.fit(X_train, y_train,
              batch_size={{choice([32, 64, 128])}},
              nb_epoch=1,
              validation_split=0.08,
              callbacks=[early_stopping, checkpointer])

    score, acc = model.evaluate(X_test, y_test, verbose=0)

    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #31
    store.append(b)

    trainX1 = numpy.array(trainX)

    testX1 = numpy.array(testX)
    trainY1 = numpy.array(trainY)
    testY1 = numpy.array(testY)
    store = numpy.array(store)

    ### reshape input to be [samples, time steps, features]
    trainX1 = numpy.reshape(trainX1, (trainX1.shape[0], 1, trainX1.shape[1]))
    testX1 = numpy.reshape(testX1, (testX1.shape[0], 1, testX1.shape[1]))

    ### create and fit the LSTM network

    model.fit(trainX1, trainY1, epochs=100, batch_size=50, verbose=2)
    ### make predictions
    model.save('models/model_weights_' + prefix_st + '.h5')

    print(i)
    trainPredict = predict_from_saved_model(trainX1)
    testPredict = predict_from_saved_model(testX1)

    p = [testY1[-1]]
    q = testPredict[-1][0]

    data.append([q])
    trainPredict = scaler.inverse_transform(trainPredict)
    trainY1 = scaler.inverse_transform([trainY1])
    testPredict = scaler.inverse_transform(testPredict)
    testY1 = scaler.inverse_transform([testY1])
#Train test split.
X_tr, X_val, y_tr, y_val = train_test_split(x, y, test_size=0.2, random_state=63)

"""Step-5:Building the SimpleRNN, LSTM, GRU models."""

#Define SimpleRNN model.
SimpleRNN_model = Sequential()
SimpleRNN_model.add(Embedding(vocab, 50, input_length=length-1, trainable=True))
SimpleRNN_model.add(SimpleRNN(150, recurrent_dropout=0.1, dropout=0.1))
SimpleRNN_model.add(Dense(vocab, activation='softmax'))
print(SimpleRNN_model.summary())

#Compile the SimpleRNN model.
SimpleRNN_model.compile(loss='categorical_crossentropy', metrics=['acc'], optimizer='adam')
SimpleRNN_model.fit(X_tr, y_tr, epochs=20, verbose=2, validation_data=(X_val, y_val))

#Define LSTM model.
LSTM_model = Sequential()
LSTM_model.add(Embedding(vocab, 50, input_length=length-1, trainable=True))
LSTM_model.add(LSTM(150, recurrent_dropout=0.1, dropout=0.1))
LSTM_model.add(Dense(vocab, activation='softmax'))
print(LSTM_model.summary())

#Compile the LSTM model.
LSTM_model.compile(loss='categorical_crossentropy', metrics=['acc'], optimizer='adam')
LSTM_model.fit(X_tr, y_tr, epochs=100, verbose=2, validation_data=(X_val, y_val))

#Define GRU model.
GRU_model = Sequential()
GRU_model.add(Embedding(vocab, 50, input_length=length-1, trainable=True))
# how many history steps to keep: for input x[0:look_back-1] it predicts the output x[look_back]
look_back = 10
# how many steps ahead to predict
next_pred = 30
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
trainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = numpy.reshape(testX, (testX.shape[0], 1, testX.shape[1]))

# model
model = Sequential()
model.add(LSTM(10, input_shape=(1, look_back)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])

model.fit(trainX, trainY, epochs=10, batch_size=1, verbose=2)
# save model
model.save('STOCK-LSTM.h5')

# make predictions
trainPredict = model.predict(trainX)
print("trainX shape ",trainX.shape,"\ntrainPredict ",trainPredict.shape,"\ndataset ",dateset.shape)
testPredict = model.predict(testX)
# invert predictions
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
# calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:, 0]))
print('Train Score: %.2f RMSE' % (trainScore))
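create_dataset is not defined in this snippet; a minimal sketch of the conventional windowing helper it appears to be, turning a scaled 1-D series into (samples, look_back) inputs paired with next-step targets:

def create_dataset(dataset, look_back=1):
    # sliding windows: X[i] = dataset[i : i+look_back], Y[i] = dataset[i+look_back]
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        dataX.append(dataset[i:(i + look_back), 0])
        dataY.append(dataset[i + look_back, 0])
    return numpy.array(dataX), numpy.array(dataY)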
Example #34
# Deep Learning with TensorFlow/Keras textbook, p. 201


model.save('./model/sample/mnist/model_mnist.h5') 

# Training


from keras.callbacks import EarlyStopping, ModelCheckpoint
early_stopping = EarlyStopping(monitor='loss', patience=100, mode='auto')

model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])

modelpath = './model/sample/mnist{epoch:02d} - {val_loss: .4f}.hdf5'
checkpoint = ModelCheckpoint(filepath=modelpath, monitor='val_loss', save_best_only=True, save_weights_only=False, verbose=1)

model.fit(x_train, y_train, epochs=15, batch_size=120, validation_split=0.25, callbacks=[early_stopping, checkpoint])


model.save_weights('./model/sample/mnist/mnist_weight1.h5')

# Evaluation and prediction


loss, acc = model.evaluate(x_test,y_test, batch_size=1)

  
print('loss :', loss)
print('accuracy : ', acc)
Example #35
class Network:
    def __init__(self, recall=True):

        if recall:
            self.model = load_model(RecallFile)

        else:
            self.model = Sequential()

            self.model.add(Dense(15, input_dim=14, activation='sigmoid'))
            self.model.add(Dense(30, activation='sigmoid'))
            self.model.add(Dense(10, activation='sigmoid'))
            self.model.add(Dense(3, activation='softmax'))

            self.model.compile(loss='binary_crossentropy', optimizer='nadam')

    def testOnData(self, data, tp):  # Expects inputs + outputs
        total = len(data[0])
        correct = 0
        predictions = self.predict(data[0]).tolist()
        # print(data[0].tolist())
        # print(predictions)

        if tp == 'pricelow':  # Convert to 0s and 1s for analysis (PRICELOW)
            for s in range(len(predictions)):
                biggest = [0, 0]
                for value in predictions[s]:
                    if value > biggest[0]:
                        biggest[0] = value
                        biggest[1] = predictions[s].index(value)

                output = [0, 0, 0]
                output[biggest[1]] = 1
                predictions[s] = output

        if tp == 'airline':
            output = [0, 0, 0]
            # print(predictions)
            for s in range(len(predictions)):  # Convert to 0s and 1s for analysis (AIRLINE)
                for value in predictions[s]:
                    if value >= 0.4:
                        predictions[s][predictions[s].index(value)] = 1
                    else:
                        predictions[s][predictions[s].index(value)] = 0
        # print(predictions)
        # print(data[1].tolist())
        for s in range(len(predictions)):
            if predictions[s] == data[1][s].tolist():
                correct += 1

        return [correct, total]

    def train(self, inputs, outputs):
        self.model.fit(inputs, outputs, nb_epoch=epochs)

    def predict(self, inputs):
        return self.model.predict(inputs)

    def saveModel(self):
        self.model.save(RecallFile)
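A minimal usage sketch of the class above, assuming epochs and RecallFile are defined at module level as the code implies; train_inputs/train_outputs and test_inputs/test_outputs are placeholder numpy arrays shaped for the 14-input network:

net = Network(recall=False)            # build a fresh model instead of loading RecallFile
net.train(train_inputs, train_outputs)
correct, total = net.testOnData((test_inputs, test_outputs), 'airline')
print('accuracy:', correct / total)
net.saveModel()                        # persist for a later Network(recall=True)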
Example #36
# Adding the second hidden layer
# classifier.add(Dense(output_dim=5, init='uniform', activation='relu'))

# Adding the output layer
classifier.add(Dense(output_dim=1, init='uniform', activation='relu'))

# Compiling Neural Network
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])


# ------------------------------Training and Evaluation----------------------------------------------- #

# split data into train-set and test-set
kfold = RepeatedStratifiedKFold(n_splits=5, n_repeats=5)
# kfold = StratifiedKFold(n_splits=5, shuffle=True)

f1_measures = []
for train_set, test_set in kfold.split(X, y):
    # fitting our model
    classifier.fit(X[train_set], y[train_set], batch_size=15, epochs=50, verbose=0)

    # evaluate the model
    # 1. predicting the test set results
    y_pred = classifier.predict(X[test_set])
    y_pred = (y_pred > 0.5).astype(int)
    # 2. calculate f1 measure: F1 = 2 * (precision * recall) / (precision + recall)
    f1_measures.append(f1_score(y_true=y[test_set], y_pred=y_pred))

print(np.mean(f1_measures))
Example #37
y = array([4,5,6,7,8,9,10,11,12,13,50,60,70])

x = x.reshape(x.shape[0],x.shape[1],1)

model = Sequential()
model.add(LSTM(20,activation='relu',input_shape=(3,1)))
model.add(Dense(100))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Dense(100))
model.add(Dense(100))
model.add(Dense(100))
model.add(Dense(100))
model.add(Dense(100))
model.add(Dense(5))
model.add(Dense(1))
model.summary()
# 3. Training
model.compile(loss='mse', optimizer='adam', metrics=['mse'])
from keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(monitor='loss', patience=20, mode='auto')
model.fit(x, y, epochs=1000, batch_size=1, verbose=1, callbacks=[early_stopping])
loss, mse = model.evaluate(x, y, batch_size=1)  # the second value is MSE, matching metrics=['mse']
print(loss, mse)

x_input = array([[6.5,7.5,8.5],[50,60,70],[70,80,90],[100,110,120]])
x_input = x_input.reshape(x_input.shape[0],x_input.shape[1],1)

y_predict = model.predict(x_input)
print(y_predict)
# Initialising the ANN
classifier = Sequential()

# Adding the input layer and the first hidden layer
classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu', input_dim = 11))

# Adding the second hidden layer
classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu'))



# Adding the output layer
classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))

# Compiling the ANN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])

# Fitting the ANN to the Training set
classifier.fit(X_train, y_train, batch_size = 10, epochs = 20)

# Part 3 - Making predictions and evaluating the model

# Predicting the Test set results
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5)

# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
Example #39
model.summary()

# Use early stopping to find the best model
BEST_MODEL_FILEPATH="best_model"

# Define early stopping based on validation loss:
# if validation loss fails to improve for three epochs in a row, stop training.
# mode='min' indicates the monitored quantity needs to decrease
earlyStopping = EarlyStopping(monitor='val_loss', patience=3, verbose=2, mode='min')

# Define a checkpoint to save the best model, i.e. the one with maximum validation accuracy
checkpoint = ModelCheckpoint(BEST_MODEL_FILEPATH, monitor='val_acc', verbose=2, save_best_only=True, mode='max')

# Fit model
training = model.fit(X_train, y_train, shuffle = True, 
                     epochs = 1000, batch_size = 36,
                     callbacks=[earlyStopping, checkpoint],
                     validation_data=[X_test, y_test], verbose = 2)


# In[4]:


# Convert the fitting history from a dictionary to a dataframe
history=pd.DataFrame.from_dict(training.history)
history.columns = ["train_loss", "train_acc", "val_loss", "val_acc"]
history.index.name='epoch'
#print(history)

# Plot fitting history
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8,3));
history[["train_acc", "val_acc"]].plot(ax=axes[0]);
X_train = X_train.reshape(X_train.shape[0], X_train.shape[1] * X_train.shape[2]).astype(float)
X_test = X_test.reshape(X_test.shape[0], X_test.shape[1] * X_test.shape[2]).astype(float)

# Scale pixel values into [0, 1] as the assignment asks; this also improves the performance of the ANN
X_train/=255.0
X_test/=255.0

y_train = keras.utils.to_categorical(y_train, num_classes=num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes=num_classes)

model = Sequential()
model.add(Dense(20, input_dim=X_train.shape[1], activation='sigmoid'))
model.add(Dense(num_classes, activation='softmax'))

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(x=X_train, y=y_train, batch_size=batch_size, validation_split=0.2, epochs=epochs, verbose=1)

plt.figure()
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Number of Epochs')
plt.legend(['Train Accuracy', 'Test Accuracy'], loc='upper left')
plt.savefig('Accuracy.png')
plt.close()

plt.figure()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model Loss')
def modelling(task_no,train_input,test_input,hidden_nodes,number_epoch):

    n_hidden = hidden_nodes
    nb_classes = 784
    optimizer = 'RMSprop'     
    # optimizer = SGD(lr=0.001)
    loss = 'mean_squared_error'
    metrics = ['accuracy']
    batch_size = 128
    nb_epoch = number_epoch


    if task_no in ['4','5']:

        # load the saved model
        json_file = open('model.json', 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        loaded_model = model_from_json(loaded_model_json)
        # load weights into new model
        loaded_model.load_weights("model.h5")
        print("Loaded model from disk")
        loaded_model.compile(loss=loss, optimizer=optimizer, metrics=metrics)

    else:

        #initializing model
        # input_img = Input(shape=(784,))
        # hidden_output = Dense(n_hidden, activation='relu')(input_img)
        # final_output = Dense(nb_classes, activation='linear')(hidden_output)
        # model = Model(input=input_img, output=final_output)
        # model.compile(loss=loss, optimizer=optimizer, metrics=metrics)

        model = Sequential()
        model.add(Dense(n_hidden, input_shape=(784,), activation='relu')) 
        model.add(Dense(nb_classes, activation='linear'))
        model.compile(loss=loss, optimizer=optimizer, metrics=metrics)
    

    # In all tasks the target is the same as the input (an autoencoder), so the input is passed in both positions

    if task_no == '1':

        #task1
        history = model.fit(train_input,train_input, batch_size=batch_size, nb_epoch=nb_epoch, verbose=2, validation_data=(test_input, test_input), shuffle=True)
        return history

    elif task_no == '2':
        
        #task2
        history = model.fit(train_input,train_input, batch_size=batch_size, nb_epoch=nb_epoch, verbose=2, validation_data=(test_input, test_input), shuffle=True)
        train_score = model.evaluate(train_input, train_input, verbose=1)
        test_score = model.evaluate(test_input, test_input, verbose=1)
        return [train_score[0],test_score[0]]

    elif task_no == '3':
        
        #task3
        history = model.fit(train_input,train_input, batch_size=batch_size, nb_epoch=nb_epoch, verbose=2, shuffle=True)
        # save the model to a JSON file
        model_json = model.to_json()
        with open("model.json", "w") as json_file:
            json_file.write(model_json)
        # serialize weights to HDF5
        model.save_weights("model.h5")
        print("Saved model to disk")
        weights, biases = model.layers[0].get_weights()
        return weights

    elif task_no in ['4','5']:
        
        #task4
        output = loaded_model.predict(test_input)
        return output
Example #42
# Reverse the sequences
x_train = [x[::-1] for x in x_train]
x_test = [x[::-1] for x in x_test]
# Pad the sequences
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
model = Sequential()
model.add(layers.Embedding(max_feature, 32))
model.add(layers.Bidirectional(layers.LSTM(32)))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(
    optimizer='rmsprop',
    loss='binary_crossentropy',
    metrics=['acc']
)
history = model.fit(
    x_train, y_train,
    epochs=10,
    batch_size=128,
    validation_split=0.2
)

loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(loss)+1)
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
def model_train():

    datasets = 'datasets'

    (images, labels, names, label) = ([], [], [], 0)
    for (subdirs, dirs, files) in os.walk(datasets):
        for subdir in dirs:
            if subdir == '.DS_Store':
                continue
            names.append(subdir)
            subjectpath = os.path.join(datasets, subdir)
            print('subjectpath',subjectpath)
            for filename in os.listdir(subjectpath):
                if filename == '.DS_Store':
                    continue
                path = subjectpath + '/' + filename
                print('Path',path)
                imgRead = load_img(path,target_size = (64,64))
                imgRead = img_to_array(imgRead)
                images.append(imgRead)
                labels.append(int(label))
                print(label)
            label += 1
    print(labels)
    print(np.shape(images))
    print(np.shape(labels))
    print(names)
    (width, height) = (130, 100)

    # Create a Numpy array from the two lists above
    (images, labels) = [np.array(lis) for lis in [images, labels]]

    # Train a CNN classifier on the images
    X_train = np.array(images)
    Y_train = np.array(labels) 

    nb_classes = label
    Y_train = to_categorical(Y_train, nb_classes)
    # Y_test = to_categorical(Y_test, nb_classes)
    input_shape = (64, 64, 3)

    X_train = X_train.astype('float32')
    X_train /= 255

    # BUILD THE MODEL
    model = Sequential()

    model.add(Convolution2D(32, 3, 3, border_mode='same', input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Convolution2D(64, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Convolution2D(64, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    # TRAIN THE MODEL
    adam = Adam(lr=0.0001)
    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])

    print(model.summary())
    model.fit(X_train, Y_train, batch_size=5, epochs=30, verbose=1)

    model.save('models/model.h5')
    print("saved the model ______________")
    pickle.dump(names, open("models/list.pkl", "wb"))
    print("saved the PKL PKL  ______________")
Example #44
def run_lstm(Xtr,
             Xte,
             ytr,
             yte,
             max_features,
             max_features2,
             out_size,
             embedding_size,
             hidden_size,
             batch_size,
             epochs=50,
             verbose=0,
             maxsent=0):

    print('Training and testing tensor shapes:', Xtr.shape, Xte.shape,
          ytr.shape, yte.shape)

    mf = max(max_features, max_features2)

    model1 = Sequential()
    model1.add(
        Embedding(input_dim=mf,
                  output_dim=embedding_size,
                  input_length=maxsent,
                  mask_zero=True))

    model2 = Sequential()
    model2.add(InputLayer(input_shape=(maxsent, Xtr.shape[2] - 1)))

    model = Sequential()
    model.add(Merge([model1, model2], mode='concat'))
    model.add(Dense(1))

    model.add(
        LSTM(hidden_size,
             return_sequences=True,
             input_shape=(maxsent, Xtr.shape[2] - 1)))
    model.add(TimeDistributed(Dense(out_size)))
    model.add(Activation('softmax'))
    print('compile...')
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    #print(model.summary())
    print('train...')

    model.fit([Xtr[:, :, 0], Xtr[:, :, 1:Xtr.shape[2]]],
              ytr,
              epochs=epochs,
              verbose=verbose,
              batch_size=batch_size,
              validation_data=([Xte[:, :, 0], Xte[:, :, 1:Xtr.shape[2]]], yte))
    score = model.evaluate([Xte[:, :, 0], Xte[:, :, 1:Xtr.shape[2]]],
                           yte,
                           batch_size=batch_size,
                           verbose=verbose)

    print('Raw test score:', score)
    pr = model.predict_classes([Xtr[:, :, 0], Xtr[:, :, 1:Xtr.shape[2]]],
                               verbose=verbose)
    yh = ytr.argmax(2)  # no encoding
    fyh, fpr = score2(yh, pr)
    print('Training...')
    print(' - accuracy:', accuracy_score(fyh, fpr))
    print(' - confusion matrix:')
    print(confusion_matrix(fyh, fpr))
    print(' - precision, recall, f1, support:')
    print(precision_recall_fscore_support(fyh, fpr))

    pr = model.predict_classes([Xte[:, :, 0], Xte[:, :, 1:Xte.shape[2]]],
                               verbose=verbose)
    yh = yte.argmax(2)
    fyh, fpr = score2(yh, pr)
    print('Testing...')
    print(' - accuracy:', accuracy_score(fyh, fpr))
    print(' - confusion matrix:')
    print(confusion_matrix(fyh, fpr))
    print(' - precision, recall, f1, support:')
    print(precision_recall_fscore_support(fyh, fpr))
    print(
        '----------------------------------------------------------------------------------'
    )
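A functional-API sketch of the same two-input model for Keras 2, where the Sequential Merge layer no longer exists; this is an assumed translation (it drops the stray Dense(1) between the merge and the LSTM) rather than the original code:

from keras.layers import Input, Embedding, Dense, LSTM, TimeDistributed, concatenate
from keras.models import Model

words = Input(shape=(maxsent,))                     # token-id channel
feats = Input(shape=(maxsent, Xtr.shape[2] - 1))    # extra feature channels
emb = Embedding(input_dim=mf, output_dim=embedding_size, mask_zero=True)(words)
x = concatenate([emb, feats])
x = LSTM(hidden_size, return_sequences=True)(x)
out = TimeDistributed(Dense(out_size, activation='softmax'))(x)
model = Model(inputs=[words, feats], outputs=out)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])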
Пример #45
0
df = df_pre.sample(frac=1)

dataset = df.values
X = dataset[:,0:12]
Y = dataset[:,12]

# Model setup
model = Sequential()
model.add(Dense(30,  input_dim=12, activation='relu'))
model.add(Dense(12, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

# Compile the model
model.compile(loss='binary_crossentropy',
          optimizer='adam',
          metrics=['accuracy'])

# Set up the model save folder
MODEL_DIR = './model/'
if not os.path.exists(MODEL_DIR):
    os.mkdir(MODEL_DIR)

# Set the checkpoint conditions
modelpath="./model/{epoch:02d}-{val_loss:.4f}.hdf5"
checkpointer = ModelCheckpoint(filepath=modelpath, monitor='val_loss', verbose=1, save_best_only=True)

# Train the model and save checkpoints
model.fit(X, Y, validation_split=0.2, epochs=200, batch_size=200, verbose=0, callbacks=[checkpointer])
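A follow-up sketch (assumed, not in the original): the checkpoint filenames embed the validation loss, so the best model can be located and reloaded by parsing the names.

import glob
from keras.models import load_model

best_path = min(glob.glob('./model/*.hdf5'),
                key=lambda p: float(p.rsplit('-', 1)[1][:-5]))  # "{epoch}-{val_loss}.hdf5"
best_model = load_model(best_path)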

Пример #46
0
model.add(
    Conv2D(32, (3, 3),
           activation='relu',
           padding='same',
           input_shape=(28, 28, 1)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(10, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.summary()

model.fit(X_train, y_train, batch_size=32, epochs=10, verbose=1)
score = model.evaluate(X_test, y_test, verbose=0)
print(score[1])

print(model.predict(X_test[0:1]))  # keep the batch dimension for a single sample
plt.imshow(X_test[0].reshape(28, 28))
plt.show()
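The snippet above assumes preprocessed MNIST tensors; a preparation sketch (assumed, not in the original):

from keras.datasets import mnist
from keras.utils import to_categorical

(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(-1, 28, 28, 1).astype('float32') / 255
X_test = X_test.reshape(-1, 28, 28, 1).astype('float32') / 255
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)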
Пример #47
0
# Import the required libraries.
import numpy as np
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense

# Fix the random seeds so that every run produces the same results.
seed = 0
np.random.seed(seed)
tf.set_random_seed(seed)

# Load the prepared surgery-patient data.
Data_set = np.loadtxt("../dataset/ThoraricSurgery.csv", delimiter=",")

# Store the patient records as X and the surgery outcomes as Y.
X = Data_set[:, 0:17]
Y = Data_set[:, 17]

# Define the deep-learning architecture (this sets up the model).
model = Sequential()
model.add(Dense(30, input_dim=17, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

# Train the network.
#model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
model.compile(loss='binary_crossentropy',
              optimizer='sgd',
              metrics=['accuracy'])
model.fit(X, Y, epochs=30, batch_size=10)

# Print the results.
print("\n Accuracy: %.4f" % (model.evaluate(X, Y)[1]))
Пример #48
0
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(NB_CLASSES))
model.add(Activation('softmax'))
model.summary()

# train
model.compile(loss='categorical_crossentropy',
              optimizer=OPTIM,
              metrics=['accuracy'])
model.fit(X_train,
          y_train,
          batch_size=BATCH_SIZE,
          epochs=NB_EPOCH,
          validation_split=VALIDATION_SPLIT,
          verbose=VERBOSE)
score = model.evaluate(X_test, y_test, batch_size=BATCH_SIZE, verbose=VERBOSE)
print("test score:", score[0])
print("test accuracy:", score[1])

# save model (the JSON string is built here, but the original never writes it out)
model_json = model.to_json()
model.save_weights('cifar10_weights.h5', overwrite=True)
Пример #49
0
from numpy import loadtxt
from keras.models import Sequential
from keras.layers import Dense

dataset = loadtxt('/content/AMD_withoutHeaders.csv', delimiter=',')
x = dataset[:,0:12]
y = dataset[:,12]

#model format - models in Keras are defined as a sequence of layers
#we will use a Sequential Model and add layers as needed 
model = Sequential()

#model uses the Dense class for fully connected layers
#first argument = # of neurons/nodes, 'activation' argument is the activation function
#relu activation is applied to the first 2 layers, sigmoid on the last for binary output
model.add(Dense(12, input_dim=12, activation='relu')) #input layer, where input_dim = number of input features
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

#compile model using binary cross-entropy as the loss function and adam as the optimizer
#adam is an optimization algorithm that tunes itself and gives good results in a wide range of problems
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

#fit() trains the model - training occurs over epochs and each epoch is split into batches
#the number of epochs and batch size can be chosen experimentally by trial and error
model.fit(x, y, epochs=180, batch_size=10)

# after training our NN on the dataset, we will make class predictions with the model
predictions = model.predict_classes(x)

#summarize every case - the goal is the lowest loss (0) and the highest accuracy (1) possible
for i in range(len(x)):
  print('%s => predicted %d (expected %d)' % (x[i].tolist(), predictions[i], y[i]))

Пример #50
0
# Adding the second hidden layer
classifier.add(Dense(15, kernel_initializer='uniform', activation='relu'))
classifier.add(Dense(10, kernel_initializer='uniform', activation='relu'))

# Adding the output layer (softmax, since the nine classes are mutually exclusive)
classifier.add(Dense(9, kernel_initializer='uniform', activation='softmax'))

# Compiling the ANN
classifier.compile(optimizer='adam',
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])

# Labels are expected as categorical one-hot encodings
# Fitting the ANN to the Training set
classifier.fit(X_train, y_train, batch_size=10, epochs=500)

# Predicting the Test set results
y_pred = classifier.predict(X_test)
y_pred_max_index = np.argmax(y_pred, axis=1)
y_test_max_index = np.argmax(y_test, axis=1)

# testing training set accuracy
y_pred_2 = classifier.predict(X_train)
y_pred_2_max_index = np.argmax(y_pred_2, axis=1)
y_train_max_index = np.argmax(y_train, axis=1)

# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test_max_index, y_pred_max_index)
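A follow-up sketch (assumed, not in the original) turning the argmax indices above into train and test accuracies to go with the confusion matrix:

from sklearn.metrics import accuracy_score
print('test accuracy:', accuracy_score(y_test_max_index, y_pred_max_index))
print('train accuracy:', accuracy_score(y_train_max_index, y_pred_2_max_index))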
Пример #51
0
def TwoNN_crossval(combinations, train, train_y, test, test_y, dims):  # runs k-fold cross-validation and evaluates each combination
    acc_per_fold = []
    loss_per_fold = []
    reports = []
    cf_matrices = []

    num_folds = 10
    kfold = KFold(n_splits=num_folds)
    inputs = np.concatenate((train, test), axis=0)
    targets = np.concatenate((train_y, test_y), axis=0)
    if len(combinations) == 1:
        for i in combinations.keys():
            a, b, d = i
            # K-fold Cross Validation model evaluation
            fold_no = 1
            for train, test in kfold.split(inputs, targets):
                model = Sequential()
                model.add(Dense(a, input_dim=dims, activation='relu'))
                model.add(Dense(b))
                model.add(Dropout(0.2))
                model.add(Activation('relu'))
                model.add(Dense(6, activation="softmax"))
                model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
                print('------------------------------------------------------------------------')
                print(f'Training for fold {fold_no} ...')
                model.fit(inputs[train], targets[train], epochs=d, batch_size=32, verbose=0)

                # Generate generalization metrics
                scores = model.evaluate(inputs[test], targets[test], verbose=0)
                print(
                    f'Score for fold {fold_no}: {model.metrics_names[0]} of {scores[0]}; {model.metrics_names[1]} of {scores[1] * 100}%')
                acc_per_fold.append(scores[1] * 100)
                loss_per_fold.append(scores[0])

                Y = np.argmax(targets[test], axis=1)  # here we build the precision/recall/f1 table for each fold
                y_pred = model.predict_classes(inputs[test])
                reports.append(classification_report(Y, y_pred, output_dict=True))
                cm = confusion_matrix(Y, y_pred)
                cm = cm / cm.sum(axis=1, keepdims=True)  # row-normalize the confusion matrix
                cf_matrices.append(cm.round(2))
                # Increase fold number
                fold_no += 1

            # == Provide average scores ==
            print('------------------------------------------------------------------------')

            print('Score per fold')
            f1_report = []
            for n in range(0, len(acc_per_fold)):
                #print('------------------------------------------------------------------------')
                #print(f'> Fold {n + 1} - Loss: {loss_per_fold[n]} - Accuracy: {acc_per_fold[n]}%')
                #print('------------------------------------------------------------------------')
                #print(f'> Per Class Report:\n{reports[n]}')
                f1_report.append(reports[n]['weighted avg']['f1-score'])
            print('------------------------------------------------------------------------')
            print(i)
            print('Average scores for all folds:')
            print(f'> Accuracy: {np.mean(acc_per_fold)} (+- {np.std(acc_per_fold)})')
            print(f'> F1-Score: {np.mean(f1_report).round(3)} (+-{np.std(f1_report).round(3)})')
            print(f'> Loss: {np.mean(loss_per_fold)}')
            print(f'> Confusion Matrix:\n{np.nanmean(cf_matrices, axis=0)}')
            print('------------------------------------------------------------------------')
            '''to be added if one wants to save the model:
            model_structure = model.to_json()
            with open(f"NN_tfidf_{i}.json", "w") as json_file:
               json_file.write(model_structure) 
            model.save_weights(f"Weights_tfidf_{i}")  
            '''
    else:
        for (neurons, val) in combinations.items():
            one_combo = dict()
            one_combo[neurons] = val
            TwoNN_crossval(one_combo, train, train_y, test, test_y, dims)
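A usage sketch under assumed conventions: each key of combinations is an (a, b, d) tuple of first-layer width, second-layer width, and epoch count, matching the unpacking inside the function.

combinations = {(128, 64, 20): None, (256, 128, 30): None}
TwoNN_crossval(combinations, train, train_y, test, test_y, dims=train.shape[1])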
Пример #52
0
import keras
from keras.layers import Dense
from keras.models import Sequential
from keras.utils import to_categorical

# Convert the target to categorical: target
target = to_categorical(df.survived)

# Set up the model
model = Sequential()

# Add the first layer
model.add(Dense(32, activation = 'relu', input_shape = (n_cols,)))

# Add the output layer
model.add(Dense(2, activation = 'softmax'))

# Compile the model
model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])

# Fit the model
model.fit(predictors, target)

# save model
from keras.models import load_model
model.save('model_class.h5')
model = load_model('model_class.h5')
#predictions
pred = model.predict(data)   # two columns: column 0 = probability of class 0, column 1 = probability of class 1
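A small follow-up sketch (assumed): collapsing the two probability columns into hard class labels.

pred_labels = pred.argmax(axis=1)   # 0 = did not survive, 1 = survived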

Пример #53
0
model.add(GRU(50, kernel_initializer=weight_variable, input_shape=(300, n_in)))
model.add(BatchNormalization())
model.add(Dense(n_out, kernel_initializer=weight_variable))
model.add(Activation('sigmoid'))

optimizer = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999)
model.compile(loss='binary_crossentropy',
              optimizer=optimizer,
              metrics=['accuracy'])

epochs = 50
batch_size = 10

hist = model.fit(X_train,
                 Y_train,
                 batch_size=batch_size,
                 epochs=epochs,
                 validation_data=(X_validation, Y_validation))

######################     data_save      #################################

json_string = model.to_json()
open('train_GRU3.json', 'w').write(json_string)

model.save_weights("train_GRU3_param.hdf5")
model.save_weights("train_GRU3_param.h5")  # duplicate copy of the weights with a .h5 extension

# Accuracy
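The original breaks off here; a typical accuracy plot from the captured history, as a sketch (the history key may be 'accuracy' instead of 'acc' in newer Keras):

import matplotlib.pyplot as plt

plt.plot(hist.history['acc'], label='train')
plt.plot(hist.history['val_acc'], label='validation')
plt.legend()
plt.show()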
Пример #54
0
autoencoder.add(
    Conv2D(filters=8, kernel_size=(3, 3), activation='relu', padding='same'))
autoencoder.add(UpSampling2D(size=(2, 2)))
autoencoder.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
autoencoder.add(UpSampling2D(size=(2, 2)))
autoencoder.add(
    Conv2D(filters=1, kernel_size=(3, 3), activation='sigmoid',
           padding='same'))
autoencoder.summary()

autoencoder.compile(optimizer='adam',
                    loss='binary_crossentropy',
                    metrics=['accuracy'])
autoencoder.fit(previsores_treinamento,
                previsores_treinamento,
                epochs=10,
                batch_size=256,
                validation_data=(previsores_teste, previsores_teste))

encoder = Model(inputs=autoencoder.input,
                outputs=autoencoder.get_layer('flatten_15').output)
encoder.summary()

imagens_codificadas = encoder.predict(previsores_teste)
imagens_decodificadas = autoencoder.predict(previsores_teste)

numero_imagens = 10
imagens_teste = np.random.randint(previsores_teste.shape[0],
                                  size=numero_imagens)
plt.figure(figsize=(18, 18))
for i, indice_imagem in enumerate(imagens_teste):
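# (the loop body is cut off in the original; a standard original-vs-reconstruction
#  display, as a sketch, assuming 28x28 single-channel inputs)
    ax = plt.subplot(2, numero_imagens, i + 1)
    ax.imshow(previsores_teste[indice_imagem].reshape(28, 28))
    ax = plt.subplot(2, numero_imagens, i + 1 + numero_imagens)
    ax.imshow(imagens_decodificadas[indice_imagem].reshape(28, 28))
plt.show()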
Пример #55
0
model.add(Dropout(0.25))

# Adding a fully connected layer and then the output layer
model.add(Flatten())  # feature maps are flattened to 1D before the Dense layer
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
# Final layer has output size of 10, corresponding to 10 digits
model.add(Dense(10, activation='softmax'))

# Compile the model and declare a loss function
model.compile(loss='categorical_crossentropy',
              optimizer='Adam',
              metrics=['accuracy'])

# To fit the model we have to declare the batch size and number of epochs to train for, then pass in the training data
model.fit(X_train, Y_train, batch_size=32, epochs=10, verbose=1)

# Evaluate the model on test data
score = model.evaluate(X_test, Y_test, verbose=0)
print("%s: %.2f%%" %
      (model.metrics_names[1], score[1] * 100))  # Print the model metrics

# serialize model to JSON
model_json = model.to_json()
with open("model.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("model.h5")
print("Saved model to disk")
Пример #56
0
model_adam.add(Dense(256))
model_adam.add(Activation('relu'))
model_adam.add(Dense(1))
model_adam.add(Activation('sigmoid'))
''' Setting optimizer as Adam '''
from keras.optimizers import SGD, Adam, RMSprop, Adagrad
model_adam.compile(loss='mean_squared_error',
                   optimizer='Adam',
                   metrics=['accuracy'])

with tf.device('/gpu:0'):
    '''Fit models and use validation_split=0.1 '''
    history_adam = model_adam.fit(X_train,
                                  Y_train,
                                  batch_size=batch_size,
                                  epochs=nb_epoch,
                                  verbose=1,
                                  shuffle=True,
                                  validation_split=0.1)

model_adam.save_weights('cm-weights.hdf5')

from sklearn.metrics import confusion_matrix, accuracy_score
pred_y = np.sort(model_adam.predict(X_train), axis=0)
print(pred_y)

from sklearn import preprocessing
min_max_scaler = preprocessing.MinMaxScaler()
blah_y = min_max_scaler.fit_transform(pred_y).reshape(-1)
print(blah_y)
np.savetxt("pred_credit.csv", blah_y, delimiter=",")
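The metrics imported above are never used, and the np.sort call breaks the pairing between samples and predictions; a sketch (assumed, with Y_train holding the 0/1 labels) that applies them to unsorted predictions:

hard_pred = (model_adam.predict(X_train) > 0.5).astype(int)
print(accuracy_score(Y_train, hard_pred))
print(confusion_matrix(Y_train, hard_pred))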
Пример #57
0
import sklearn.preprocessing
from keras import initializers
from keras.models import Sequential
from keras.layers import Dense


class nn_new():
    def __init__(self,
                 input_data,
                 output_data,
                 hidden_layer_nodes,
                 classification=False,
                 layer_activation='relu',
                 output_activation='linear',
                 epochs=60,
                 batch_size=10,
                 scale_range=(-1, 1)):

        self.hidden_layer_nodes = hidden_layer_nodes
        self.layer_activation = layer_activation if layer_activation in [
            'sigmoid', 'tanh', 'relu', 'elu', 'exponential', 'linear',
            'softmax', 'softplus', 'softsign'
        ] else 'relu'
        self.output_activation = output_activation if output_activation in [
            'sigmoid', 'tanh', 'relu', 'elu', 'exponential', 'linear',
            'softmax', 'softplus', 'softsign'
        ] else 'linear'

        self.classification = classification

        self.input_scaler = sklearn.preprocessing.MinMaxScaler(
            feature_range=scale_range)
        self.input_scaler.fit(input_data)
        input_data = self.input_scaler.transform(input_data)

        self.output_scaler = sklearn.preprocessing.MinMaxScaler(
            feature_range=scale_range)
        self.output_scaler.fit(output_data)

        if classification:
            self.metric = 'accuracy'

            if output_data.shape[1] > 1:
                # ASSUMES labels are one hot encoded
                self.output_activation = "softmax"
                self.loss = "categorical_crossentropy"
            else:
                # ASSUMES labels of 0 or 1
                self.output_activation = "sigmoid"
                self.loss = 'binary_crossentropy'

        else:
            output_data = self.output_scaler.transform(output_data)
            self.metric = "mae"
            self.loss = "mse"

        self.model = Sequential()

        self.model.add(
            Dense(hidden_layer_nodes[0],
                  input_dim=input_data.shape[1],
                  kernel_initializer=initializers.he_uniform(),
                  activation=self.layer_activation))

        for i in range(1, len(hidden_layer_nodes)):  # add every remaining hidden layer
            self.model.add(
                Dense(hidden_layer_nodes[i],
                      activation=self.layer_activation,
                      kernel_initializer=initializers.he_uniform()))

        self.model.add(
            Dense(output_data.shape[1],
                  activation=self.output_activation,
                  kernel_initializer=initializers.he_uniform()))

        self.model.compile(loss=self.loss,
                           optimizer='adam',
                           metrics=[self.metric])
        self.model.fit(input_data,
                       output_data,
                       epochs=epochs,
                       batch_size=batch_size,
                       verbose=2)

        # _, accuracy = self.model.evaluate(input_data, output_data)
        # print("Mean average error: ", accuracy)

    def predict(self, data):
        data = self.input_scaler.transform(data)

        raw_pred = self.model.predict(data)

        # classification outputs are returned as-is; regression outputs are
        # mapped back to the original scale
        return raw_pred if self.classification else self.output_scaler.inverse_transform(raw_pred)
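A usage sketch with assumed random data: a small regression fit, exercising the scaling and inverse-scaling paths.

import numpy as np

X = np.random.rand(200, 4)
y = X.sum(axis=1, keepdims=True) + 0.1 * np.random.rand(200, 1)
net = nn_new(X, y, hidden_layer_nodes=[16, 8], epochs=20)
print(net.predict(X[:3]))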
Пример #58
0
              metrics=['accuracy'])

#x_train = x_train.astype('float32')
#x_test = x_test.astype('float32')
#x_train /= 255
#x_test /= 255

checkpointer = ModelCheckpoint(filepath=file_path, 
                               verbose=1, save_best_only=True)

if not data_augmentation:
    print('Not using data augmentation.')
    model.fit(train_tensors, train_targets,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(valid_tensors, valid_targets),
              shuffle=True,
              callbacks=[checkpointer], verbose=1
              )
else:
    print('Using real-time data augmentation.')
    # This will do preprocessing and realtime data augmentation:
    datagen = ImageDataGenerator(
        zca_epsilon=1e-06,  # epsilon for ZCA whitening
        rotation_range=20,  # randomly rotate images in the range (degrees, 0 to 180)
        # randomly shift images horizontally (fraction of total width)
        width_shift_range=0.15,
        # randomly shift images vertically (fraction of total height)
        height_shift_range=0.15,
        shear_range=0.1,  # set range for random shear
        zoom_range=0.1,  # set range for random zoom
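        # (the argument list is cut off in the original; closing the call and
        #  running the generator, as an assumed sketch)
        horizontal_flip=True)  # randomly flip images horizontally

    datagen.fit(train_tensors)
    model.fit_generator(datagen.flow(train_tensors, train_targets,
                                     batch_size=batch_size),
                        epochs=epochs,
                        validation_data=(valid_tensors, valid_targets),
                        callbacks=[checkpointer], verbose=1)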
Пример #59
0
class lstm():
    def __init__(self,
                 training_input,
                 training_labels,
                 nr_outputs,
                 hidden_layer_nodes,
                 epochs=5,
                 optimization_runs=1,
                 scale_range=(0, 1),
                 activation_function='softmax',
                 history=3,
                 dropout=0.2):
        self.output_data = training_labels
        #self.scaler = MinMaxScaler(feature_range=scale_range).fit(training_input)
        #self.input_data = self.scaler.transform(training_input).astype(np.float32)
        self.input_data = training_input
        self.nr_features = np.shape(self.input_data)[-1]
        self.nr_training_samples = np.shape(self.input_data)[0]
        self.nr_classes = nr_outputs
        self.nr_hidden_layers = len(hidden_layer_nodes)
        self.epochs = epochs
        self.hidden_layer_nodes = hidden_layer_nodes
        self.optimization_tries = optimization_runs
        self.activation_function = activation_function
        self.history = history
        self.dropout = dropout

    def fit(self):
        best = float('inf')
        for attempt in range(self.optimization_tries):  # each attempt trains a fresh model
            self.model = Sequential()

            for i in range(self.nr_hidden_layers):
                if i == 0:
                    self.model.add(
                        LSTM(units=self.hidden_layer_nodes[i],
                             activation=self.activation_function,
                             kernel_initializer='random_normal',
                             return_sequences=True,
                             input_shape=(self.history, self.nr_features)))
                    self.model.add(Dropout(self.dropout))
                elif i == (self.nr_hidden_layers - 1):
                    self.model.add(
                        LSTM(units=self.hidden_layer_nodes[i],
                             activation=self.activation_function,
                             kernel_initializer='random_normal'))
                    self.model.add(Dropout(self.dropout))
                else:
                    self.model.add(
                        LSTM(units=self.hidden_layer_nodes[i],
                             activation=self.activation_function,
                             kernel_initializer='random_normal',
                             return_sequences=True))
                    self.model.add(Dropout(self.dropout))

            self.model.add(Dense(units=self.nr_classes))
            self.model.compile(optimizer='adam',
                               loss='mean_squared_error',
                               metrics=['mae'])
            self.model.fit(self.input_data,
                           self.output_data,
                           epochs=self.epochs)
            model_loss = self.model.evaluate(self.input_data,
                                             self.output_data)[0]
            if model_loss < best:
                best = model_loss
                self.final_model = self.model

    def predict(self, data):
        #data = self.scaler.transform(data)  # maybe this needs casting to np.float32?
        prediction = self.final_model.predict(data, batch_size=1)  # use the best model kept by fit()
        return prediction
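A usage sketch with assumed shapes: 100 windows of 3 timesteps x 2 features regressed onto one target each.

import numpy as np

Xw = np.random.rand(100, 3, 2).astype(np.float32)
yw = np.random.rand(100, 1).astype(np.float32)
net = lstm(Xw, yw, nr_outputs=1, hidden_layer_nodes=[32, 16], history=3)
net.fit()
print(net.predict(Xw[:2]))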
Пример #60
0
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense,SimpleRNN
test_data=np.array([[[0.5,0.6,0.7]],[[0.6,0.7,0.8]]])
test_data=test_data.reshape(2,3,1)
x_data = np.array([[[0.1, 0.2, 0.3]], [[0.2, 0.3, 0.4]], [[0.3, 0.4, 0.5]], [[0.4, 0.5, 0.6]]])
y_data = np.array([[[0.2, 0.3, 0.4]], [[0.3, 0.4, 0.5]], [[0.4, 0.5, 0.6]], [[0.5, 0.6, 0.7]]])
x_data=x_data.reshape(4,3,1)
y_data=y_data.reshape(4,3,1)

model=Sequential()
model.add(SimpleRNN(50,input_shape=(3,1),return_sequences=True))
model.add(SimpleRNN(40,return_sequences=True))
model.add(SimpleRNN(30,return_sequences=True))
model.add(Dense(1))
model.summary()
model.compile(loss='mse', optimizer='adam', metrics=['mse'])
model.fit(x_data,y_data,epochs=100,verbose=0)
y=model.predict(test_data)
print(y)