Example no. 1
0
def Evaluate():
    """Rebuild the points CSV, append the extracted row for the uploaded
    input file, run the pre-processing / prediction pipeline and emit the
    result over the socket.

    Side effects: rewrites ``input/points.csv``, reads the uploaded
    ``input.txt`` path from app config, and emits the ``Predict`` and
    ``Y`` socket events. Returns nothing.
    """
    # Header row: serial number, id, then interleaved coordinate columns
    # x1, y1, x2, y2, ..., x10000, y10000.
    header = ['s.no', 'id']
    for i in range(10000):
        header.append('x' + str(i + 1))
        header.append('y' + str(i + 1))

    # 'w' mode truncates any previous contents, so the original's extra
    # bare open('input/points.csv', "w") was redundant and leaked its
    # file handle — removed.
    with open('input/points.csv', 'w') as csvFile:
        writer = csv.writer(csvFile)
        writer.writerows([header])

    filepath = os.path.join(app.config['UPLOAD_FOLDER'], "input.txt")
    # Derive the basename directly from the path instead of opening the
    # file just to read its .name (the original leaked that handle too).
    filename = os.path.basename(filepath)

    test = "HEGSE"
    csv_list = []
    if filename.find(test) == -1:
        # Only files whose name lacks the "HEGSE" marker contribute a row.
        return_list = getpoints(filepath, 1)
        csv_list.append(return_list)

    with open('input/points.csv', "a") as csvFile:
        writer = csv.writer(csvFile)
        writer.writerows(csv_list)

    # NOTE(review): the pipeline below reads UPLOAD_FOLDER/points.csv
    # while the rows above were written to input/points.csv — confirm
    # these resolve to the same file.
    pathtocsv = os.path.join(app.config['UPLOAD_FOLDER'], "points.csv")
    pathtoMODEL = os.path.join(app.config['MODEL_FOLDER'], "")

    typ, data_x, scaler_rob_x, X, fid = data_pre(pathtocsv, pathtoMODEL)
    getvalues = get_val(data_x, typ, scaler_rob_x, X, pathtoMODEL)

    # Map the internal "on"/other flag to the display label.
    k = "ON" if typ == "on" else "OFF"

    emit('Predict', {'typ': str(k),
                     'val2': float(getvalues[1]),
                     'val3': float(getvalues[2]),
                     'val4': float(getvalues[3]),
                     'fid': str(fid)})
    emit("Y")
    def train_model(self, train_x, train_y):
        """Train a small dense two-class classifier on the given samples.

        Labels are one-hot encoded to depth 2; the checkpoint with the
        lowest validation loss is written to 'my_model.h5' and the final
        model to 'my_model_npz.h5'. Training progress is logged to a
        run-specific TensorBoard directory.
        """
        one_hot_y = tf.one_hot(train_y, depth=2)

        tb_callback = tf.keras.callbacks.TensorBoard(
            log_dir=data_pre().loadLog() + "people_npz-32-8-2",
            histogram_freq=1)

        # Fully-connected stack 400 -> 32 -> 8 -> 2; raw logits out.
        model = Sequential()
        model.add(layers.Dense(32, activation="sigmoid"))
        model.add(layers.Dense(8, activation="sigmoid"))
        model.add(layers.Dense(2))
        model.build(input_shape=(None, 400))
        model.summary()

        model.compile(
            optimizer=optimizers.Adam(0.001),
            loss=losses.CategoricalCrossentropy(from_logits=True),
            metrics=['accuracy'])

        # Keep only the weights that minimise validation loss.
        best_ckpt = tf.keras.callbacks.ModelCheckpoint(
            filepath='my_model.h5',
            monitor='val_loss',
            verbose=1,
            save_weights_only=False,
            save_best_only=True,
            mode='min',
        )

        model.fit(
            x=train_x, y=one_hot_y,
            epochs=100, validation_split=0.1, validation_freq=1,
            verbose=1, callbacks=[best_ckpt, tb_callback])

        # Sanity-check the fitted model on a training prefix.
        print(model.predict(train_x[:200]))

        model.save("my_model_npz.h5")
Example no. 3
0
        # --- orphaned fragment: the enclosing def (and the bindings for
        # filename/filepath) are outside this view — do not assume their
        # values. TODO confirm against the original file. ---
        # Append a data row for the uploaded file, skipped when the file
        # name contains the "HEGSE" marker.
        test = "HEGSE"
        csv_list = []
        if filename.find(test) == -1:
            # sno = sno+1
            return_list = getpoints(filepath,1)
            csv_list.append(return_list)

        # Append the extracted row(s) to the shared points CSV.
        with open('input/points.csv',"a") as csvFile:
            writer = csv.writer(csvFile)
            writer.writerows(csv_list)
        # Placeholder values; overwritten by get_val() below.
        getvalues=[0,0,0,0]
        pathtocsv = os.path.join(app.config['UPLOAD_FOLDER'], "points.csv")
        pathtoMODEL = os.path.join(app.config['MODEL_FOLDER'], "")
        
        # Dead assignment: typ is immediately rebound by data_pre().
        typ=""
        # data_pre() presumably yields the on/off type flag, pre-processed
        # features, a fitted scaler, raw features and a file id — verify
        # against its definition.
        typ, data_x, scaler_rob_x, X, fid = data_pre(pathtocsv, pathtoMODEL)
        
        getvalues= get_val(data_x, typ, scaler_rob_x, X,pathtoMODEL)
        
        # Map the internal "on"/other flag to a display label.
        if(typ == "on"):
        
            k = "ON"
        
        else:
            k="OFF"
        


        typ = str( k)
        val2 = float(getvalues[1])
        val3 = float(getvalues[2])
Example no. 4
0
from data_pre import data_pre
import lightgbm as lgb
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold
import xgboost as xgb
# Load the pre-processed train/test frames from the project helper.
train, test = data_pre()

# Split off the target column '收率' (yield) and the id column
# '样本id' (sample id) before modelling.
label = train['收率']
test_id = test['样本id']
del test['样本id']
del test['收率']
del train['样本id']
del train['收率']

# Encode missing values as -1 (tree models split on this natively).
train.fillna(-1, inplace=True)
test.fillna(-1, inplace=True)

# 5-fold cross-validation with a fixed seed for reproducibility.
folds = KFold(n_splits=5, shuffle=True, random_state=2018)
oof_lgb = np.zeros(len(train))          # out-of-fold predictions on train
predictions_lgb = np.zeros(len(test))   # averaged predictions on test

# LightGBM hyper-parameters.
# NOTE(review): this dict literal is truncated by the paste — the closing
# brace and any remaining keys are missing from this view.
param = {
    'num_leaves': 120,
    'min_data_in_leaf': 30,
    'objective': 'regression',
    'max_depth': -1,
    'learning_rate': 0.01,
    "min_child_samples": 30,
        # --- orphaned fragment: a duplicated tail of train_model pasted
        # out of context (network/train_x/train_y/tensorboard_callback are
        # not defined above) — kept verbatim. ---
        network.compile(optimizer=optimizers.Adam(0.001),
                        loss=losses.CategoricalCrossentropy(from_logits=True),
                        metrics=['accuracy'])

        # Keep only the checkpoint with the lowest validation loss.
        checkpoint = tf.keras.callbacks.ModelCheckpoint(
            filepath='my_model.h5',
            monitor='val_loss',
            verbose=1,
            save_weights_only=False,
            save_best_only=True,
            mode='min',
        )

        network.fit(x=train_x, y=train_y, epochs=100, validation_split=0.1, validation_freq=1, verbose=1,
                    callbacks=[checkpoint, tensorboard_callback])

        # Sanity-check predictions on a training prefix.
        print(network.predict(train_x[:200]))

        network.save("my_model_npz.h5")

    # Residue from the paste; no effect.
    pass


pass

if __name__ == '__main__':
    # Load the pre-packed dataset archive and train the BP classifier.
    npz_filepath = r"/home/jry/MicroWave_right_npz/mydataset.npz"
    features, targets = data_pre().load_npz_data(path=npz_filepath)
    bp_model().train_model(train_x=features, train_y=targets)