def __init__(self, user_id, max_m, max_v, max_c):
    self.user_id = user_id
    self.max_m = max_m
    self.max_v = max_v
    self.max_c = max_c
    # sys.setrecursionlimit(100000000)
    # print(sys.getrecursionlimit())
    self.dataset = extract_data(user_id + 'train', ['*'])
    self.name = np.array(self.dataset[0])
    self.dataset1 = self.dataset[1]

    def transpose(matrix):
        return zip(*matrix)

    self.dataset1 = transpose(self.dataset1)
    self.Data_123 = pd.DataFrame(self.dataset1)
    self.Data_123.columns = self.name
    # print(self.Data_123)
    self.dataset_test = extract_data(user_id + 'predict', ['*'])
    self.name_test = np.array(self.dataset_test[0])
    self.dataset1_test = self.dataset_test[1]
    self.dataset1_test = transpose(self.dataset1_test)
    self.Data_123_test = pd.DataFrame(self.dataset1_test)
    self.Data_123_test.columns = self.name_test
def change_data(self, user_id):
    dataset = extract_data(user_id + 'train1', ['*'])
    name = np.array(dataset[0])
    dataset1 = dataset[1]

    def transpose(matrix):
        return zip(*matrix)

    dataset1 = transpose(dataset1)
    Data_123 = pd.DataFrame(dataset1)
    Data_123.columns = name
    # print(self.Data_123)
    dataset_test = extract_data(user_id + 'predict1', ['*'])
    name_test = np.array(dataset_test[0])
    dataset1_test = dataset_test[1]
    dataset1_test = transpose(dataset1_test)
    Data_123_test = pd.DataFrame(dataset1_test)
    Data_123_test.columns = name_test
    return (Data_123, Data_123_test)
def display(user_id):
    train_name = extract_data(user_id + 'train', ['*'])
    test_name = extract_data(user_id + 'predict', ['*'])
    # NB: the misspelling "fecth" is kept on purpose -- it must match the
    # error string that extract_data returns.
    if train_name == "Error: unable to fecth data" or test_name == "Error: unable to fecth data":
        return ("Error: unable to fecth data")
    else:
        train_name = np.array(train_name[0]).tolist()
        test_name = np.array(test_name[0]).tolist()
        # column names present in both the train and predict tables
        c = [x for x in train_name if x in test_name]
        print(train_name)
        return (c)
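# Usage sketch (hypothetical user id): assuming extract_data can read the
# '<user_id>train' and '<user_id>predict' tables, display() returns the column
# names the two tables share, or extract_data's error string:
#
#   common = display('alice')
#   # e.g. ['GENDER', 'AGE', ...] or "Error: unable to fecth data"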
def MysqlDataExtract(DataName=None, table_name=None):
    data = extract_data(table_name, DataName)
    name = np.array(data[0])
    DataValue = data[1]

    # transpose was undefined in this scope; define the same row/column flip
    # used by the other helpers in this module
    def transpose(matrix):
        return zip(*matrix)

    DataValue = transpose(DataValue)
    x = pandas.DataFrame(DataValue)
    x.columns = name
    return x
def num_change222(self):
    dataset_train = extract_data(self.user_id + 'train1', ['*'])
    dataset_test = extract_data(self.user_id + 'predict1', ['*'])
    name_train = np.array(dataset_train[0])
    dataset1_train = dataset_train[1]
    name_test = np.array(dataset_test[0])
    dataset1_test = dataset_test[1]

    def transpose(matrix):
        return zip(*matrix)

    dataset1_train = transpose(dataset1_train)
    Data_12_train = pd.DataFrame(dataset1_train)
    Data_12_train.columns = name_train
    dataset1_test = transpose(dataset1_test)
    Data_12_test = pd.DataFrame(dataset1_test)
    Data_12_test.columns = name_test
    return (Data_12_train, Data_12_test)
def CH_DataFrame(T_name, X):
    data = extract_data(T_name, X)
    name = np.array(data[0])
    print(name)
    Value = data[1]
    Value = zip(*Value)
    x = pd.DataFrame(Value)
    x.columns = name
    x = x.replace("", np.NAN)
    x = x.fillna(method='pad')
    # print(x)
    return x
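# Usage sketch (hypothetical table/column names): CH_DataFrame pulls the listed
# columns from a table and forward-fills blank cells, so a typical call is:
#
#   df = CH_DataFrame('alicetrain1', ['GENDER', 'AGE'])
#   # df is a DataFrame with empty strings replaced by the previous row's value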
def __init__(self, feature, user_id):
    self.user_id = user_id
    self.feature = feature
    self.dataset_train = extract_data(user_id + 'train1', self.feature)
    print(self.dataset_train)
    self.dataset_test = extract_data(user_id + 'predict1', self.feature)
    print(1)
    print(self.dataset_test)
    self.name_train = np.array(self.dataset_train[0])
    self.dataset1_train = self.dataset_train[1]
    self.name_test = np.array(self.dataset_test[0])
    self.dataset1_test = self.dataset_test[1]

    def transpose(matrix):
        return zip(*matrix)

    self.dataset1_train = transpose(self.dataset1_train)
    self.Data_12_train = pd.DataFrame(self.dataset1_train)
    self.Data_12_train.columns = self.name_train
    self.dataset1_test = transpose(self.dataset1_test)
    self.Data_12_test = pd.DataFrame(self.dataset1_test)
    self.Data_12_test.columns = self.name_test
def __init__(self, type, item, feature, user_id, any_num=None):
    self.type = type
    # print(self.type)
    self.user_id = user_id
    self.any_num = any_num
    self.item = item
    self.feature = feature
    self.dataset_train = extract_data(user_id + 'train', ['*'])
    self.dataset_test = extract_data(user_id + 'predict', ['*'])
    self.name_train = np.array(self.dataset_train[0])
    self.dataset1_train = self.dataset_train[1]
    self.name_test = np.array(self.dataset_test[0])
    self.dataset1_test = self.dataset_test[1]

    def transpose(matrix):
        return zip(*matrix)

    self.dataset1_train = transpose(self.dataset1_train)
    self.Data_12_train = pd.DataFrame(self.dataset1_train)
    self.Data_12_train.columns = self.name_train
    self.dataset1_test = transpose(self.dataset1_test)
    self.Data_12_test = pd.DataFrame(self.dataset1_test)
    self.Data_12_test.columns = self.name_test
def PredictFile(Model=None, ID=None, Result=None, UserID=None):
    # ID is the list of input feature columns
    T_name = UserID + "predict1"
    data = extract_data(T_name, ID)
    name = np.array(data[0])
    Value = np.array(data[1])
    Value = Value.transpose()
    x = pd.DataFrame(Value)
    x.columns = name
    # append the prediction column and write the result file
    a = len(x.columns)
    x.insert(a, 'Prediction Result', Result)
    path = os.path.abspath('../UserFiles/' + str(UserID) + '/Result/Predict' + str(Model) + '.csv')
    x.to_csv(path, index=False)
    return
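# Usage sketch (hypothetical arguments): PredictFile re-reads the ID columns of
# the predict table, appends the model's predictions as a 'Prediction Result'
# column and writes UserFiles/<UserID>/Result/Predict<Model>.csv:
#
#   PredictFile(Model='LSTM', ID=['USER_ID'], Result=pred_list, UserID='alice')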
def Clustering_train(User_id, feature_1, max_k):
    # pull the data from the database and convert the lists to a DataFrame
    try:
        dataset_x = extract_data(User_id + 'predict1', feature_1)
        name_x = np.array(dataset_x[0])
        x_dataset0 = dataset_x[1]

        def transpose(matrix):
            return zip(*matrix)

        x_dataset = transpose(x_dataset0)
        x = pd.DataFrame(x_dataset)
        x.columns = name_x
    except:
        return 0
    # replace empty strings with NaN, then forward-fill
    x = x.replace('', np.NaN)
    x = x.fillna(method='pad')
    if max_k > 30:
        max_k = 30
    SSE = []  # sum of squared errors for each k
    for k in range(1, max_k + 1):
        estimator = KMeans(n_clusters=k)  # build the clusterer
        estimator.fit(x)
        SSE.append(estimator.inertia_)
    X = range(1, max_k + 1)
    print(X, SSE)
    plt.plot(X, SSE, 'o-')
    plt.xlabel('K')
    plt.ylabel('SSE')
    path = os.path.abspath(os.path.join(os.getcwd(), "..")) + '/' + 'UserFiles' + '/' \
        + str(User_id) + '/' + 'Result' + '/' + str(User_id) + '_cluster_trend_map.png'
    # os.remove(path)
    plt.savefig(path)
    plt.close()  # without this the next plot would be drawn on top of this one
    return 1
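# Usage sketch (hypothetical features): Clustering_train draws the k-means
# elbow plot (SSE vs. k for k = 1..max_k, capped at 30) so the caller can pick
# a k before running Clustering_test:
#
#   ok = Clustering_train('alice', ['GENDER', 'AGE', 'IN_NET_DUR'], max_k=10)
#   # ok == 1 on success, 0 if the table could not be read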
def show_name1(user_id):
    try:
        dataset_train = extract_data(user_id + 'train1', ['*'])
        # print(dataset_train)
        name_train = np.array(dataset_train[0])
        dataset1_train = np.array(dataset_train[1])

        def transpose(matrix):
            return zip(*matrix)

        dataset1_train = transpose(dataset1_train)
        Data_12_train = pd.DataFrame(dataset1_train)
        Data_12_train.columns = name_train
        i = 0
        list1 = []
        while i < Data_12_train.shape[1]:
            Data_1_train = Data_12_train[Data_12_train.columns[i]]
            Data_2_train = Data_1_train.to_list()
            shang = calc_ent(Data_2_train)  # entropy of this column
            # print(shang)
            list1.append(shang)
            i += 1
        jieguoshang = dict(zip(name_train, list1))
        # return (jieguoshang)
        # sort the column -> entropy mapping by entropy, ascending
        a = sorted(jieguoshang.items(), key=operator.itemgetter(1))
        list2 = []
        for i in a:
            b = list(i)
            list2.append(b)
        dict2 = dict(list2)
        print(dict2)
        return dict2
    except:
        return 0
def show_name(user_id):
    train_name = extract_data(user_id + 'train1', ['*'])
    train_name = np.array(train_name[0]).tolist()
    return (train_name)
def Lstm_Model_train(User_id, X_Data_Characteristic_n0, Y_Data_Characteristic,
                     N_Layers2, N_First_Layer_Neurons, N_Layer_Neurons2,
                     N_Layer_Neurons3, N_Layer_Neurons4, N_Layer_Neurons5, epoch):
    try:
        N1 = N_First_Layer_Neurons
        dataset_x = extract_data(User_id + 'train1', X_Data_Characteristic_n0)
        N_input_dim = len(dataset_x[0])
        dataset_y = extract_data(User_id + 'train1', Y_Data_Characteristic)
        name_x = np.array(dataset_x[0])
        name_y = np.array(dataset_y[0])
        x_dataset = dataset_x[1]
        y_dataset = dataset_y[1]

        def transpose(matrix):
            return zip(*matrix)

        x_dataset = transpose(x_dataset)
        x = pd.DataFrame(x_dataset)
        x.columns = name_x
        y_dataset = transpose(y_dataset)
        y = pd.DataFrame(y_dataset)
        y.columns = name_y
        x = x.replace('', np.NaN)
        y = y.replace('', np.NaN)
        x = x.fillna(method='pad')
        y = y.fillna(method='pad')
        x = np.array(x)
        y = np.array(y)
        x_train = x.astype('float64')
        y_train = y.astype('float64')
        y_train = np.array(y_train).reshape(y_train.shape[0], 1)
        scaler = MinMaxScaler(feature_range=(0, 1))
        x_train = scaler.fit_transform(x_train)
        y_train = scaler.fit_transform(y_train)
        # split into train and test sets
        train_size = int(len(x_train) * 0.8)
        test_size = len(x_train) - train_size
        train_x, test_x = x_train[0:train_size, :], x_train[train_size:len(x_train), :]
        train_y, test_y = y_train[0:train_size, :], y_train[train_size:len(x_train), :]
        train_x = np.reshape(train_x, (train_x.shape[0], 1, train_x.shape[1]))
        test_x = np.reshape(test_x, (test_x.shape[0], 1, test_x.shape[1]))
        K.clear_session()
        # cap the per-layer neuron counts at 256
        list = [N_First_Layer_Neurons, N_Layer_Neurons2, N_Layer_Neurons3,
                N_Layer_Neurons4, N_Layer_Neurons5]
        list1 = []
        for i in list:
            if i > 256:
                i = 256
            list1.append(i)
        N_First_Layer_Neurons = list1[0]
        N_Layer_Neurons2 = list1[1]
        N_Layer_Neurons3 = list1[2]
        N_Layer_Neurons4 = list1[3]
        N_Layer_Neurons5 = list1[4]
        if N_Layers2 > 5:
            N_Layers2 = 5
        model = Sequential()

        def create_full_lstm_model(N_Layers2, N_input_dim, N_First_Layer_Neurons,
                                   N_Layer_Neurons2, N_Layer_Neurons3,
                                   N_Layer_Neurons4, N_Layer_Neurons5):
            # N_Layers2 stacked LSTM layers with explicit per-layer sizes;
            # only the last LSTM layer collapses the sequence
            layer_sizes = [N_First_Layer_Neurons, N_Layer_Neurons2, N_Layer_Neurons3,
                           N_Layer_Neurons4, N_Layer_Neurons5][:N_Layers2]
            model.add(LSTM(input_dim=N_input_dim, units=N_input_dim,
                           return_sequences=True))
            for size in layer_sizes[:-1]:
                model.add(LSTM(size, return_sequences=True))
            model.add(LSTM(layer_sizes[-1], return_sequences=False))
            model.add(Dense(output_dim=1))
            model.add(Activation('linear'))
            return model

        def create_lstm_model(N_Layers2, N_input_dim):
            # deeper presets with N1 units per layer; N_Layers2 is clamped to 5
            # above, so this builder is effectively unreachable
            if 5 < N_Layers2 < 10:
                n_stacked = 3
            elif 9 < N_Layers2 < 20:
                n_stacked = 12
            else:  # N_Layers2 > 19
                n_stacked = 18
            model.add(LSTM(input_dim=N_input_dim, units=N_input_dim,
                           return_sequences=True))
            for _ in range(n_stacked - 1):
                model.add(LSTM(N1, return_sequences=True))
            model.add(LSTM(N1, return_sequences=False))
            model.add(Dense(output_dim=1))
            model.add(Activation('linear'))
            return model

        if N_Layers2 > 5:
            model = create_lstm_model(N_Layers2, N_input_dim)
        if N_Layers2 < 6:
            model = create_full_lstm_model(N_Layers2, N_input_dim,
                                           N_First_Layer_Neurons, N_Layer_Neurons2,
                                           N_Layer_Neurons3, N_Layer_Neurons4,
                                           N_Layer_Neurons5)
        print(model.summary())
        import keras
        # custom optimizers
        rmsprop = keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06)
        adagrad = keras.optimizers.Adagrad(lr=0.01, epsilon=1e-06)
        adadelta = keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06)
        adam = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
        adamax = keras.optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
        nadam = keras.optimizers.Nadam(lr=0.002, beta_1=0.9, beta_2=0.999,
                                       epsilon=1e-08, schedule_decay=0.004)
        sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)

        # R^2 evaluation metric
        def r_square(y_true, y_pred):
            SSR = K.mean(K.square(y_pred - K.mean(y_true)), axis=-1)
            SST = K.mean(K.square(y_true - K.mean(y_true)), axis=-1)
            return SSR / SST

        model.compile(loss='mse', optimizer=adamax, metrics=[r_square])
        train_history = model.fit(train_x, train_y, batch_size=20, nb_epoch=epoch,
                                  validation_split=0.1, verbose=1)
        model.save_weights(
            os.path.abspath(os.path.join(os.getcwd(), "..")) + '/' + 'UserFiles' + '/'
            + str(User_id) + '/' + 'Result' + '/' + str(User_id)
            + '_Lstm_Model_weightsss.h5')
        import matplotlib.pyplot as plt
        cost = model.evaluate(test_x, test_y, batch_size=100)
        Forecast_Result_loss = cost[1]
        Data_Characteristic_n2 = X_Data_Characteristic_n0 + Y_Data_Characteristic
        test_y_pred = model.predict(test_x)
        fig1, ax1 = plt.subplots()
        ax1.scatter(test_y, test_y_pred, edgecolors=(0, 0, 0))
        ax1.plot([test_y.min(), test_y.max()], [test_y.min(), test_y.max()],
                 'k--', lw=4)
        '''
        my_x_ticks = np.arange(-1, 1, 0.05)
        my_y_ticks = np.arange(-1, 1, 0.03)
        plt.xticks(my_x_ticks)
        plt.yticks(my_y_ticks)
        '''
        ax1.set_xlabel('True')
        ax1.set_ylabel('Measured')
        plt.savefig(
            os.path.abspath(os.path.join(os.getcwd(), "..")) + '/' + 'UserFiles' + '/'
            + str(User_id) + '/' + 'Result' + '/' + str(User_id)
            + '_true_vs_prediction_map1.png')
    except:
        return 0

    def text_save(content, User_id, mode='a', N_Layer_Neurons2=10):
        # Try to save a list variable in a txt file.
        t = os.path.abspath(os.path.join(os.getcwd(), ".."))
        newfile = t + '/' + 'UserFiles' + '/' + str(User_id) + '/' + 'Result' + '/' \
            + str(User_id) + '_LSTM_Model.csv'
        if not os.path.exists(newfile):
            f = open(newfile, 'w')
            print(newfile)
            f.close()
            print(newfile + " created.")
        else:
            print(newfile + " already existed.")
        file = open(newfile, mode)
        for i in range(len(content)):
            file.write(str(content[i]) + '\n')
        file.close()

    text_save(("Input features X:", X_Data_Characteristic_n0,
               "Output feature Y:", Y_Data_Characteristic,
               "Prediction score:", Forecast_Result_loss),
              User_id, mode='a', N_Layer_Neurons2=10)
    return 1
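# Usage sketch (hypothetical features): a 3-layer LSTM regressor with 64/32/16
# units, trained for 50 epochs; the weights are saved to
# UserFiles/<User_id>/Result/<User_id>_Lstm_Model_weightsss.h5:
#
#   ok = Lstm_Model_train('alice', ['AGE', 'IN_NET_DUR'], ['ARPU'],
#                         3, 64, 32, 16, 0, 0, epoch=50)
#   # ok == 1 on success, 0 on any failure inside the try block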
def Clustering_test(User_id, feature_1, perfect_k):
    dataset_y = extract_data(User_id + 'predict1', feature_1)
    name_y = np.array(dataset_y[0])
    y_dataset0 = dataset_y[1]

    def transpose(matrix):
        return zip(*matrix)

    y_dataset = transpose(y_dataset0)
    x = pd.DataFrame(y_dataset)
    x.columns = name_y
    # digits_train = pd.read_csv('D:\\test(1).csv', encoding='gbk')
    # extract the 64-dim pixel features and the 1-dim target from the samples
    # x = digits_train[['GENDER', 'AGE', 'IN_NET_DUR', 'STAR_LEVEL']]
    x = x.replace('', np.NaN)
    x = x.fillna(method='pad')
    if perfect_k > 30:
        perfect_k = 30
    kmeans = KMeans(n_clusters=perfect_k)
    y_predict = kmeans.fit_predict(x)
    labels = kmeans.labels_  # predicted cluster labels
    # print(labels)
    SSE_Min = kmeans.inertia_
    cluster_centers = kmeans.cluster_centers_  # cluster centres
    # print(cluster_centers)
    # cluster_centers = np.array(cluster_centers).reshape(1, cluster_centers.shape[0])
    # save to a csv file so the reader module can pick it up
    # print(type(cluster_centers))
    y_predict = y_predict.reshape(y_predict.shape[0], 1)
    x_train_list = np.array(x).tolist()
    z = [x_train_list, y_predict]
    # print(z)
    data = DataFrame(z)  # written row-wise at this point
    data = data.T  # transpose to get the desired layout
    data.columns = [feature_1, 'Clustering Result']
    # print(data)
    data.to_csv(os.path.abspath(os.path.join(os.getcwd(), "..")) + '/' + 'UserFiles'
                + '/' + str(User_id) + '/' + 'Result' + '/' + str(User_id)
                + '_Clustering_result.csv', index=False)

    def text_save(content, User_id, feature_1, perfect_k, mode='a'):
        # Try to save a list variable in a txt file.
        t = os.path.abspath(os.path.join(os.getcwd(), ".."))
        newfile = t + '/' + 'UserFiles' + '/' + str(User_id) + '/' + 'Result' + '/' \
            + str(User_id) + '_Clustering_result_record.csv'
        if not os.path.exists(newfile):
            f = open(newfile, 'w')
            print(newfile)
            f.close()
            print(newfile + " created.")
        else:
            print(newfile + " already existed.")
        file = open(newfile, mode)
        for i in range(len(content)):
            file.write(str(content[i]) + '\n')
        file.close()

    text_save(("Features:", feature_1, "Best K:", perfect_k,
               "Minimum SSE:", SSE_Min, "Cluster centres:", cluster_centers),
              User_id, feature_1, perfect_k, mode='a')
    return 1
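# Usage sketch: Clustering_test runs k-means with the k chosen from the elbow
# plot and writes <User_id>_Clustering_result.csv plus a record file:
#
#   ok = Clustering_test('alice', ['GENDER', 'AGE', 'IN_NET_DUR'], perfect_k=4)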
def Lstm_Model_test(User_id, X_Data_Characteristic_n0, Y_Data_Characteristic,
                    N_Layers2, N_First_Layer_Neurons, N_Layer_Neurons2,
                    N_Layer_Neurons3, N_Layer_Neurons4, N_Layer_Neurons5, epoch):
    try:
        N_input_dim = len(X_Data_Characteristic_n0)
        N1 = N_First_Layer_Neurons
        dataset_x = extract_data(User_id + 'predict1', X_Data_Characteristic_n0)
        dataset_y = extract_data(User_id + 'train1', Y_Data_Characteristic)
        name_x = np.array(dataset_x[0])
        name_y = np.array(dataset_y[0])
        x_dataset = dataset_x[1]
        y_dataset = dataset_y[1]

        def transpose(matrix):
            return zip(*matrix)

        x_dataset = transpose(x_dataset)
        x = pd.DataFrame(x_dataset)
        x.columns = name_x
        y_dataset = transpose(y_dataset)
        y = pd.DataFrame(y_dataset)
        y.columns = name_y
        x = x.replace('', np.NaN)
        y = y.replace('', np.NaN)
        x = x.fillna(method='pad')
        y = y.fillna(method='pad')
        x = np.array(x)
        scaler = MinMaxScaler(feature_range=(0, 1))
        x_train = scaler.fit_transform(x)
        # refitting on y means inverse_transform below maps back to y's units
        y_train = scaler.fit_transform(y)
        x_train = x_train.astype('float64')
        train_x = np.reshape(x_train, (x_train.shape[0], 1, x_train.shape[1]))
        K.clear_session()
        # cap the per-layer neuron counts at 256
        list = [N_First_Layer_Neurons, N_Layer_Neurons2, N_Layer_Neurons3,
                N_Layer_Neurons4, N_Layer_Neurons5]
        list1 = []
        for i in list:
            if i > 256:
                i = 256
            list1.append(i)
        N_First_Layer_Neurons = list1[0]
        N_Layer_Neurons2 = list1[1]
        N_Layer_Neurons3 = list1[2]
        N_Layer_Neurons4 = list1[3]
        N_Layer_Neurons5 = list1[4]
        if N_Layers2 > 5:
            N_Layers2 = 5
        model = Sequential()

        def create_full_lstm_model(N_Layers2, N_input_dim, N_First_Layer_Neurons,
                                   N_Layer_Neurons2, N_Layer_Neurons3,
                                   N_Layer_Neurons4, N_Layer_Neurons5):
            # must mirror the architecture built by Lstm_Model_train so the
            # saved weights can be reloaded
            layer_sizes = [N_First_Layer_Neurons, N_Layer_Neurons2, N_Layer_Neurons3,
                           N_Layer_Neurons4, N_Layer_Neurons5][:N_Layers2]
            model.add(LSTM(input_dim=N_input_dim, units=N_input_dim,
                           return_sequences=True))
            for size in layer_sizes[:-1]:
                model.add(LSTM(size, return_sequences=True))
            model.add(LSTM(layer_sizes[-1], return_sequences=False))
            model.add(Dense(output_dim=1))
            model.add(Activation('linear'))
            return model

        def create_lstm_model(N_Layers2, N_input_dim):
            # deeper presets with N1 units per layer; N_Layers2 is clamped to 5
            # above, so this builder is effectively unreachable
            if 5 < N_Layers2 < 10:
                n_stacked = 3
            elif 9 < N_Layers2 < 20:
                n_stacked = 12
            else:  # N_Layers2 > 19
                n_stacked = 18
            model.add(LSTM(input_dim=N_input_dim, units=N_input_dim,
                           return_sequences=True))
            for _ in range(n_stacked - 1):
                model.add(LSTM(N1, return_sequences=True))
            model.add(LSTM(N1, return_sequences=False))
            model.add(Dense(output_dim=1))
            model.add(Activation('linear'))
            return model

        if N_Layers2 > 5:
            model = create_lstm_model(N_Layers2, N_input_dim)
        if N_Layers2 < 6:
            model = create_full_lstm_model(N_Layers2, N_input_dim,
                                           N_First_Layer_Neurons, N_Layer_Neurons2,
                                           N_Layer_Neurons3, N_Layer_Neurons4,
                                           N_Layer_Neurons5)
        import keras
        # custom optimizers
        rmsprop = keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06)
        adagrad = keras.optimizers.Adagrad(lr=0.01, epsilon=1e-06)
        adadelta = keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06)
        adam = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
        adamax = keras.optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
        nadam = keras.optimizers.Nadam(lr=0.002, beta_1=0.9, beta_2=0.999,
                                       epsilon=1e-08, schedule_decay=0.004)
        sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(loss='mse', optimizer=adamax)
    except:
        return 2
    try:
        model.load_weights(
            os.path.abspath(os.path.join(os.getcwd(), "..")) + '/' + 'UserFiles' + '/'
            + str(User_id) + '/' + 'Result' + '/' + str(User_id)
            + '_Lstm_Model_weightsss.h5')
        pred_y = model.predict(train_x)
    except ValueError:
        return 0
    else:
        pred_y = pred_y.reshape(pred_y.shape[0], 1)
        predict_10 = scaler.inverse_transform(pred_y)
        predict_10 = pd.DataFrame(predict_10)
        predict_10.columns = ['Prediction Result']
        test_x = pd.DataFrame(x)
        test_x.columns = name_x
        pred_result = pd.concat((test_x, predict_10), axis=1, ignore_index=False)
        pred_result.to_csv(os.path.abspath(os.path.join(os.getcwd(), "..")) + '/'
                           + 'UserFiles' + '/' + str(User_id) + '/' + 'Result' + '/'
                           + User_id + '_LSTM_Model_Pred_result.csv', index=False)
        return 1
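# Usage sketch: Lstm_Model_test rebuilds the same architecture that
# Lstm_Model_train used (the layer arguments must match), loads the saved
# weights and writes <User_id>_LSTM_Model_Pred_result.csv:
#
#   status = Lstm_Model_test('alice', ['AGE', 'IN_NET_DUR'], ['ARPU'],
#                            3, 64, 32, 16, 0, 0, epoch=50)
#   # 1 = predictions written, 0 = weight/shape mismatch, 2 = setup failed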
def Linear_Regression_Model(User_id, xfeature, yfeature):
    # fetch the data from the database
    try:
        table_name = "" + User_id + "train1"
        table_name1 = "" + User_id + "predict1"
        dataset_1 = extract_data(table_name, xfeature)
        dataset_2 = extract_data(table_name, yfeature)
        dataset_3 = extract_data(table_name1, xfeature)
        name_x = np.array(dataset_1[0])
        name_y = np.array(dataset_2[0])
        name_xp = np.array(dataset_3[0])
        x_dataset = dataset_1[1]
        y_dataset = dataset_2[1]
        xp_dataset = dataset_3[1]

        def transpose(matrix):
            return zip(*matrix)

        x_dataset = transpose(x_dataset)
        x = pd.DataFrame(x_dataset)
        x.columns = name_x
        xp_dataset = transpose(xp_dataset)
        xp = pd.DataFrame(xp_dataset)
        xp.columns = name_xp
        y_dataset = transpose(y_dataset)
        y = pd.DataFrame(y_dataset)
        y.columns = name_y
        # fill empty values
        x = x.replace('', np.NAN)
        xp = xp.replace('', np.NAN)
        y = y.replace('', np.NAN)
        x = x.fillna(method='pad')
        xp = xp.fillna(method='pad')
        y = y.fillna(method='pad')
        # linear regression
        X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
        model = LinearRegression()
        model.fit(X_train, y_train)
        joblib.dump(model, os.path.abspath(
            os.path.join(os.getcwd(), "..") + "/UserFiles/" + User_id + "/Result/"
            + User_id + "_Linear_Regression_Model.pkl"))
        yp = model.predict(xp)
        yp = yp.tolist()
        yp = pd.DataFrame(yp)
        yp.columns = [yfeature[0] + " Prediction Result"]
        pred_result = pd.concat((xp, yp), axis=1, ignore_index=False)
        pred_result.to_csv(os.path.abspath(
            os.path.join(os.getcwd(), "..") + "/UserFiles/" + User_id + "/Result/"
            + User_id + "_Linear_Regression_Model_Pred_Result.csv"), index=False)
        test_score = model.score(X_test, y_test)

        # write the fitted expression and score to a txt file
        def result_write(User_id, list1, list2, yfeature, test_score):
            q = ''
            for i in range(0, len(list2)):
                q = q + "X" + str(i + 1) + ":" + list2[i] + ","
            w = "Y:" + yfeature[0] + "\n"
            Result = "Y = (" + list1[0] + ")+"
            for i in range(1, len(list1)):
                Result = Result + "(" + list1[i] + "*X" + str(i) + ")+"
            Result = Result[:-1] + '\n'
            q = q[:-1] + '\n'
            score = "score = " + str(test_score) + "" + '\n'
            with open(os.path.abspath(
                    os.path.join(os.getcwd(), "..") + "/UserFiles/" + User_id
                    + "/Result/" + User_id + "_Linear_Regression_Model_Result.txt"),
                    "w", encoding='utf-8') as f:
                f.write(str(Result))  # the fitted expression
                f.write(str(q))       # meaning of each X in the expression
                f.write(str(w))       # meaning of Y
                f.write(str(score))   # test-set score
                f.close()
            return

        list1 = list(model.intercept_) + list(model.coef_[0])
        list1 = [str(i) for i in list1]
        list2 = list(x.columns)
        result_write(User_id, list1, list2, yfeature, test_score)
        # plot true vs. predicted values
        predicted = model.predict(x)
        fig = plt.figure()
        ax1 = fig.add_subplot(111)
        ax1.set_title('true_vs_prediction')
        plt.scatter(y, predicted, color='y', marker='o', label='predicted data')
        plt.scatter(y, y, color='g', marker='+', label='Original data')
        plt.legend(['predicted data', 'Original data'], loc='upper right')
        plt.savefig(os.path.abspath(
            os.path.join(os.getcwd(), "..") + "/UserFiles/" + User_id + "/Result/"
            + User_id + "_Linear_Regression_map.png"))  # save the figure
        return 1
    except:
        return 0
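# Usage sketch (hypothetical features): fits y = a + b1*x1 + ... on the train
# table, predicts the predict table, and writes the pickled model, a csv of
# predictions, a result txt and a scatter plot under UserFiles/<User_id>/Result/:
#
#   ok = Linear_Regression_Model('alice', ['AGE', 'IN_NET_DUR'], ['ARPU'])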
def Neural_Network_Model_test(User_id, ID, feature, N_Last_Layer_Neurons, N_Layers1,
                              N_First_Layer_Neurons, N_Layer_Neurons2,
                              N_Layer_Neurons3, N_Layer_Neurons4,
                              N_Layer_Neurons5, epoch):
    try:
        N1 = N_First_Layer_Neurons
        # x comes from the predict-set table
        dataset_x = extract_data(User_id + 'predict1', feature)
        # y comes from the train-set table
        dataset_y = extract_data(User_id + 'train1', N_Last_Layer_Neurons)
        dataset_ID = extract_data(User_id + 'predict1', ID)
        name_x = np.array(dataset_x[0])
        leng_x = len(name_x)
        name_y = np.array(dataset_y[0])
        name_ID = np.array(dataset_ID[0])
        x_dataset0 = dataset_x[1]
        y_dataset0 = dataset_y[1]
        ID_dataset0 = dataset_ID[1]
        # print(y_dataset0)

        def transpose(matrix):
            return zip(*matrix)

        x_dataset = transpose(x_dataset0)
        x = pd.DataFrame(x_dataset)
        x.columns = name_x
        y_dataset = transpose(y_dataset0)
        y = pd.DataFrame(y_dataset)
        y.columns = name_y
        ID_dataset = transpose(ID_dataset0)
        ID_dataset2 = pd.DataFrame(ID_dataset)
        ID_dataset2.columns = name_ID
        x = x.replace('', np.NaN)
        y = y.replace('', np.NaN)
        imp = Imputer(missing_values='NaN', strategy='mean', verbose=0)
        imp.fit(x)
        x = imp.transform(x)
        y = y.fillna(method='pad')
        x = np.array(x)
        y = np.array(y).tolist()
        # number of distinct class labels
        set1 = [item for sublist in y for item in sublist]
        N_last = len(set(set1))
        scaler = MinMaxScaler(feature_range=(0, 1))
        x_1 = scaler.fit_transform(x)
        x = x_1.reshape(x.shape[0], 1, leng_x)
        x_train = x.astype('float64')
        from keras.models import Sequential
        from keras import regularizers
        keras.backend.clear_session()
        # cap the layer neuron counts at the maximum of 256
        list = [N_First_Layer_Neurons, N_Layer_Neurons2, N_Layer_Neurons3,
                N_Layer_Neurons4, N_Layer_Neurons5]
        list1 = []
        for i in list:
            if i > 256:
                i = 256
            list1.append(i)
        N_First_Layer_Neurons = list1[0]
        N_Layer_Neurons2 = list1[1]
        N_Layer_Neurons3 = list1[2]
        N_Layer_Neurons4 = list1[3]
        N_Layer_Neurons5 = list1[4]
        if N_Layers1 > 5:
            N_Layers1 = 5
        model = Sequential()

        def add_conv_stem():
            # the 1-D convolution stem shared by every depth variant
            model.add(Convolution1D(nb_filter=N1, filter_length=1,
                                    input_shape=(1, leng_x)))
            model.add(Activation('relu'))
            model.add(Convolution1D(N1, 1))
            model.add(Activation('relu'))
            model.add(Convolution1D(2 * N1, 1))
            model.add(Activation('relu'))
            model.add(Convolution1D(2 * N1, 1))
            model.add(Activation('relu'))

        def create_full_nnw_model(N_Layers1, N_First_Layer_Neurons, N_Layer_Neurons2,
                                  N_Layer_Neurons3, N_Layer_Neurons4,
                                  N_Layer_Neurons5):
            # 2-5 layers with explicit per-layer sizes; must mirror the network
            # built by Neural_Network_Model_train so the weights can be reloaded
            dense_sizes = [N_Layer_Neurons2, N_Layer_Neurons3, N_Layer_Neurons4,
                           N_Layer_Neurons5][:N_Layers1 - 1]
            add_conv_stem()
            model.add(Flatten())
            for size in dense_sizes:
                model.add(Dense(size, activation='relu'))
            model.add(Dense(N_last, activation='softmax'))
            return model

        def create_model(N_Layers1, N1):
            # deeper presets built from multiples of N1; N_Layers1 is clamped
            # to 5 above, so this builder is effectively unreachable
            add_conv_stem()
            model.add(Flatten())
            model.add(Dense(4 * N1, activation='relu'))
            model.add(Dense(4 * N1, activation='relu'))
            model.add(Dropout(0.2))
            if N_Layers1 > 9:
                for _ in range(4):
                    model.add(Dense(8 * N1, activation='relu'))
                model.add(Dropout(0.2))
                for _ in range(4):
                    model.add(Dense(16 * N1, activation='relu'))
                model.add(Dropout(0.3))
            if N_Layers1 > 19:
                model.add(Dense(32 * N1, activation='relu'))
                model.add(Dense(32 * N1, activation='relu'))
                model.add(Dropout(0.2))
                for _ in range(4):
                    model.add(Dense(64 * N1, activation='relu'))
                model.add(Dropout(0.2))
            model.add(Dense(N_last, activation='softmax'))
            return model

        if N_Layers1 > 5:
            model = create_model(N_Layers1, N1)
        if N_Layers1 < 6:
            model = create_full_nnw_model(N_Layers1, N_First_Layer_Neurons,
                                          N_Layer_Neurons2, N_Layer_Neurons3,
                                          N_Layer_Neurons4, N_Layer_Neurons5)
        graph = tf.get_default_graph()
        model.summary()
        # custom optimizers
        rmsprop = keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06)
        adagrad = keras.optimizers.Adagrad(lr=0.01, epsilon=1e-06)
        adadelta = keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06)
        adam = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
        adamax = keras.optimizers.Adamax(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
        nadam = keras.optimizers.Nadam(lr=0.002, beta_1=0.9, beta_2=0.999,
                                       epsilon=1e-08, schedule_decay=0.004)
        sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(optimizer=adam, loss='categorical_crossentropy',
                      metrics=['accuracy'])
    except:
        return 2
    try:
        model.load_weights(os.path.abspath(os.path.join(os.getcwd(), "..")) + '/'
                           + 'UserFiles' + '/' + str(User_id) + '/' + 'Result' + '/'
                           + str(User_id) + '_Neural_Network_Model_weightsss.h5')
        pred_y = model.predict(x_train)
    except ValueError:
        return 0
    else:
        # class index with the highest softmax probability
        list_label = np.argmax(pred_y, axis=1)
        predict_10 = pd.DataFrame(list_label)
        predict_10.columns = name_y
        test_x = scaler.inverse_transform(x_1)
        test_x = pd.DataFrame(test_x)
        test_x.columns = name_x
        print(ID_dataset2)
        pred_result = pd.concat((ID_dataset2, predict_10), axis=1, ignore_index=False)
        # print(pred_result)
        pred_result.to_csv(os.path.abspath(os.path.join(os.getcwd(), "..")) + '/'
                           + 'UserFiles' + '/' + str(User_id) + '/' + 'Result' + '/'
                           + str(User_id) + '_Neural_Network_Model_Pred_result.csv',
                           index=False)
        return 1
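# Usage sketch: the classifier twin of Lstm_Model_test -- rebuild the CNN with
# the same layer arguments as training, load the saved weights and write the
# per-ID class predictions:
#
#   status = Neural_Network_Model_test('alice', ['USER_ID'], ['AGE', 'IN_NET_DUR'],
#                                      ['STAR_LEVEL'], 3, 32, 64, 64, 0, 0, epoch=50)
#   # 1 = ok, 0 = weight/shape mismatch, 2 = setup failed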
def Neural_Network_Model_train(User_id, feature, N_Last_Layer_Neurons, N_Layers1,
                               N_First_Layer_Neurons, N_Layer_Neurons2,
                               N_Layer_Neurons3, N_Layer_Neurons4,
                               N_Layer_Neurons5, epoch):
    N1 = N_First_Layer_Neurons
    try:
        dataset_x = extract_data(User_id + 'train1', feature)
        dataset_y = extract_data(User_id + 'train1', N_Last_Layer_Neurons)
        name_x = np.array(dataset_x[0])
        leng_x = len(name_x)
        name_y = np.array(dataset_y[0])
        x_dataset0 = dataset_x[1]
        y_dataset0 = dataset_y[1]

        def transpose(matrix):
            return zip(*matrix)

        x_dataset = transpose(x_dataset0)
        x = pd.DataFrame(x_dataset)
        x.columns = name_x
        y_dataset = transpose(y_dataset0)
        y = pd.DataFrame(y_dataset)
        y.columns = name_y
    except:
        return 2
    try:
        x = x.replace('', np.NaN)
        y = y.replace('', np.NaN)
        imp = Imputer(missing_values='NaN', strategy='mean', verbose=0)
        imp.fit(x)
        x = imp.transform(x)
        y = y.fillna(method='pad')
        x = np.array(x)
        y = np.array(y).tolist()
        set1 = [item for sublist in y for item in sublist]
        N_last = len(set(set1))  # number of distinct class labels
        scaler = MinMaxScaler(feature_range=(0, 1))
        x = scaler.fit_transform(x)
        x = x.reshape(x.shape[0], 1, leng_x)
        # label-encode y, then one-hot encode it for the softmax output
        encoder = LabelEncoder()
        encoder.fit(y)
        y_train_dataset = encoder.transform(y)
        if np.any(y_train_dataset):
            y = np_utils.to_categorical(y_train_dataset, num_classes=N_last)
        x_train = x.astype('float64')
        y_train = y.astype('float64')
    except:
        return 3
    train_x, test_x, train_y, test_y = train_test_split(x_train, y_train,
                                                        train_size=0.8,
                                                        random_state=40)
    from keras.models import Sequential
    from keras import regularizers
    keras.backend.clear_session()
    # cap the layer neuron counts at the maximum of 256
    list = [N_First_Layer_Neurons, N_Layer_Neurons2, N_Layer_Neurons3,
            N_Layer_Neurons4, N_Layer_Neurons5]
    list1 = []
    for i in list:
        if i > 256:
            i = 256
        list1.append(i)
    N_First_Layer_Neurons = list1[0]
    N_Layer_Neurons2 = list1[1]
    N_Layer_Neurons3 = list1[2]
    N_Layer_Neurons4 = list1[3]
    N_Layer_Neurons5 = list1[4]
    if N_Layers1 > 5:
        N_Layers1 = 5
    model = Sequential()

    def add_conv_stem():
        # the 1-D convolution stem shared by every depth variant
        model.add(Convolution1D(nb_filter=N1, filter_length=1,
                                input_shape=(1, leng_x)))
        model.add(Activation('relu'))
        model.add(Convolution1D(N1, 1))
        model.add(Activation('relu'))
        model.add(Convolution1D(2 * N1, 1))
        model.add(Activation('relu'))
        model.add(Convolution1D(2 * N1, 1))
        model.add(Activation('relu'))

    def create_full_nnw_model(N_Layers1, N_First_Layer_Neurons, N_Layer_Neurons2,
                              N_Layer_Neurons3, N_Layer_Neurons4, N_Layer_Neurons5):
        # 2-5 layers with explicit per-layer sizes
        dense_sizes = [N_Layer_Neurons2, N_Layer_Neurons3, N_Layer_Neurons4,
                       N_Layer_Neurons5][:N_Layers1 - 1]
        add_conv_stem()
        model.add(Flatten())
        for size in dense_sizes:
            model.add(Dense(size, activation='relu'))
        model.add(Dense(N_last, activation='softmax'))
        return model

    def create_model(N_Layers1, N1):
        # deeper presets built from multiples of N1; N_Layers1 is clamped to 5
        # above, so this builder is effectively unreachable
        add_conv_stem()
        model.add(Flatten())
        model.add(Dense(4 * N1, activation='relu'))
        model.add(Dense(4 * N1, activation='relu'))
        model.add(Dropout(0.2))
        if N_Layers1 > 9:
            for _ in range(4):
                model.add(Dense(8 * N1, activation='relu'))
            model.add(Dropout(0.2))
            for _ in range(4):
                model.add(Dense(16 * N1, activation='relu'))
            model.add(Dropout(0.3))
        if N_Layers1 > 19:
            model.add(Dense(32 * N1, activation='relu'))
            model.add(Dense(32 * N1, activation='relu'))
            model.add(Dropout(0.2))
            for _ in range(4):
                model.add(Dense(64 * N1, activation='relu'))
            model.add(Dropout(0.2))
        model.add(Dense(N_last, activation='softmax'))
        return model

    # custom optimizers
    if N_Layers1 > 5:
        model = create_model(N_Layers1, N1)
    if N_Layers1 < 6:
        model = create_full_nnw_model(N_Layers1, N_First_Layer_Neurons,
                                      N_Layer_Neurons2, N_Layer_Neurons3,
                                      N_Layer_Neurons4, N_Layer_Neurons5)
    graph = tf.get_default_graph()
    model.summary()
    rmsprop = keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06)
    adagrad = keras.optimizers.Adagrad(lr=0.01, epsilon=1e-06)
    adadelta = keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06)
    adam = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    adamax = keras.optimizers.Adamax(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    nadam = keras.optimizers.Nadam(lr=0.002, beta_1=0.9, beta_2=0.999,
                                   epsilon=1e-08, schedule_decay=0.004)
    sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=adam, loss='categorical_crossentropy',
                  metrics=['accuracy'])
    train_history = model.fit(x=train_x, y=train_y, validation_split=0.2,
                              epochs=epoch, batch_size=32,
                              verbose=1)  # , callbacks=[Checkpoint])
    model.save_weights(os.path.abspath(os.path.join(os.getcwd(), "..")) + '/'
                       + 'UserFiles' + '/' + str(User_id) + '/' + 'Result' + '/'
                       + str(User_id) + '_Neural_Network_Model_weightsss.h5')
    print('Testing')
    cost = model.evaluate(test_x, test_y, batch_size=100)
    Classification_Result_loss = cost[0]
    Classification_Result_accuracy = cost[1]
    Data_Characteristic_n1 = feature + N_Last_Layer_Neurons

    def text_save(content, User_id, N_Layer_Neurons2, mode='a'):
        # Try to save a list variable in a txt file.
        t = os.path.abspath(os.path.join(os.getcwd(), ".."))
        newfile = t + '/' + 'UserFiles' + '/' + str(User_id) + '/' + 'Result' + '/' \
            + str(User_id) + '_Neural_Network_Model.csv'
        if not os.path.exists(newfile):
            f = open(newfile, 'w')
            print(newfile)
            f.close()
            print(newfile + " created.")
        else:
            print(newfile + " already existed.")
        file = open(newfile, mode)
        for i in range(len(content)):
            file.write(str(content[i]) + '\n')
        file.close()

    text_save(("Input features X:", feature,
               "Output feature Y:", N_Last_Layer_Neurons,
               "Classification loss:", Classification_Result_loss,
               "Classification accuracy:", Classification_Result_accuracy),
              User_id, N_Layer_Neurons2, mode='a')
    # return (Data_Characteristic_n1, Classification_Result_loss, Classification_Result_accuracy)
    return 1
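# Usage sketch: trains the CNN classifier on the train table (the target column
# N_Last_Layer_Neurons is label-encoded and one-hot encoded internally) and
# saves the weights that Neural_Network_Model_test later reloads:
#
#   ok = Neural_Network_Model_train('alice', ['AGE', 'IN_NET_DUR'], ['STAR_LEVEL'],
#                                   3, 32, 64, 64, 0, 0, epoch=50)
#   # 1 = trained and saved, 2 = table read failed, 3 = preprocessing failed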