def _predict(self, x, pre_inputs=None, pre_outputs=None):
    if pre_inputs is not None:
        y = pr.NNOut(x, self.net, pre_inputs, pre_outputs)
    else:
        # build one delayed input column: current value followed by the
        # reversed tail of the stored history for each input channel
        delayed_input = []
        for idx in range(0, self.inputs):
            delayed_input.append(x[idx, 0])
            delay_data = self.past_data[idx, -(self.delay - 1):]
            delayed_input.extend(delay_data[::-1])
        delayed_input = np.array(delayed_input)
        delayed_input = np.reshape(delayed_input, (self.layers[0], 1))
        y = pr.NNOut(delayed_input, self.net)
    return y
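# For reference, a minimal sketch of the two NNOut call styles used in _predict above,
# assuming a toy single-input pyrenn net; arrays follow pyrenn's convention of one row
# per input/output and one column per sample (all names and values below are illustrative):
import numpy as np
import pyrenn as prn

toy_net = prn.CreateNN([1, 3, 1], dIn=[0], dIntern=[], dOut=[1])  # output fed back with delay 1
P = np.array([[0.1, 0.2, 0.3]])   # 1 input row, 3 samples
Y = np.array([[0.2, 0.4, 0.6]])
toy_net = prn.train_LM(P, Y, toy_net, k_max=10, E_stop=1e-5, verbose=False)

y_cold = prn.NNOut(P, toy_net)                 # delays initialized with zeros
P0 = np.array([[0.0]])                         # input(s) immediately preceding P
Y0 = np.array([[0.0]])                         # output(s) immediately preceding P
y_warm = prn.NNOut(P, toy_net, P0=P0, Y0=Y0)   # delays warm-started from history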
def main():
    # get our data as an array from read_in()
    res = json.loads(sys.stdin.read())
    # data = [ [ 1578.0077, 0 ],[ 1581.1876, 5 ],[ 1452.4627, 33 ],[ 1449.7326, 58 ],[ 1501.0392, 80 ],[ 1460.4557, 110 ],[ 1492.824, 130 ],[ 1422.3826, 155 ],[ 1404.3431, 180 ],[ 1480.74, 210 ],[ 1410.3936, 230 ],[ 1612.336, 255 ],[ 1729.343, 280 ],[ 1735.5231, 305 ],[ 1632.595, 330 ],[ 1648.3143, 355 ],[ 1640.1972, 380 ],[ 1658.7949, 405 ],[ 1675.4953, 430 ],[ 1712.2672, 455 ],[ 1623.8666, 480 ],[ 1622.154, 505 ],[ 1630.9466, 530 ],[ 1595.8407, 555 ],[ 1548.5976, 580 ],[ 1598.6558, 605 ],[ 1624.0902, 630 ],[ 1616.8663, 655 ],[ 1661.251, 680 ],[ 2012.605, 705 ],[ 1904.3356, 730 ],[ 1760.5438, 755 ],[ 2449.3183, 780 ],[ 2417.4744, 805 ],[ 2431.7134, 830 ],[ 2391.2651, 855 ],[ 2402.8298, 885 ],[ 2417.0901, 905 ],[ 2403.8137, 930 ],[ 2407.1756, 955 ],[ 2363.049, 980 ],[ 2364.4589, 1010 ],[ 2368.4206, 1030 ],[ 2338.8434, 1055 ],[ 2369.9809, 1080 ],[ 2353.5891, 1105 ],[ 2380.8422, 1130 ],[ 2519.2731, 1155 ],[ 2557.5253, 1180 ],[ 2536.3437, 1205 ],[ 2517.6042, 1235 ],[ 2543.7378, 1255 ],[ 2355.5603, 1280 ],[ 2347.445, 1305 ],[ 2269.8631, 1335 ],[ 2307.6435, 1355 ],[ 2274.5249, 1380 ],[ 2319.0633, 1405 ],[ 2251.9456, 1430 ],[ 2273.7241, 1455 ],[ 2250.0617, 1480 ],[ 2272.8212, 1505 ],[ 2367.9611, 1530 ],[ 2351.8406, 1555 ],[ 2348.4958, 1580 ],[ 2308.7974, 1605 ],[ 2290.4632, 1630 ],[ 2303.6924, 1655 ],[ 2218.8104, 1680 ],[ 2260.9153, 1705 ],[ 2236.759, 1730 ],[ 2238.0003, 1755 ],[ 2222.3537, 1780 ],[ 2288.0802, 1805 ],[ 2240.4641, 1830 ],[ 2258.3908, 1855 ],[ 2175.4428, 1880 ],[ 2247.978, 1905 ],[ 2234.6417, 1930 ],[ 2232.0709, 1955 ],[ 2216.933, 1980 ],[ 2219.6263, 2005 ],[ 2304.114, 2030 ],[ 2230.2487, 2055 ],[ 2261.5, 2070 ] ]
    # create a numpy array
    np_data = np.array(res['data'])
    # np_data = np.array(data)
    P = np_data[:, 1]
    steps = int(res['predict'] / res['step'])  # int() so np.arange gets an integer stop
    # steps = 25
    # extend the time axis into the future; the hardcoded 25 assumes a fixed step size
    Pl = np.concatenate((P, P[-1] + np.arange(1, steps) * 25))
    Y = np_data[:, 0]
    nn = [1, 5, 5, 1]
    dIn = [1, 2, 3]
    dIntern = []
    dOut = [1, 2, 3, 4]
    net = prn.CreateNN(nn, dIn, dIntern, dOut)
    net = prn.train_LM(P, Y, net, 1000, verbose=0)
    print('/')
    y_ap = prn.NNOut(Pl, net)
    result = np.column_stack((Pl, y_ap))
    print(result.tolist())
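# A hypothetical driver for the script above, showing the stdin payload main() expects;
# the keys (data, predict, step) come from the code, the file name and numbers are made up:
import json
import subprocess

payload = {
    "data": [[1578.0, 0], [1581.2, 25], [1592.4, 50]],  # [value, time] pairs
    "predict": 250,  # how far ahead to forecast, in the same time units
    "step": 25,      # spacing of the forecast points
}
proc = subprocess.run(["python", "forecast.py"],  # forecast.py: assumed file name
                      input=json.dumps(payload), capture_output=True, text=True)
print(proc.stdout)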
def train_set():
    global x_train
    global y_train
    global x_test
    global y_test
    print(len(x_train))
    print(len(y_train))
    structure = [24, 12, 1]
    # round the targets to 3 decimals (assigning to the loop variable would be a no-op)
    y_train = [round(item, 3) for item in y_train]
    x_train = np.asarray(x_train)
    y_train = np.asarray(y_train)
    y_test = np.asarray(y_test)
    x_test = np.asarray(x_test)
    print(type(x_train.ndim))
    rnn = pyrenn.CreateNN(structure)  # dIntern=[1]
    rnn = pyrenn.train_LM(np.transpose(x_train), np.transpose(y_train),
                          rnn, verbose=True, k_max=200, E_stop=1e-7)
    out_test = pyrenn.NNOut(np.transpose(x_test), rnn)
    plt.plot(y_test, 'r', label='actual')
    plt.plot(out_test, 'b', label='predicted')
    mse = mean_squared_error(y_test, out_test)
    print("MSE = " + str(mse))
    plt.show()
def rnn(training_set_features, training_set_class, test_set_features, test_set_class):
    # 2D numpy arrays
    # rows: inputs or outputs
    # columns: samples
    P = np.array(training_set_features)
    P = np.transpose(P)
    Y = np.array(training_set_class)
    Y = np.reshape(Y, (-1, len(training_set_class)))
    Ptest = np.array(test_set_features)
    Ptest = np.transpose(Ptest)
    Ytest = np.array(test_set_class)
    Ytest = np.reshape(Ytest, (-1, len(test_set_class)))
    net = pyrenn.CreateNN([9, 18, 18, 1], dIn=[0], dIntern=[], dOut=[])
    net = pyrenn.train_LM(P, Y, net, verbose=True, k_max=30, E_stop=1e-3)
    y = pyrenn.NNOut(P, net)
    ytest = pyrenn.NNOut(Ptest, net)
    create_predictions_file(ytest)
    """fig = plt.figure(figsize=(11,7))
def pred(nn_obj, nn_in):
    """
    create a prediction with the NN and IN data

    nn_obj: NN object
    nn_in: list, IN data
    return y_pred: np.array, predicted data
    """
    if isinstance(nn_obj, dict):
        # pyrenn nets are plain dicts
        y_pred = prn.NNOut(np.array(nn_in.T), nn_obj)
    elif isinstance(nn_obj, Net_tr):
        # torch model path
        y_pred = nn_obj.forward(tr.from_numpy(
            np.array(nn_in)).float()).detach().numpy()
    return y_pred
def trainGclmNN(train_data, f):
    fname, target_OP = generateFilenames(f)
    target_OP = np.array(target_OP)
    net = pyrenn.CreateNN([48, 20, 20, 1])
    # target_OP = np.zeros((1, n))
    net = pyrenn.train_LM(train_data.transpose(), np.array(target_OP),
                          net, k_max=500, E_stop=0.5, verbose=True)
    y = pyrenn.NNOut(train_data.transpose(), net)
    for i, j in zip(final_OP(y), target_OP.transpose()):
        print(i, j)
    accuracy(final_OP(y), target_OP.transpose())
    return net
def execute(self, df):
    df_copy = df.copy()

    # get model for predict
    predict_model = self.get_model_for_predict()
    if predict_model is None:
        # no pre-trained model
        df_copy[self.target] = 0
        return df_copy

    # form input features from input feature vectors
    df_copy = self.one_hot_encode_time_feature(
        df_copy, predict_model['OneHotEncoderHour'], 'hour')
    df_copy = self.one_hot_encode_time_feature(
        df_copy, predict_model['OneHotEncoderWeekday'], 'weekday')

    # the input feature vector must match the pre-trained model's feature vector
    if not self.check_feature_vector(predict_model['FeatureVector']):
        df_copy[self.target] = 0
        return df_copy

    logger.debug('Successfully performed set-up prior to prediction')

    entities = np.unique(df_copy.index.levels[0])
    logger.debug(str(entities))

    for entity in entities:
        # while we could perform predictions on all entities at once
        # under the assumption that there is one model for all entities,
        # we use this architecture in case we decide to have a model per entity.
        # convert features to numpy
        try:
            dfe = df_copy.loc[entity]
            predicted_values = prn.NNOut(dfe[self.features].T.to_numpy(), predict_model)
            df_copy.loc[entity, self.target] = predicted_values
        except Exception as e:
            df_copy.loc[entity, self.target] = 0
            logger.error(f'{self.whoami} prediction failed with ' + str(e))

    return df_copy
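# For reference, the all-entities-at-once variant the loop comment alludes to would be a
# single call; a sketch, relying on predict_model being the pyrenn net dict as above:
#
#     predicted = prn.NNOut(df_copy[self.features].T.to_numpy(), predict_model)
#     df_copy[self.target] = predicted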
def LM_predic(LM_para_file, LM_para_normalfile, fileName, DI1, DI2):
    # load the saved NN
    net = prn.loadNN(LM_para_file)

    # normalization parameters
    df_normalize = pd.read_csv(LM_para_normalfile, header=None)
    normalize_factor = np.array(df_normalize.iloc[:, 0].tolist())

    # read the file
    df_test = pd.read_excel(fileName, 'Summary').T
    # drop the parameters that are not needed
    columnDrop = [
        '1300 AC',
        '1300 DC',
        '1300 HR',
        '1300 Area',
        '1300 PWTT',
        '1300 BVI value',
        '1300 BVI amp',
        '1300 BVI time',
        '1300 BVA value',
    ]
    df_test = df_test.drop(columnDrop, axis=1)
    input_x = df_test.iloc[0, :].tolist()

    # diabetes index
    input_x.extend([DI1, DI2])
    input_f = np.array(input_x)

    # normalize the parameters
    input_x_normalized = input_f / normalize_factor
    input_x_pre = input_x_normalized.reshape(29, 1)

    # output reading
    glucose = prn.NNOut(input_x_pre, net) * 600
    Glu = float(glucose[0])  # convert the array to a float
    Glu = round(Glu, 1)      # round only works after converting to float
    print(Glu)
    return Glu
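# A hypothetical call to LM_predic; the file names are invented for illustration,
# DI1/DI2 are the two diabetes-index values appended to the feature vector:
glucose = LM_predic('model_LM_parameter.csv', 'model_normalize.csv',
                    'subject01_Summary.xlsx', DI1=1.0, DI2=4.0)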
# Creating a neural network using the Levenberg-Marquardt backpropagation training function
# Used for quick-descent training and possibly a more accurate prediction
# Fewer hidden layers and fewer nodes are used due to a larger propensity to overfit
# Cannot use a validation set for early stopping in pyrenn, so these two lines are used to find convergence
# Seems to converge around 10 epochs. Should stop early at 10 epochs to avoid overfitting on a small dataset
net = pyrenn.CreateNN([8, 5, 1])
pyrenn.train_LM(X_train.T, Y_train.T, net, verbose=1, k_max=20)

# The predictions are averaged across many different trained models
for i in range(0, 10):
    print(i)
    net = pyrenn.CreateNN([8, 5, 1])
    pyrenn.train_LM(X_train.T, Y_train.T, net, verbose=0, k_max=10)
    if i == 0:
        LM_predictions = pyrenn.NNOut(X_test.T, net)
    else:
        # incremental running average over the models trained so far
        LM_predictions = (LM_predictions * i + pyrenn.NNOut(X_test.T, net)) / (i + 1)
    print(i)

# Prints the correlation between the average of model outputs and the targets,
# then the correlation between the most recent model output and the targets
print(np.corrcoef(LM_predictions.T, Y_test.T)[0, 1]**2)
print(np.corrcoef(pyrenn.NNOut(X_test.T, net).T, Y_test.T)[0, 1]**2)

# A scatter of pyrenn model claims predictions vs. actual claims
pl.scatter(LM_predictions.T, Y_test.T)
pl.show()

# Prints accuracy measures for each model
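# The running average above relies on the incremental-mean identity
# avg_{i+1} = (avg_i * i + x_{i+1}) / (i + 1); a quick standalone sanity check:
import numpy as np

preds = [np.array([1.0, 2.0]), np.array([3.0, 4.0]), np.array([5.0, 6.0])]
avg = preds[0]
for i in range(1, len(preds)):
    avg = (avg * i + preds[i]) / (i + 1)  # same update as in the loop above
assert np.allclose(avg, np.mean(preds, axis=0))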
gamma = -0.35

# Loading pyrenn model
net = pyrenn.loadNN('pyrennweights_2.csv')

# Input
sr, sx = wavfile.read(sourcefile)
l = len(sx)

# Framing
sourceframes = librosa.util.frame(sx, frame_length=frameLength,
                                  hop_length=hop_length).astype(np.float64).T
# Windowing
sourceframes *= pysptk.blackman(frameLength)
# Extract MCEPs
sourcemcepvectors = np.apply_along_axis(pysptk.mcep, 1, sourceframes, order, alpha)

# Provide the source MCEPs as input to the trained neural network, which gives the target MCEPs
mgc = pyrenn.NNOut(sourcemcepvectors.transpose(), net).transpose()
mgc = mgc.copy(order="C")

# Finding the log spectrum
logspec = np.apply_along_axis(pysptk.mgc2sp, 1, mgc, 0.41, 0.0, frameLength)
# Convert to FFT domain
spec = np.exp(logspec).T
# Convert to time domain
output_speechover = librosa.core.istft(spec, hop_length, frameLength,
                                       pysptk.blackman(frameLength))
# Output
librosa.output.write_wav("test_out.wav", output_speechover, sr)
# Train NN with training data P=input and Y=target
# Set maximum number of iterations k_max to 100
# Set termination condition for Error E_stop to 1e-3
# The training will stop after 100 iterations or when the Error <= E_stop
net = prn.train_LM(P, Y, net, verbose=True, k_max=100, E_stop=1e-3)
prn.saveNN(net, "RNNmodel_st.mdl")
print("saved")
os._exit(0)

# print("loading")
# net = prn.loadNN("RNNmodel.mdl")
# print("loaded")

###
# Calculate outputs of the trained NN for train and test data
y = prn.NNOut(P, net)
# for i, o in zip(P, Y):
#     output = winner_net.activate(i)
#     print("input {!r}, expected output {!r}, got {!r}".format(i, o, output))
#     print("expected output {!r}, got {!r}".format(o, output))
# print(y)
# os._exit(0)
# ytest = prn.NNOut(Ptest, net)

###
# Plot results
fig = plt.figure(figsize=(11, 7))
ax0 = fig.add_subplot(211)
ax1 = fig.add_subplot(212)
    xQV, bQV,
))
xV = np.transpose(xV)
mdl_name = datdir + 'Bin/pyrennNet4_' + basin + '_L' + str(nn) + '.csv'

# # Create and train NN
# print 'Training NN..'
# net = prn.CreateNN([x.shape[0],7,1])
# net = prn.train_LM(x,t,net,verbose=True,k_max=500,E_stop=1e-5)
# prn.saveNN(net,mdl_name)

# Load saved NN
net = prn.loadNN(mdl_name)

# Calculate outputs of the trained NN for train and test data
yV[:, nn] = prn.NNOut(xV, net)
# tmp = y[:, nn]
# tmp[tmp < 0] = 0
# tmp[tmp > 400000] = 0
# y[:, nn] = tmp
# tmp = yV[:, nn]
# tmp[tmp < 0] = 0
# tmp[tmp > 400000] = 0
# yV[:, nn] = tmp

byV[0, nn] = yV[0, nn]
for k in range(1, len(yV)):
    byV[k, nn] = min(yV[k, nn], ((1 - BFImax) * a * byV[k - 1, nn] +
# Train NN with training data Ptrain=input and Ytrain=target
# Set maximum number of iterations k_max
# Set termination condition for Error E_stop
# The training will stop after k_max iterations or when the Error <= E_stop
net = prn.train_LM(Ptrain, Ytrain, net, verbose=True, k_max=1, E_stop=1e-5)
print('Batch No. ', i, ' of ', number_of_batches)

###
# Select test data
# Choose a random number 0...5000-9
idx = np.random.randint(0, 5000 - 9)
# Select 9 random test input data
P_ = Ptest[:, idx:idx + 9]
# Calculate the NN output for the 9 random test inputs
Y_ = prn.NNOut(P_, net)

###
# PLOT
fig = plt.figure(figsize=[11, 7])
gs = mpl.gridspec.GridSpec(3, 3)
for i in range(9):
    ax = fig.add_subplot(gs[i])
    y_ = np.argmax(Y_[:, i])       # find the index with the highest value in the NN output
    p_ = P_[:, i].reshape(28, 28)  # convert the input data for plotting
    ax.imshow(p_)                  # plot the input data
    ax.set_xticks([])
    ax.set_yticks([])
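# The 'Batch No.' print suggests the training call above sits inside a batch loop; a
# hedged sketch of such a loop, with P and Y assumed to be the full training arrays
# (columns = samples) and the batch size illustrative:
batch_size = 1000
number_of_batches = P.shape[1] // batch_size
for b in range(number_of_batches):
    Ptrain = P[:, b * batch_size:(b + 1) * batch_size]
    Ytrain = Y[:, b * batch_size:(b + 1) * batch_size]
    # one LM iteration per batch, as in the train_LM call above
    net = prn.train_LM(Ptrain, Ytrain, net, verbose=True, k_max=1, E_stop=1e-5)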
def building_with_storage(u, x, t, cst, Srs, Data):
    """For the current timestep t and the current decision u, the transition
    from the actual timestep i to the following timestep j is simulated for
    all possible states, which are stored in x. The costs of this transition
    and the array of states after it are calculated.

    Args:
        u: decision from the list of possible ones
        x: array where all possible system states are stored
        t: actual timestep i
        cst: constants needed for calculation
        Srs: values of needed timeseries

    Returns:
        cost: costs at timestep i
        x_j: array with states at timestep j after the transition due to decision u
        data: dataframe which keeps additional information about the transition from i to j
    """
    ###############################################
    # Defining T_room and P_th for timestep j
    # using the pre-trained NN
    ###############################################
    l = len(x[0])
    delay = 4
    net = cst['net']

    # create 5 inputs for input array P
    hour = Srs.loc[t]['hour']
    solar = Srs.loc[t]['solar']
    T_amb = Srs.loc[t]['T_amb']
    user = Srs.loc[t]['use_room']
    T_inlet = Srs.loc[t]['T_inlet']

    # create the 6th input depending on the current decision
    if u == 'pump on/storage on' or u == 'pump off/storage on':
        massflow = cst['massflow']
    elif u == 'pump off/storage off' or u == 'pump on/storage off':
        massflow = 0

    # defining input array P for the NN
    P = np.array([[hour], [solar], [T_amb], [user], [massflow], [T_inlet]],
                 dtype=float)

    # prepare 5 inputs for P0
    hour0 = Srs.loc[t - delay:t - 1]['hour'].values.copy()
    solar0 = Srs.loc[t - delay:t - 1]['solar'].values.copy()
    T_amb0 = Srs.loc[t - delay:t - 1]['T_amb'].values.copy()
    user0 = Srs.loc[t - delay:t - 1]['use_room'].values.copy()
    T_inlet0 = Srs.loc[t - delay:t - 1]['T_inlet'].values.copy()

    # initial values used inside the loop
    T_roomj = np.zeros(l)
    P_th = np.zeros(l)
    # initial values used outside the loop
    E_j = np.zeros(l)
    P_el = np.zeros(l)
    costx = np.zeros(l)

    # loop over every possible temperature state
    for i, x1 in enumerate(x[0]):
        # prepare the 6th input for P0 and the 2 outputs for Y0
        if t - delay < cst['t_start']:
            # take all values for P0 and Y0 from the timeseries
            if Data is None or t == cst['t_start']:
                T_room0 = np.ones(delay) * x1
                P_th0 = Srs.loc[t - delay:t - 1]['P_th'].values.copy()
                massflow0 = Srs.loc[t - delay:t - 1]['massflow'].values.copy()
            # take part of the values from the timeseries and part from big Data
            else:
                tx = t - cst['t_start']
                T_room0 = np.concatenate([
                    Srs.loc[t - delay:t - tx - 1]['T_room'].values.copy(),
                    Data.loc[t - tx - 1:t - 1].xs(i, level='Xidx_end')['T_room'].values.copy()])
                P_th0 = np.concatenate([
                    Srs.loc[t - delay:t - tx - 1]['P_th'].values.copy(),
                    Data.loc[t - tx - 1:t - 1].xs(i, level='Xidx_end')['P_th'].values.copy()])
                massflow0 = np.concatenate([
                    Srs.loc[t - delay:t - tx - 1]['massflow'].values.copy(),
                    Data.loc[t - tx - 1:t - 1].xs(i, level='Xidx_end')['massflow'].values.copy()])
        # take all values for P0 and Y0 from big Data
        else:
            T_room0 = Data.loc[t - delay:t - 1].xs(i, level='Xidx_end')['T_room'].values.copy()
            P_th0 = Data.loc[t - delay:t - 1].xs(i, level='Xidx_end')['P_th'].values.copy()
            massflow0 = Data.loc[t - delay:t - 1].xs(i, level='Xidx_end')['massflow'].values.copy()

        # create P0 and Y0
        P0 = np.array([hour0, solar0, T_amb0, user0, massflow0, T_inlet0],
                      dtype=float)
        Y0 = np.array([T_room0, P_th0], dtype=float)

        # run the NN for one timestep
        if np.any(P0 != P0) or np.any(Y0 != Y0) or np.any(Y0 > 1000):
            # if P0 or Y0 are not valid (NaN), use fallback values and apply penalty costs
            costx[i] = 1000 * 10
            T_roomj[i] = x1
            P_th[i] = 0
        else:
            T_roomj[i], P_th[i] = prn.NNOut(P, net, P0=P0, Y0=Y0)
            if T_roomj[i] != T_roomj[i] or P_th[i] != P_th[i]:  # NaN check
                pdb.set_trace()
    # calculating the heat-storage state depending on the chosen decision
    P_hp = 2
    if u == 'pump on/storage on':
        E_j = x[1]
        P_el = 3 * P_th
    elif u == 'pump on/storage off':
        E_j = x[1] + P_hp * 3 * 0.25
        P_el = P_hp
    elif u == 'pump off/storage on':
        E_j = x[1] - P_th * 0.25
        P_el = 0
    elif u == 'pump off/storage off':
        E_j = x[1]
        P_el = 0
    costx = costx + 99999 * (E_j < x[1][0]) + 99999 * (E_j > x[1][-1])

    ###############################################
    # Building array x_j for timestep j and
    # calculating all costs for the transition from i to j
    ###############################################
    # building x_j
    x_j = np.vstack((T_roomj, E_j))

    # selecting borders for the allowed T_room
    Tmax = Srs.loc[t]['Tmax']
    Tmin = Srs.loc[t]['Tmin']
    # selecting borders for the possible energy content of the heat storage E
    Emax = x[1][-1]
    Emin = x[1][0]

    # calculate penalty costs (quadratic below Tmin, mirroring the Tmax term)
    costx = (x_j[0] > Tmax) * (x_j[0] - Tmax)**2 * 99999 \
        + (x_j[0] < Tmin) * (Tmin - x_j[0])**2 * 9999 \
        + (x_j[1] > Emax) * 99999 + (x_j[1] < Emin) * 99999 \
        + costx

    # correcting x_j
    x_j[0] = np.clip(x_j[0], x[0][0], x[0][-1])
    x_j[1] = np.clip(x_j[1], x[1][0], x[1][-1])

    # calculate costs
    cost = P_el * Srs.loc[t]['price_elec'] * 0.25 + costx

    # define the results to be put in Data
    data = pd.DataFrame(index=np.arange(l))
    data['P_th'] = P_th
    data['P_el'] = P_el
    data['T_room'] = x_j[0]
    data['E'] = x_j[1]
    data['massflow'] = massflow
    data['cost'] = cost
    data['costx'] = costx

    return cost, x_j, data
test = pd.read_csv('C:/TermProject/ClassifiedRnn/testREC.csv', sep=',', header=None)
train = np.array(train)
test = np.array(test)

net = prn.CreateNN([1000, 20, 1], dIn=[0], dIntern=[1], dOut=[1, 2])
net = prn.train_LM(train[:, 1:].T, train[:, 0], net, verbose=True, k_max=30, E_stop=1e-3)

y = prn.NNOut(train[:, 1:].T, net)
ytest = prn.NNOut(test[:, 1:].T, net)

yTestPrd = np.array(ytest)
yTrainPrd = np.array(y)
yTestCor = test[:, 0]
yTrainCor = train[:, 0]

difTest = yTestPrd - yTestCor
difTrain = yTrainPrd - yTrainCor
accTest = np.mean(np.abs(difTest))
accTrain = np.mean(np.abs(difTrain))
print(accTrain)
print(accTest)
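# For reference, a hedged reading of CreateNN's delay arguments as used above
# (this follows pyrenn's documented semantics; the nets below are illustrative):
# dIn     - delays of the external input into layer 1 (0 = current timestep)
# dIntern - delays of the internal recurrent connections
# dOut    - delays of the output fed back into layer 1
net_ff = prn.CreateNN([1000, 20, 1])                                       # plain feed-forward
net_rec = prn.CreateNN([1000, 20, 1], dIn=[0], dIntern=[1], dOut=[1, 2])   # recurrent, as above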
massflow0 = Srs.loc[t - delay:t - 1]['massflow'].values.copy()
T_room0 = Srs.loc[t - delay:t - 1]['T_room'].values.copy()
P_th0 = Srs.loc[t - delay:t - 1]['P_th'].values.copy()

hour = Srs.loc[t]['hour']
solar = Srs.loc[t]['solar']
T_amb = Srs.loc[t]['T_amb']
user = Srs.loc[t]['use_room']
T_inlet = Srs.loc[t]['T_inlet']
massflow = Srs.loc[t]['massflow']

P0 = np.array([hour0, solar0, T_amb0, user0, massflow0, T_inlet0], dtype=float)
Y0 = np.array([T_room0, P_th0], dtype=float)
P = np.array([[hour], [solar], [T_amb], [user], [massflow], [T_inlet]], dtype=float)

T_room, P_th = prn.NNOut(P, net, P0=P0, Y0=Y0)
Srs.loc[t, 'P_th'] = P_th
Srs.loc[t, 'T_room'] = T_room

fig = plt.figure(figsize=[11, 7])
ax0 = fig.add_subplot(211)
ax0.plot(Srs.loc[timesteps]['T_room'])
ax1 = fig.add_subplot(212)
ax1.plot(Srs.loc[timesteps]['P_th'])
plt.show()
y.pop(len(y) - 1)
x.pop(0)
inputdata = []
inputdata = copy.deepcopy(x)
# inputdata = inputdata[len(inputdata)-1]
check = copy.deepcopy(inputdata)

inputdata = np.asarray(inputdata, dtype=float)
xpredbest = np.asarray(xpredbest, dtype=float)
x = np.asarray(x, dtype=float)
y = np.asarray(y, dtype=float)
x = x.reshape(30, len(x))
inputdata = inputdata.reshape(30, len(inputdata))

# Create NN
net = pyrenn.CreateNN([30, 10, 1])
# Train
net = pyrenn.train_LM(x, y, net, verbose=True, k_max=200, E_stop=1e-2)
# Predict
prevout = pyrenn.NNOut(inputdata, net)

# Plot and show
plt.plot(range(len(xpredbest)), xpredbest)
plt.plot(range(len(prevout)), prevout, color='red')
plt.show()
def finddisease(request):
    del list[:]  # module-level result list (shadows the builtin name)
    if request.method == 'POST':
        # collect the 50 symptom checkboxes symptom1..symptom50 from the form
        q = [request.POST.get("symptom%d" % i) for i in range(1, 51)]
        s = [0] * 50
        for i in range(len(q)):
            if q[i] == 'on':
                s[i] = 1

        wb = load_workbook('test.xlsx', data_only=True)
        # wb = load_workbook('dataset.xlsx', data_only=True)
        ws = wb.active
        # write the symptom flags into the 50 consecutive columns CZ2..EW2
        from openpyxl.utils import get_column_letter
        for i in range(50):
            ws[get_column_letter(104 + i) + '2'].value = s[i]  # column 104 is 'CZ'
        wb.save('test.xlsx')
        df = pd.ExcelFile('test.xlsx').parse('Sheet')
        # build the NN input from the 50 symptom columns s1..s50
        P0 = np.array([df['s%d' % i].values for i in range(1, 51)])
        # raw string so the backslashes in the Windows path are not escape sequences
        net = prn.loadNN(r'C:\Users\STUTI\Desktop\minor_final.csv')
        diseases = [
            'AIDS (acquired immuno-deficiency syndrome)', 'Adhesion', 'Affect labile',
            'Alzheimers disease', 'Anemia', 'Aphasia', 'Asthma', 'Biliary calculus',
            'Bipolar disorder', 'Carcinoma prostate', 'Cholecystitis',
            'Chronic alcoholic intoxication', 'Chronic kidney failure',
            'Chronic obstructive airway disease', 'Coronary arteriosclerosis',
            'Decubitus ulcer', 'Degenerative polyarthritis', 'Deglutition disorder',
            'Dehydration', 'Depressive disorder', 'Diverticulosis', 'Pulmonary Embolism',
            'Encephalopathy', 'Endocarditis', 'Epilepsy', 'Heart Failure', 'Kidney Failure',
            'Fibroid tumor', 'Gastritis', 'Gout', 'Hepatitis', 'Hernia hiatal',
            'Hyperbilirubinemia', 'Hypercholesterolemia', 'Hyperglycemia', 'Hypothyroidism',
            'Ileus', 'Incontinence', 'Infection urinary tract', 'Influenza',
            'Insufficiency renal', 'Lymphoma', 'Malignant neoplasm of breast',
            'Malignant neoplasm of prostate', 'Malignant tumor of colon',
            'Myocardial infarction', 'Neoplasm', 'Neoplasm Metastasis', 'Obesity',
            'Obesity Morbid'
        ]
        y = prn.NNOut(P0, net)
        z = y
        np.around(y, 0, z)  # round the outputs in place (z aliases y)
        for i in range(len(z)):
            if z[i][0] == 1:
                list.append(diseases[i])

        if request.session['first_name'] is None:
            template = loader.get_template('symptomchecker/index.html')
            context = RequestContext(request, {'checked': list, 'checked1': ws})
            return HttpResponse(template.render(context))
        else:
            template = loader.get_template('symptomchecker/indexloggedin.html')
            context = RequestContext(request, {
                'checked': list,
                'checked1': ws,
                'name': request.session['first_name']
            })
            return HttpResponse(template.render(context))
    else:
        if request.session['first_name'] is None:
            sform = symform()
            template = loader.get_template('symptomchecker/symtest.html')
            context = RequestContext(request, {'symform': sform})
            return HttpResponse(template.render(context))
        else:
            sform = symform()
            template = loader.get_template('symptomchecker/symptomc.html')
            context = RequestContext(request, {
                'symform': sform,
                'name': request.session['first_name']
            })
            return HttpResponse(template.render(context))
def lm_NN(nn_in, nn_out, er_tar, main_win: tk.Toplevel, min_n=0, max_n=0,
          n_epochs=50, conf=[1, 1, 1], sect_ner=False, train_test=False,
          retrain=False, spl_coef=0.1, root_width=200, root_height=200,
          root_x=50, root_y=50):
    """
    create and train a pyrenn NN with the LM optimization algorithm

    nn_in: list, train NN IN data
    nn_out: list, train OUT data
    er_tar: float, MSE target
    min_n: int, minimum neurons for selection of the number of neurons
    max_n: int, maximum neurons for selection of the number of neurons
    n_epochs: int, maximum NN train epochs
    conf: list, NN configuration; first element - number of inputs,
        last element - number of outputs, other elements - number of
        neurons on the hidden layers
    sect_ner: bool or 0/1, whether to select the number of neurons
    train_test: bool or 0/1, whether the input sample should be split into train and test
    retrain: bool or 0/1, overfitting protection
    return nn_obj, NN object
        conf: list, NN neurons configuration
    """
    if train_test or retrain:
        x = tr.from_numpy(nn_in).float()
        y = tr.from_numpy(nn_out).float()
        dataset = tr.utils.data.TensorDataset(x, y)
        a = int(len(dataset) * (1 - spl_coef))
        data_trn, data_test = tr.utils.data.random_split(
            dataset, [a, int(len(dataset) - a)],
            generator=tr.Generator().manual_seed(42))
        dataloader = tr.utils.data.DataLoader(data_trn, shuffle=False,
                                              batch_size=len(data_trn))
        nn_in = (next(iter(dataloader))[0].numpy()).T
        nn_out = (next(iter(dataloader))[1].numpy()).T
        nn_in_test = data_test[:][0].numpy().T
        nn_out_test = data_test[:][1].numpy().T
    else:
        nn_in_test = 0
        nn_out_test = 0
        nn_in = nn_in.T
        nn_out = nn_out.T

    if sect_ner:
        if min_n > max_n:
            print("Error: minimum neurons > maximum neurons")
        for i in range(len(conf) - 2):
            conf[i + 1] = min_n
        # grid-search the neuron count of each hidden layer in turn
        for i in range(1, len(conf) - 1):
            min_loss = 20000
            b = conf[i]
            for j in range(min_n, max_n + 1):
                conf[i] = j
                nn_obj = prn.CreateNN(conf)
                nn_obj = prn.train_LM(nn_in, nn_out, nn_obj, verbose=False,
                                      k_max=1, E_stop=1e-10)
                y_pred = prn.NNOut(nn_in, nn_obj)
                a = loss(y_pred, nn_out, nn_obj)
                print("Current configuration:", conf, ";\t Loss:", a, "%")
                if a < min_loss:
                    min_loss = a
                    b = j
            conf[i] = b
        neurons_number_info(conf, root_height + root_y, root_x)
        print("Best configuration:", conf)

    nn_obj = prn.CreateNN(conf)
    loss_val = []
    loss_test = []
    epochs_sum = 0
    train = True
    lin_tr = None
    can_tr = None
    lin_test = None
    can_test = None
    vert_coef = None
    hor_coef = None
    while train == True:
        nn_obj, vert_coef, hor_coef, epochs_sum, err, lin_tr, can_tr, lin_test, can_test = train_body_LM(
            nn_obj, nn_in, nn_in_test, nn_out, nn_out_test, n_epochs,
            epochs_sum, loss_val, loss_test, er_tar, train_test, retrain,
            root_width, root_height, root_x, root_y, lin_tr, can_tr,
            lin_test, can_test, vert_coef, hor_coef)
        train_win = Continue_Train(err, epochs_sum)
        main_win.wait_window(train_win)
        train = train_win.answer
        if not train_win.answer:
            return nn_obj, conf, vert_coef, hor_coef
        # train = False
    return nn_obj, conf, vert_coef, hor_coef
y_of_test = y_test.T / 600
# ------------------------------ read file, create DataFrame ---------------------------------

# ---------------------------- ANN main program ----------------------------------------------
# 8 inputs, 2 hidden layers, 3 neurons (create NN)
net = prn.CreateNN(
    [features_Num, hiddenlayer1_features, hiddenlayer2_features, 1])

# Train by NN
net = prn.train_LM(x_of_train, y_of_train, net, verbose=True,
                   k_max=iteration, E_stop=1e-10)

# print out result
y_prn_train = prn.NNOut(x_of_train, net)
y_prn_test = prn.NNOut(x_of_test, net)
# print('Predicted Y for the x train data:', '\n', y_prn_train * 600)
# print('Predicted Y for the x test data:', '\n', y_prn_test * 600)
# ---------------------------- ANN main program ----------------------------------------------

# ---------------------------- check the results ---------------------------------------------
# visualize result
plt.scatter(y_of_train * 600, y_prn_train * 600, label='Train sets (70% of data)')
plt.scatter(y_of_test * 600, y_prn_test * 600, label='Verify sets (30% of data)')
plt.title('ANN Simulation Result')
plt.xlabel('Input glucose (mg/dL)')
def train_body_LM(nn_obj, nn_in, nn_in_test, nn_out, nn_out_test, n_epochs,
                  epochs_sum, loss_val, loss_test, er_tar, train_test, retrain,
                  root_width, root_height, root_x, root_y, lin_tr, can_tr,
                  lin_test=None, can_test=None, vert_coef=None, hor_coef=None):
    for i in range(n_epochs):
        nn_obj = prn.train_LM(nn_in, nn_out, nn_obj, verbose=False,
                              k_max=1, E_stop=1e-10)
        y_pred = prn.NNOut(nn_in, nn_obj)
        loss_val.append(loss(y_pred, nn_out, nn_obj))
        print("Train data loss:", loss_val[-1], "%")
        print(i)
        if i == 0 and epochs_sum == 0:
            lin_tr, can_tr, vert_coef, hor_coef = loss_plot(
                i, loss_val, "Train data loss",
                root_width, root_height, root_x, root_y)
        else:
            update_plot(lin_tr, can_tr, epochs_sum + i, loss_val)
        if train_test or retrain:
            y_pred_test = prn.NNOut(nn_in_test, nn_obj)
            loss_test.append(loss(y_pred_test, nn_out_test, nn_obj))
            print("Test data loss:", loss_test[-1], "%")
            if i == 0 and epochs_sum == 0:
                lin_test, can_test, vert_coef, _ = loss_plot(
                    i, loss_test, "Test data loss",
                    hor_coef, root_height, root_x, root_y)
            else:
                update_plot(lin_test, can_test, epochs_sum + i, loss_test)
            if i > 3 and retrain:
                if loss_test[-1] * 3 - (loss_test[-2] + loss_test[-3] + loss_test[-4]) > 0:
                    print("Retraining")
                    epochs_sum += i + 1
                    return nn_obj, vert_coef, hor_coef, epochs_sum, loss_val[-1], \
                        lin_tr, can_tr, lin_test, can_test
        if loss_val[-1] <= er_tar:
            break
    epochs_sum += i + 1
    return nn_obj, vert_coef, hor_coef, epochs_sum, loss_val[-1], \
        lin_tr, can_tr, lin_test, can_test
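# The retrain check above stops training once the newest test loss exceeds the mean of
# the three losses before it; an equivalent, more explicit form of the same condition:
#
#     overfitting = loss_test[-1] > np.mean(loss_test[-4:-1])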
def Tranning_by_Neural_Network():
    # ------------------------------ read file, create DataFrame ---------------------------------
    # filepath = 'C:\\Users\\richard.weng\\Documents\\Python Scripts\\python_projects\\(1) NIVG Project\\ANN\\'
    file_data = file_name.get() + '.csv'
    df0 = pd.read_csv(file_data)
    # select the test subject
    # df = df[df.Name=='Nick']
    df = df0.iloc[:, 1:]  # remove the first column (tester name)
    print(df.T.tail())
    print('--------------------------------------------')
    print('Length of df:', len(df))
    print('--------------------------------------------')
    P = df.T.iloc[1:features_Num.get() + 1, 0:len(df)]
    print(P.tail())
    print('Shape of the input:', P.shape)
    print('--------------------------------------------')
    Y = df.T.iloc[0:1, 0:len(df)]
    print(Y.tail())
    print('Shape of the output:', Y.shape)
    print('--------------------------------------------')

    # convert to 2D arrays
    P = np.array(P)
    Y = np.array(Y)

    # 70% for training, 30% for verification (TrainingData and TestingData)
    x_train, x_test, y_train, y_test = train_test_split(P.T, Y.T,
                                                        test_size=0.3,
                                                        random_state=None)
    x_of_train = (x_train / np.amax(x_train, axis=0)).T
    x_of_test = (x_test / np.amax(x_train, axis=0)).T
    y_of_train = y_train.T / 600
    y_of_test = y_test.T / 600
    # ------------------------------ read file, create DataFrame ---------------------------------

    # ---------------------------- ANN main program ----------------------------------------------
    # 8 inputs, 2 hidden layers, 3 neurons (create NN)
    net = prn.CreateNN([
        features_Num.get(),
        hiddenlayer1_features.get(),
        hiddenlayer2_features.get(),
        1
    ])

    # Train by NN
    net = prn.train_LM(x_of_train, y_of_train, net, verbose=True,
                       k_max=iteration.get(), E_stop=1e-10)

    # print out result
    y_prn_train = prn.NNOut(x_of_train, net)
    y_prn_test = prn.NNOut(x_of_test, net)
    # print('Predicted Y for the x train data:', '\n', y_prn_train * 600)
    # print('Predicted Y for the x test data:', '\n', y_prn_test * 600)
    # ---------------------------- ANN main program ----------------------------------------------

    # ---------------------------- check the results ---------------------------------------------
    # visualize result
    plt.scatter(y_of_train * 600, y_prn_train * 600)
    plt.scatter(y_of_test * 600, y_prn_test * 600)
    plt.title('ANN Simulation Result')
    plt.xlabel('Input glucose (mg/dL)')
    plt.ylabel('Predicted glucose (mg/dL)')
    plt.grid()
    plt.show()
    print('Actual glucose of the test set:', '\n', y_of_test * 600)
    print('Predicted glucose of the test set:', '\n', y_prn_test * 600)
    # ---------------------------- check the results ---------------------------------------------

    # Save ANN
    prn.saveNN(net, file_name.get() + '_LM_parameter' + '.csv')

    # Check final correlation
    y_all = prn.NNOut((P.T / np.amax(x_train, axis=0)).T, net) * 600
    plt.scatter(Y.flatten(), y_all)
    Name = df0['Name'].values.tolist()
    df_result = pd.DataFrame({
        'Name': Name,
        'total_y': Y.flatten(),
        'total_pre_y': y_all
    })
    print('Correlation analysis:\n', df_result.corr())
    # print how many samples there are
    print('Total number of samples:', len(df_result))
    # Save the new result into a new CSV file
    df_result.to_csv(file_name.get() + '_LM_result' + '.csv')
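# Worth noting: the inputs above are scaled by the per-feature training maxima and the
# target by 600, so any later prediction must reuse the same factors; a minimal sketch,
# with x_new standing in for fresh (samples x features) data:
x_new_scaled = (x_new / np.amax(x_train, axis=0)).T  # same maxima as during training
glucose_new = prn.NNOut(x_new_scaled, net) * 600     # undo the target scaling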
print(xpred)
print(xpred.shape)
xpred = xpred.reshape(30, len(xpred))
print(xpred.shape)
print(x.shape)
print(xpred)
# print(ny_y.shape)

# Train the network (a pyrenn LM-trained NN, not a linear regression object)
# mlpr = MLPRegressor().fit(x, y)
net = pyrenn.CreateNN([30, 10, 1])
# print(net)
net = pyrenn.train_LM(x, ny_y, net, verbose=True, k_max=200, E_stop=1e-2)
y2 = pyrenn.NNOut(xpred, net)
print(y2)
"""
ytest = pyrenn.NNOut(Ptest,net)

fig = plt.figure(figsize=(11,7))
ax0 = fig.add_subplot(211)
ax1 = fig.add_subplot(212)
fs = 18

# Train Data
ax0.set_title('Train Data', fontsize=fs)
ax0.plot(x, y2, color='b', lw=2, label='NN Output')
ax0.plot(x, y, color='r', marker='None', linestyle=':', lw=3, markersize=8, label='Train Data')
ax0.tick_params(labelsize=fs-2)
ax0.legend(fontsize=fs-2, loc='upper left')
def outputNetwork(f):
    img = cv2.imread(f + "image.png", 0)
    process_image(0, f + "image.png", f, f)
    make_square(f + "/0.png")
    resize_image(f + "/0.png")

    features_list = []
    img = cv2.imread(f + "/0.png", 0)
    image_features = hog(img, block_norm='L2-Hys', pixels_per_cell=(16, 16))
    features_list.append(image_features)
    feature_matrix = np.array(features_list)

    ss = StandardScaler()
    # run this on our feature matrix
    fracture_stand = ss.fit_transform(feature_matrix)
    pca = PCA(n_components=500)
    # note: the scaler is applied a second time here; the PCA object above is created
    # but never used
    fracture_pca = ss.fit_transform(fracture_stand)
    # look at new shape
    # print('PCA matrix shape is: ', fracture_pca.shape)
    X = pd.DataFrame(fracture_pca)

    svm = load("C:/TrainedModels/svm_model_PI12_68.36%.csv")
    y_pred = svm.predict(X)
    svm_prob = svm.predict_proba(X)[0]
    print(type(svm_prob))

    valid_data = glcmNN(f, 1)
    net = pyrenn.loadNN("C:/TrainedModels/glcm_model_1.csv")
    y = pyrenn.NNOut(valid_data.transpose(), net)

    final = 0
    glcmOutput = final_OP(y)[0][0]
    SVMOutput = y_pred[0]
    glcm_prob = [0.0, 0.0]
    if glcmOutput == 1:
        glcm_prob = [0.25, 0.75]
    else:
        glcm_prob = [0.75, 0.25]
    '''
    for i,j in zip(final_OP(y),y_pred):
        if i[0]==j==0:
            final=0
        elif i[0]==j==1:
            final=0
        elif j==1:
            final=1
        elif i[0]==0:
            final=0
        else:
            final=0
    '''
    print("GCLM output:", glcmOutput)
    print("SVM output:", SVMOutput)
    print("GCLM prob:", glcm_prob)
    print("SVM prob:", svm_prob)
    # average the two classifiers' class probabilities
    final_prob = [(svm_prob[0] + glcm_prob[0]) / 2, (svm_prob[1] + glcm_prob[1]) / 2]
    print('Final prob: ', final_prob)
    if final_prob[0] > 0.5:
        final = 0
    else:
        final = 1
    return final, final_prob[1]
Y = np.array([y['pitch'].values, y['roll'].values])

x = pd.read_csv('test_output_100ms.txt', index_col=False)
Ptest = np.array([x['pitch'].values, x['roll'].values])
print(P)

y = pd.read_csv('test_input_100ms.txt', index_col=False)
Ytest = np.array([y['pitch'].values, y['roll'].values])
t2 = np.array(y['time'].values)

print('Creating')
net = prn.CreateNN([2, 7, 7, 2])
print('Start training...')
prn.train_LM(P, Y, net, verbose=True, k_max=40, E_stop=1e-3)

###
# Calculate outputs of the trained NN for train and test data
y = prn.NNOut(P, net)
ytest = prn.NNOut(Ptest, net)
print(ytest)

###
# Plot results
fig = plt.figure(figsize=(15, 10))
ax0 = fig.add_subplot(221)
ax1 = fig.add_subplot(222, sharey=ax0)
ax2 = fig.add_subplot(223)
ax3 = fig.add_subplot(224, sharey=ax2)
fs = 18
# t1 = np.arange(0, 2227.0) / 4  # 480 timesteps in 15 minute resolution
# t1 = pd.read_csv('train_input.txt', index_col=False)
# t2 = np.arange(0, 982.0) / 4   # 480 timesteps in 15 minute resolution

# Train Data
print(len(t1))
fileName = '0905-1_Volts_20190122_090921.txt_new.xlsx'
df_test = pd.read_excel(fileName, 'Summary').T

# drop the parameters that are not needed
columnDrop = [
    '1300 AC',
    '1300 DC',
    '1300 HR',
    '1300 Area',
    '1300 PWTT',
    '1300 BVI value',
    '1300 BVI amp',
    '1300 BVI time',
    '1300 BVA value',
]
df_test = df_test.drop(columnDrop, axis=1)
input_x = df_test.iloc[0, :].tolist()

# diabetes index
input_x.extend([1.0, 4.0])
input_f = np.array(input_x)

# normalize the parameters
input_x_normalized = input_f / normalize_factor
input_x_pre = input_x_normalized.reshape(29, 1)

# output reading
glucose = prn.NNOut(input_x_pre, net) * 600
print(glucose)
fig, ax1 = plt.subplots()
ax1.plot(row_temp_training, dc1_p_training, 'g-')
ax2 = ax1.twinx()
ax2.plot(row_temp_training, dc2_p_training, 'r')
plt.show()
plt.clf()
plt.close()

row_temp_training = np.array(row_temp_training)
dc1_p_training = np.array(dc1_p_training)
dc2_p_training = np.array(dc2_p_training)
G_t_training = np.array(G_t_training)

G_t_predict = prn.NNOut(dc1_p, net)
print(G_t_predict)
print(G_t)

fig, ax1 = plt.subplots()
ax1.plot(row_temp, G_t, 'b-')
ax2 = ax1.twinx()
ax2.plot(row_temp, G_t_predict, 'r')
plt.show()
plt.clf()
plt.close()
inputdata = np.asarray(inputdata, dtype=float)
xpredbest = np.asarray(xpredbest, dtype=float)
x = np.asarray(x, dtype=float)
y = np.asarray(y, dtype=float)
x = x.reshape(30, len(x))
inputdata = inputdata.reshape(30, len(inputdata))
print(x.shape)
print(y.shape)
print(inputdata.shape)

net = pyrenn.CreateNN([30, 10, 1])
net = pyrenn.train_LM(x, y, net, verbose=True, k_max=200, E_stop=1e-2)

prevxpred = xpred
prevout = pyrenn.NNOut(inputdata, net)
plt.plot(range(len(prevout)), prevout, color='red')

previnputdata = inputdata
temp = inputdata.tolist()
temp1 = prevout.tolist()
temp.append(temp1[len(temp1) - 31:len(temp1) - 1])
print(temp)
print(len(temp[len(temp) - 1]))
print(len(temp[len(temp) - 2]))
inputdata = np.asarray(temp, dtype=float)

for i in range(30):
    # NNOut takes the previous inputs/outputs as the keyword arguments P0 and Y0,
    # not as a single positional list
    current = pyrenn.NNOut(inputdata, net, P0=previnputdata, Y0=prevout)
    prevout = current
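# A self-contained sketch of the rolling forecast the loop above is building: each
# prediction is appended to the history and fed back through P0/Y0 (toy net, toy data,
# window length and all values illustrative):
import numpy as np
import pyrenn

roll_net = pyrenn.CreateNN([1, 5, 1], dIn=[0], dIntern=[], dOut=[1])
P = np.sin(np.linspace(0, 6, 60)).reshape(1, -1)
Y = np.roll(P, -1)
roll_net = pyrenn.train_LM(P, Y, roll_net, verbose=False, k_max=20, E_stop=1e-5)

P_hist, Y_hist = P[:, -5:], Y[:, -5:]  # keep a short window of recent history
x_step = Y[:, -1:].copy()
forecast = []
for _ in range(10):
    y_step = pyrenn.NNOut(x_step, roll_net, P0=P_hist, Y0=Y_hist)
    forecast.append(y_step.item())
    P_hist = np.hstack([P_hist, x_step])[:, -5:]
    Y_hist = np.hstack([Y_hist, y_step.reshape(1, 1)])[:, -5:]
    x_step = y_step.reshape(1, 1)  # the prediction becomes the next input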
# 3 neurons each and 1 output
# the NN uses the input data at timestep t-1 and t-2
# The NN has a recurrent connection with delay of 1, 2 and 3 timesteps from the output
# to the first layer (and no recurrent connection of the hidden layers)
net = prn.CreateNN([1, 3, 3, 1], dIn=[1, 2], dIntern=[], dOut=[1, 2, 3])

# Train NN with training data P=input and Y=target
# Set maximum number of iterations k_max to 200
# Set termination condition for Error E_stop to 1e-3
# The training will stop after 200 iterations or when the Error <= E_stop
net = prn.train_LM(P, Y, net, verbose=True, k_max=200, E_stop=1e-3)

###
# Calculate outputs of the trained NN for test data with and without previous input P0 and output Y0
ytest = prn.NNOut(Ptest, net)
y0test = prn.NNOut(Ptest, net, P0=P0test, Y0=Y0test)

###
# Plot results
fig = plt.figure(figsize=(11, 7))
ax1 = fig.add_subplot(111)
fs = 18

# Test Data
ax1.set_title('Test Data', fontsize=fs)
ax1.plot(ytest, color='b', lw=2, label='NN Output without P0,Y0')
ax1.plot(y0test, color='g', lw=2, label='NN Output with P0,Y0')
ax1.plot(Ytest, color='r', marker='None', linestyle=':', lw=3, markersize=8, label='Test Data')
ax1.tick_params(labelsize=fs - 2)
ax1.legend(fontsize=fs - 2, loc='lower right')
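# P0test/Y0test above are just the samples immediately preceding the test window; a
# hedged sketch of how they are typically sliced from one long 1D series p, y with a
# train/test split index `split` (these names are illustrative):
q = 3                         # largest delay of the net above (dOut=[1, 2, 3])
Ptest = p[split:]
Ytest = y[split:]
P0test = p[split - q:split]   # the q inputs right before the test data
Y0test = y[split - q:split]   # the q outputs right before the test data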
best_model = prn.train_LM(t2.T, t1.T, best_model, verbose=False, k_max=10, E_stop=1e-8)
if i == 10:
    t1 = np.array(coll_t1).reshape(-1, 2).T
    t2 = np.array(coll_t2).reshape(-1, 1).T
    np.savetxt('t1', t1)
    np.savetxt('t2', t2)
    best_run, best_model = optim.minimize(model=create_model,
                                          data=data,
                                          algo=tpe.suggest,
                                          max_evals=10,
                                          trials=Trials())
print(i + 1)
if i > 9:
    temp = t2.min() - 1e-3
    new = (prn.NNOut(np.array([[temp]]), best_model)).T
    nt = func(new[0])
    print(nt)
    fk1.append(nt)
    fk2.append(new)
    if i % 10 == 0 and i != 0 and i < 50:
        half = (b2 - b1) / 2
        new = new[0] - b1
        tar = new > half
        print(tar)
        if tar[0]:
            b1[0] = half[0] + b1[0]
        else:
            b2[0] = half[0] + b1[0]