def solve(x, c, c1, n, m, root, start, stop, f_index):
    print(__name__)
    # guard so pool workers (spawned under a different __name__) do not
    # re-enter the driver code
    if __name__ == 'ENM_terminal':
        startTime = datetime.now()
        pool = mp.Pool(processes=8)
        #n = 512j
        #m = 512j
        #root = [1.9619, 0.5525]
        #start = -50
        #stop = 50
        #f_index = 3
        #c1 = [2, 1]
        ans = np.zeros((int(n.imag), int(m.imag), 3))
        ansSet = {}
        #c = np.repeat(np.array(c1), -(n*m).real, axis=0).reshape(2, -1).T
        #workvec = np.array([2, 5])
        #c = np.multiply(x, workvec)
        #c = x**2
        #c = x - (1e-5)
        #c = -x
        #c = np.random.rand(int(-(n*m).real), 2)
        #list = [a for a in zip(x.tolist(), c.tolist())]
        # n and m are imaginary grid steps (e.g. 512j), so -(n * m).real is
        # the total number of grid points
        for i, ii in enumerate(pool.imap(enm.solve, np.hstack((x, c)))):
            tools.printProgressBar(i + 1, -(n * m).real, prefix="Progress",
                                   suffix="Complete", length=50)
            if ii[0] is not None:
                if tuple(ii[0]) not in ansSet:
                    ansSet[tuple(ii[0])] = np.array([len(ansSet) + 1, 0, 0],
                                                    dtype='int32')
                    # print(ansSet[tuple(t[0])])
                work = ansSet[tuple(ii[0])]
                work[1] = ii[1]
            if ii[0] is None:
                work = [0, ii[1], 0]
            ans[i % int(n.imag), i // int(n.imag), :] = work
        pool.close()
        pool.join()
        print("done")
        print(datetime.now() - startTime)
        name = (f"FN-{f_index} X ({start}, {stop}, {int(n.imag)}x{int(m.imag)})"
                + f" C ({c1})")
        # np.save opens and writes the file itself
        np.save(FILE_PATH + name + '.npy', ans, allow_pickle=False)
        np.save(FILE_PATH + name + '_ansSet' + '.npy', ansSet)
        print(ansSet)
        sendmessage("Task Finished")
    return
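# A minimal sketch of reading a saved run back in, using a hypothetical
# load_run helper (not part of the original module) and assuming FILE_PATH
# and a name matching the pattern solve() writes. np.save pickles the ansSet
# dict, so allow_pickle=True and .item() are needed to recover it.
def load_run(name):
    ans = np.load(FILE_PATH + name + '.npy')
    ansSet = np.load(FILE_PATH + name + '_ansSet.npy', allow_pickle=True).item()
    return ans, ansSet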
def Klearn(Kern, N, X):
    """ KLMS online update/train routine """
    Xlen = X.shape[1]
    for i in range(N, Xlen):
        # cut a data chunk
        x = X[:, i - N:i][0][::-1]
        # update for chunk
        Kern.update(x, X[:, i])
        # Progress Bar
        ts.printProgressBar(i, Xlen, prefix='KLMS Learning', length=25)
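# Klearn (and Kpredict below) only assume the kernel object exposes
# update(x, d) and predict(x). A hypothetical minimal Gaussian-kernel KLMS
# filter satisfying that interface, not the project's actual kernel class:
class SimpleKLMS:
    def __init__(self, eta=0.1, sigma=1.0):
        self.eta, self.sigma = eta, sigma
        self.centers, self.coeffs = [], []

    def _kernel(self, a, b):
        return np.exp(-np.sum((a - b) ** 2) / (2 * self.sigma ** 2))

    def predict(self, x):
        # kernel expansion over the stored centers
        return sum(c * self._kernel(ctr, x)
                   for c, ctr in zip(self.coeffs, self.centers))

    def update(self, x, d):
        e = d - self.predict(x)           # instantaneous error
        self.centers.append(np.array(x))  # KLMS grows one center per sample
        self.coeffs.append(self.eta * e)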
def train(model: CVAEModel, data: DataManager, folder_name,
          batch_size=1024 * 16, epochs=400, restart=True):
    '''
    Train a specific model on data with the given batch size.

    The training process saves a checkpoint every 10 epochs in the specified
    folder, under subfolders named state0, state1, ... If states are already
    saved, training resumes from the last saved state unless restart is True.
    '''
    state = create_initial_state(model, epochs)
    stateID = 0
    while os.path.exists(folder_name + "\\state" + str(stateID + 1)):
        stateID += 1
        if restart:
            shutil.rmtree(folder_name + "\\state" + str(stateID))
    if restart:
        stateID = 0
    if stateID > 0:
        state.load(folder_name + "\\state" + str(stateID))
    else:
        state.save(folder_name + "\\state0")
    for e in range(epochs):
        loss = state.advance(data, batch_size=batch_size)
        if state.getCurrentEpoch() % 10 == 0:
            printProgressBar((e + 1) / epochs,
                             prefix='Epoch ' + str(state.getCurrentEpoch()),
                             suffix='Loss: ' + str(loss))
            stateID += 1
            state.save(folder_name + "\\state" + str(stateID))
    print()
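# Usage sketch (assuming model and data are already constructed; the folder
# name is hypothetical). With restart=False an interrupted run resumes from
# the last saved state:
#
#   train(model, data, r"runs\cvae", batch_size=1024 * 16, epochs=400,
#         restart=False)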
def Kpredict(Kern, N, X):
    """ KLMS online test/predict routine """
    Xlen = X.shape[1]
    Kern.prediction = [0]
    Kern.errors = [0]
    for i in range(N, Xlen):
        # cut a data chunk
        x = X[:, i - N:i][0][::-1]
        y = Kern.predict(x)
        # calculate error
        Kern.error = X[:, i] - y
        Kern.prediction.append(y)
        # save Test Error and Prediction
        Kern.errors.append(np.square(Kern.error))
        # Progress Bar
        ts.printProgressBar(i, Xlen, prefix='KLMS Predicting', length=25)
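# End-to-end sketch tying the two routines together on a toy series, using
# the hypothetical SimpleKLMS stub above (ts.printProgressBar is the
# project's own helper):
X = np.sin(np.linspace(0, 20, 500)).reshape(1, -1)
kern = SimpleKLMS(eta=0.2, sigma=0.5)
Klearn(kern, N=8, X=X[:, :400])    # train online on the first 400 samples
Kpredict(kern, N=8, X=X[:, 400:])  # predict over the held-out tail
print("mean squared test error:", np.mean(kern.errors[1:]))  # skip the 0 seed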
m = 512j
root = [0, -0]
start = -2
stop = 2
f_index = 6
x = np.mgrid[start + root[0]:stop + root[0]:n,
             start + root[1]:stop + root[1]:m].reshape(2, -1).T
ans = np.zeros((int(n.imag), int(m.imag), 3))
ansSet = {}
#q = nm.Newton(x.tolist()[0])
#print("Return answer:", q[0])
for i, ii in enumerate(pool.imap(nm.solve, x.tolist()), start=0):
    tools.printProgressBar(i, -(n * m).real, prefix="Progress",
                           suffix="Complete", length=50)
    if ii[0] is not None:
        if tuple(ii[0].flatten().tolist()) not in ansSet:
            ansSet[tuple(ii[0].flatten().tolist())] = np.array(
                [len(ansSet) + 1, ii[1], 0], dtype='int32')
            # print(ansSet[tuple(t.roots.flatten().tolist())])
        work = ansSet[tuple(ii[0].flatten().tolist())]
        work[1] = ii[1]
        ans[i % int(n.imag), i // int(n.imag), :] = work
pool.close()
pool.join()
print("Done in:", datetime.now() - startTime)
print("Ans set length:", len(ansSet))
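# Each ansSet value is [label, steps, 0], where label is the index written
# into ans; a quick sketch of listing the distinct roots found:
for root_key, (label, steps, _) in ansSet.items():
    print(f"root #{label} at {root_key}, last converged in {steps} steps")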
def f(x):
    # base function, per FUNCTION = "x*(1-x)" below
    return x * (1 - x)


def f1(x):
    return 1 - (2 * x)


def f2(x):
    return -2 + 0 * x


fList = [f, f1, f2]
RANGE_X = [-5, 6]
RANGE_C = [-5, 6]
FUNCTION = "x*(1-x)"
dataPath = r'results'
fileName = "/info.db"
count = 0
for i in range(*RANGE_X):
    for j in range(*RANGE_C):
        count += 1
        if j == i:
            continue
        tools.printProgressBar(count,
                               (RANGE_X[1] - RANGE_X[0]) * (RANGE_C[1] - RANGE_C[0]),
                               prefix="Progress", suffix="Complete", length=50)
        data = [tst.test(i, j, FUNCTION, fList)]
        tst.writeToDB(dataPath, fileName, data)
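# A quick finite-difference sanity check (not part of the original sweep)
# that f1 and f2 really are the first and second derivatives of f:
h = 1e-6
for x0 in np.linspace(*RANGE_X, 10):
    assert abs(f1(x0) - (f(x0 + h) - f(x0 - h)) / (2 * h)) < 1e-4
    assert abs(f2(x0) - (f1(x0 + h) - f1(x0 - h)) / (2 * h)) < 1e-4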
n = 100
m = 100
start = -20
stop = 20
f_index = 5
ans = np.zeros((m, n, 3))
ansSet = {}
for i, ii in enumerate(np.linspace(start, stop, n)):
    for j, jj in enumerate(np.linspace(start, stop, m)):
        # print('this is i', i)
        # print('this is j', j)
        tools.printProgressBar(i * n + j, n * m - 1, prefix="Progress",
                               suffix="Complete", length=50)
        x = np.array([ii, jj], dtype='float64').reshape((2, 1))
        t = ENM.ENM(x, c, f)
        if t.roots is not None:
            if tuple(t.roots.flatten().tolist()) not in ansSet:
                # print("yay")
                ansSet[tuple(t.roots.flatten().tolist())] = np.array(
                    [len(ansSet) + 1, len(t.steps), 0], dtype='int32')
                # print(ansSet[tuple(t.roots.flatten().tolist())])
            ans[j, i, :] = ansSet[tuple(t.roots.flatten().tolist())]
# print(datetime.now() - startTime)
name = (f"F-{f_index} X ({start}, {stop}, {n}x{m})"
        + f" C ({c.flatten().tolist()}).npy")
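# ans[:, :, 0] holds a per-pixel root label (0 where no root was found), so
# the basins can be previewed directly; a minimal matplotlib sketch (axis
# orientation may need adjusting):
import matplotlib.pyplot as plt
plt.imshow(ans[:, :, 0], extent=[start, stop, start, stop], origin='lower')
plt.colorbar(label='root index')
plt.title(name)
plt.show()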
x = np.linspace(2, 6, 15)
#x = np.mgrid[0.1:10:20j, 0.1:10:20j].reshape(2, -1).T
y = func(x, COEFFICIENTS)
maxIterations = 100
Ans = None
cnt = 0
coeff = 20
coefff = 1
for NOISE in np.linspace(0, 4 * coeff, 9):
    SNR = 10 * np.log10(np.abs(np.mean(y)) / (np.abs(np.mean(y)) + NOISE))
    #NOISE = np.linalg.norm(y)/SNR - np.linalg.norm(y)
    for DISTANCE in np.linspace(0, 6 / coefff, 13):
        ans = []
        for r in range(3):
            cnt += 1
            tools.printProgressBar(cnt, 3 * 13 * 9, prefix="Progress",
                                   suffix="Complete", length=50)
            yn = y + NOISE * np.array(lhsmdu.sample(y.shape[0], 1)).flatten()
            init_guess = (np.array(lhsmdu.sample(len(COEFFICIENTS), 1)).flatten()
                          * (10 ** DISTANCE))
            g = GNSolver(func, der_func, der2_func,
                         original_root=COEFFICIENTS, max_iter=maxIterations)
            startTime = datetime.now()
            a = g.fit(x, yn, init_guess, False)
            if a is not None:
                #print(a[5])
                mse = g.get_mse(a[0])  # renamed: "r" shadowed the loop variable
                try:
                    if a[1] != maxIterations:
                        ans.append([NOISE / coeff, DISTANCE * coefff, a[1], SNR])
                except:
                    continue
        if len(ans) == 0:
            ans = [np.array([NOISE / coeff, DISTANCE * coefff, 0, SNR])]
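# lhsmdu.sample(d, n) returns a d x n numpy matrix of Latin-hypercube draws
# in (0, 1), which is why the noise and initial-guess vectors above are
# flattened and rescaled; for example:
print(np.array(lhsmdu.sample(3, 1)).flatten())  # three draws in (0, 1)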
# with the hjm object instantiated we can start computing forward rate distributions:
liborf = []
# compute forwards with starting time at t and tenors at T, namely F(t0, t, T),
# where t0 is implicitly the most recent date in the US Treasury input file,
# t is the forward starting time and T is the tenor.
t = [0.25, 0.5, 0.75, 1.0]
T = [0.25, 0.5, 1.0]
# our Monte Carlo simulation will include 1000 paths
Nsims = 1000
rates = {}
# here we run the Monte Carlo, computing all the forwards F(t0, t, T) specified
# by the t and T vectors for each path.
tools.printProgressBar(0, Nsims * len(T), prefix='Progress:',
                       suffix='Complete', length=50)
counter = 0
for k in T:
    rates[k] = []
    for n in range(Nsims):
        cube = hjmo.run_montecarlo_path()
        xxx = [
            hjmo.integrateforward(cube=cube, t=s, TenorinYears=k) for s in t
        ]
        rates[k].append(xxx)
        counter += 1  # one unit of progress per simulated path
        tools.printProgressBar(counter, Nsims * len(T), prefix='Progress:',
                               suffix='Complete', length=50)
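# With the simulation done, each rates[k] is Nsims x len(t); a sketch of
# summarizing the simulated forward-rate distributions per start time and
# tenor (assuming integrateforward returns a scalar rate):
for k in T:
    sample = np.array(rates[k])  # shape (Nsims, len(t))
    for j, s in enumerate(t):
        print(f"F(t0, t={s}, tenor={k}y): mean={sample[:, j].mean():.4f}, "
              f"std={sample[:, j].std():.4f}")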