import nn as nn
from stock import Stock
import stock_data as sd
import getSP500 as sp
from top10 import top10
import time
import datetime
import os

if os.geteuid() != 0:
    exit("You need to have root privileges to run this script.\n"
         "Please try again, this time using 'sudo'. Exiting.")

run_this = True
while True:
    now = datetime.datetime.now()
    # Re-arm the daily trigger any time before 4 p.m. ...
    if now.hour >= 0 and now.hour < 16:
        run_this = True
    # ... then fire the model run exactly once after 4 p.m.
    if now.hour >= 16 and run_this:
        nn.run()
        run_this = False
    # Sleep just under an hour so no hourly window is skipped.
    time.sleep(3550)
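# The scheduler above only assumes that the nn module exposes a zero-argument
# run() entry point. A minimal stand-in for testing the loop locally -- purely
# hypothetical; the real nn module presumably trains/scores the stock model:

import datetime

def run():
    # Hypothetical stub standing in for the nightly model run.
    print("nn.run() fired at", datetime.datetime.now().isoformat())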
# Build the full hyperparameter grid as 7-tuples of
# (l1_size, learning_rate, lambda, weight, batch_size, epochs, activation).
parms = [(a, b, c, d, e, f, g)
         for a in l1_size
         for b in learning_rate
         for c in Lambda
         for d in weight
         for e in batch_size
         for f in epochs
         for g in activation]

parm_dict = {}  # holds the hyperparameter combination for one run
results = []    # holds the hyperparameters and results for each run
start_time = time.time()
loop = count = 1
for x in parms:
    for i in range(loop):
        parm_dict['l1_size'] = x[0]
        parm_dict['learning_rate'] = x[1]
        parm_dict['lambda'] = x[2]
        parm_dict['weight'] = x[3]
        parm_dict['batch_size'] = x[4]
        parm_dict['epochs'] = x[5]
        parm_dict['activation'] = x[6]
        job_name = "job_" + job_id + "/" + "run_" + str(count)
        lift = run(data_dict, parm_dict, job_name)
        # Snapshot parm_dict: the same dict is mutated every iteration,
        # so appending it directly would leave every row identical.
        tup = (count, dict(parm_dict), lift)
        results.append(tup)
        count += 1

# Write out a summary of the results
writeResults(results, job_id)
job_id = int(job_id)
job.setJob(job_id + 1)
print("Job {} complete after {:,.0f} minutes".format(
    str(job_id), (time.time() - start_time) / 60))
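# writeResults() is not defined in this fragment. A plausible sketch -- an
# assumption, not the author's actual helper -- that flattens each
# (count, parms, lift) tuple into one CSV row per run:

import csv

def writeResults(results, job_id):
    """Hypothetical summary writer: one CSV row per hyperparameter run."""
    fields = ['run', 'l1_size', 'learning_rate', 'lambda',
              'weight', 'batch_size', 'epochs', 'activation', 'lift']
    with open("job_{}_summary.csv".format(job_id), "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=fields)
        writer.writeheader()
        for count, parms, lift in results:
            writer.writerow(dict(parms, run=count, lift=lift))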
# train.hist(column='dQrauto_KCE', bins=100)

# 2. Scale and transform the original data
utils.pTitle("2. Scaling and Transform original data")
train_val = train.values
scaler = StandardScaler()
# scaler = StandardScaler(with_mean=False)
scaler.fit(train_val)
print(scaler.mean_)
train_scal = scaler.transform(train_val)
Xtrain = train_scal[:, 0:2]
Ytrain = train_scal[:, 2]

# Apply the scaler fitted on train to the test set as well.
test_val = test.values
test_scal = scaler.transform(test_val)
Xtest = test_scal[:, 0:2]
Ytest = test_scal[:, 2]

#! Explore
# plt.hist(Ytrain, bins=100)

# Sweep network width (neurons per layer) against depth (layers).
neuronas = [15, 45, 65, 75, 115, 150]
capas = [2, 3, 4]
for i in neuronas:
    for c in capas:
        utils.pTitle2("Neuronas: " + str(i) + ", Capas: " + str(c))
        out = nn.run(Xtrain, Ytrain, Xtest, Ytest, resdir, i, c)

#! Save predictions back on the original (unscaled) data scale.
# Note: only the last configuration's predictions survive the loop above.
# predictions.T[0], Ytest
out = (out * np.sqrt(scaler.var_[2])) + scaler.mean_[2]
out.to_csv(resdir + "/Predic.csv")
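# The manual un-scaling above is just the column-2 slice of StandardScaler's
# inverse transform: x_orig = x_scaled * sqrt(var_) + mean_. A sketch of the
# same step through scikit-learn's own inverse_transform (the zero-padding of
# the two feature columns only exists to satisfy the expected shape):

import numpy as np

def unscale_target(scaler, y_scaled):
    # Rebuild a full 3-column array so inverse_transform gets the shape
    # it was fitted on, then keep only the recovered target column.
    padded = np.zeros((len(y_scaled), 3))
    padded[:, 2] = np.asarray(y_scaled).ravel()
    return scaler.inverse_transform(padded)[:, 2]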
# Second grid-search variant: the same 7-way hyperparameter grid.
parms = [(a, b, c, d, e, f, g)
         for a in l1_size
         for b in learning_rate
         for c in Lambda
         for d in weight
         for e in batch_size
         for f in epochs
         for g in activation]

parm_dict = {}
count = 1
start_time = time.time()
loop = 1
for i in range(loop):
    for x in parms:
        loop_time = time.time()  # per-run timer (set but not read here)
        parm_dict['l1_size'] = x[0]
        parm_dict['learning_rate'] = x[1]
        parm_dict['lambda'] = x[2]
        parm_dict['weight'] = x[3]
        parm_dict['batch_size'] = x[4]
        parm_dict['epochs'] = x[5]
        parm_dict['activation'] = x[6]
        results = run(data_dict, parm_dict, count)
        save_results(count, parm_dict, results)
        count += 1

job_id += 1
job.setJob(job_id)
summary.close()
print('Total time: {:,.0f} minutes'.format(
    (time.time() - start_time) / 60))
sys.exit()
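# The seven chained `for` clauses above are equivalent to one
# itertools.product call, which reads more directly and builds the parameter
# dict in a single zip -- a sketch, assuming the same seven grid lists are in
# scope:

import itertools

PARM_KEYS = ('l1_size', 'learning_rate', 'lambda',
             'weight', 'batch_size', 'epochs', 'activation')

def parm_grid(l1_size, learning_rate, Lambda, weight,
              batch_size, epochs, activation):
    """Yield one dict per combination, in the same order as the comprehension."""
    for combo in itertools.product(l1_size, learning_rate, Lambda, weight,
                                   batch_size, epochs, activation):
        yield dict(zip(PARM_KEYS, combo))

# Usage: for parm_dict in parm_grid(l1_size, learning_rate, Lambda, weight,
#                                   batch_size, epochs, activation): ...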