def main():
    #Data.highlighted_image.browse(Data.dirs.images.images, Data.dirs.images.cell)
    #return

    # Streaming dataset of augmented 256x256 crops; a crop is kept only when the
    # cell channel is prominent enough.
    images = Data.ImageIterableDataset(
        (Data.dirs.images.images, Data.dirs.images.cell),
        Data.data_aug.flip_scale_pipeline,
        Data.CropGenerator(
            (256, 256),
            validate_crop=lambda t: .1 < t[1].mean(),  # cell prominence
        ),
        n_streams=8,
    )
    images = torch.utils.data.DataLoader(images, batch_size=64, drop_last=True)

    # Variational autoencoder from the project's Models package.
    ae = Models.Autoencoders.VAE(2048, 2)

    # Print a layer-by-layer summary for one forward pass.
    Models.LayerInfo.on()
    print(ae)
    out = ae(next(iter(images)))
    Models.LayerInfo.off()

    Training.VAE.train(ae, images, epochs=256)
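

# Assumed entry point: the section defines main() but shows no call to it, so this
# standard guard is added as a sketch to make the script directly executable.
if __name__ == "__main__":
    main()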
import os
import pickle
from timeit import default_timer as timer

from src import Plotting as p
# Assumed import: Data provides get_pandas_dataframe (path mirrors the Plotting import above).
from src import Data

n = 1000        # n: Number of agents
s = 8           # s: Number of strategies per agent
g_a = 60        # g_a: DGA rounds length
g_m = 0.5       # g_m: Mutation rate
g_p = 8         # g_p: Size of the communication pool
p_max = 10      # p_max: Max position held by each agent
ncycles = 100   # ncycles: Number of cycles the algorithm runs over the optimization period
max_t = 20      # Max value that Tm, Td and Ty can take inside the strategies

stock = 'SP 500'
df = Data.get_pandas_dataframe(r'datasets/SP500.csv', 'Open').drop(columns=['index'])
title = 'SP 500'

prices = list(df.Price.values)
dates = list(df.Date.values)
Mt = list(df.Mt.values)
Dt = list(df.Dt.values)
Yt = list(df.Yt.values)

initial_date = '1990-01-02'
cut_date = '2005-05-12'
end_date = '2012-02-10'
initial_idx = dates.index(initial_date)
cut_idx = dates.index(cut_date)
end_idx = dates.index(end_date)
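
# --- Sketch (assumption) ----------------------------------------------------
# This per-dataset script stops after computing the date indices above. Based on
# the interactive main() elsewhere in this repo, the run step presumably looks
# like the following; the 'Alg' import path and the unpacked result names are
# taken from that script and are assumptions here, not part of this file.
# from src import Algorithm as Alg
# alg = Alg.Algorithm(prices, Mt, Dt, Yt, n, s, g_a, g_m, g_p, p_max,
#                     ncycles, initial_idx, cut_idx, end_idx, max_t)
# (pool, all_agents, statistics_opt, avg_wealth_t_test,
#  avg_position_t_test, max_opt, min_opt) = alg.run()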
    elif message.content.startswith(Constants.status):
        await statusPrompt.print_current_statuses(message)
    elif message.content.startswith(Constants.my_status):
        await statusPrompt.print_my_status(message)
    elif message.content.startswith(Constants.heroAction):
        await actionPrompt.do_action(message)
    elif message.content.startswith(Constants.partyAction):
        await roomChange.parse_action(message)


data = Data.Data({
    Constants.items: [],
    Constants.characters: [],
    Constants.rooms: copy.copy(Constants.room_names),
    Constants.doors: copy.copy(Constants.door_names),
})
data.current_room.objects.append(
    Character({
        Constants.health: 500,
        Constants.value: 50,
        Constants.attack: 0,
        Constants.speed: 0,
        Constants.mana: 0,
        Constants.crit: 0,
        Constants.name: "Target_Dummy",
        Constants.description: "An unassuming target dummy",
        Constants.inventory: [Item({
            Constants.name: "Head",
            Constants.description: "The head of your conquered foe, the target dummy",
from __future__ import print_function

import keras.backend as K
import numpy as np
from keras.utils import plot_model

import src.Data as Data
import src.net as net

'''
This file draws the structure of the neural networks to PDF files.
'''

(x_train, y_train), (x_test, y_test), input_shape, batch_size, num_classes, epoches = Data.getMiniData()

np.random.seed(14343)
cnn = net.cnnOneLayer(input_shape, 2, (4, 4), batch_size=128)
plot_model(cnn, show_shapes=True, show_layer_names=True, to_file='./logs/cnn.pdf')

np.random.seed(14343)
fcnn = net.fcOneLayer((13 * 13, 16), batch_size=128)
plot_model(fcnn, show_shapes=True, show_layer_names=True, to_file='./logs/fc.pdf')

K.clear_session()
    type=str,
    default='model_files/MIE',
    help='Location to save.')
args = parser.parse_args()
# import pdb; pdb.set_trace()

os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id

dictionary = Dictionary()
dictionary.load('./data/dictionary.txt')

ontology = Ontology(dictionary)
ontology.add_raw('./data/ontology.json', '状态')
ontology.add_examples('./data/example_dict.json')

data = Data(100, dictionary, ontology)
data.add_raw('train', './data/train.json', 'window')
data.add_raw('test', './data/test.json', 'window')
data.add_raw('dev', './data/dev.json', 'window')

# Params of the model.
params = {
    "add_global": args.add_global,
    "num_units": args.hidden_size,
    "num_layers": args.mlp_layer_num,
    "keep_p": args.keep_p
}

# Initialize the model.
model = MIE(data, ontology, params=params)
def main():
    print("================================")
    print("       AIStocksAgents-v1        ")
    print("================================")
    print("Select source .csv file (/full/path/to/file):")
    file = input().strip()
    print("Select initial date of optimization period (same format as in .csv files):")
    initial_date = input().strip()
    print("Select cut date of optimization period (same format as in .csv files):")
    cut_date = input().strip()
    print("Select end date of test period (same format as in .csv files):")
    end_date = input().strip()
    print("Select number of optimization cycles:")
    ncycles = int(input().strip())
    print("Select folder where results will be stored (/full/path/to/folder):")
    results = input().strip()

    n = 1000      # n: Number of agents
    s = 8         # s: Number of strategies per agent
    g_a = 60      # g_a: DGA rounds length
    g_m = 0.5     # g_m: Mutation rate
    g_p = 8       # g_p: Size of the communication pool
    p_max = 10    # p_max: Max position held by agent
    max_t = 20    # Max value that Tm, Td and Ty can take inside the strategies

    df = dat.get_pandas_dataframe(file, 'Open').drop(columns=['index'])
    prices = list(df.Price.values)
    dates = list(df.Date.values)
    Mt = list(df.Mt.values)
    Dt = list(df.Dt.values)
    Yt = list(df.Yt.values)

    initial_idx = dates.index(initial_date)
    cut_idx = dates.index(cut_date)
    end_idx = dates.index(end_date)

    print('Initial date (index in series) is ' + initial_date + ' (' + str(initial_idx) + ')')
    print('Cut date (index in series) is ' + cut_date + ' (' + str(cut_idx) + ')')
    print('End date (index in series) is ' + end_date + ' (' + str(end_idx) + ')')
    print('Length in timesteps of the optimization period: ' + str(cut_idx - initial_idx))
    print('Length in timesteps of the test period: ' + str(end_idx - cut_idx))

    alg = Alg.Algorithm(prices, Mt, Dt, Yt, n, s, g_a, g_m, g_p, p_max,
                        ncycles, initial_idx, cut_idx, end_idx, max_t)

    start = timer()
    print('Starting the simulation...')
    pool, all_agents, statistics_opt, avg_wealth_t_test, avg_position_t_test, max_opt, min_opt = alg.run()
    end = timer() - start
    print('Duration of the execution ', f.normalize_seconds(end))

    tmp = str(datetime.datetime.now())
    directory = results + '/' + tmp
    os.mkdir(directory)

    last_price_opt = prices[cut_idx]
    last_price_test = prices[end_idx]

    p.save_histogram(all_agents,
                     'Number of agents by wealth - Histogram - Optimization', 25,
                     directory + '/histogram-optimization-period.png')
    p.save_histogram([a.get_wealth(last_price_test) for a in pool],
                     'Number of agents by wealth - Histogram - Test', 25,
                     directory + '/histogram-test-period.png')

    # Position + Price chart
    plt.rcParams["figure.figsize"] = (12, 8)
    fig, ax1 = plt.subplots()
    color = 'black'
    ax1.set_xlabel('Day')
    ax1.set_ylabel('Stock')
    ax1.plot(prices[cut_idx:end_idx], color=color)
    ax2 = ax1.twinx()
    color = 'orange'
    ax2.set_ylabel('Average Position', color=color)
    ax2.plot(avg_position_t_test, color=color, linewidth=0.8, alpha=0.7)
    ax2.tick_params(axis='y', labelcolor=color)
    fig.tight_layout()
    plt.savefig(directory + '/position-price-test-chart.png')

    # Wealth + Price chart
    fig, ax1 = plt.subplots()
    color = 'black'
    ax1.set_xlabel('Day')
    ax1.set_ylabel('Stock')
    ax1.plot(prices[cut_idx:end_idx], color=color)
    ax2 = ax1.twinx()
    color = 'red'
    ax2.set_ylabel('Mean Wealth', color=color)
    ax2.plot(avg_wealth_t_test, color=color)
    ax2.tick_params(axis='y', labelcolor=color)
    fig.tight_layout()
    plt.savefig(directory + '/wealth-price-test-chart.png')

    test_result = [agent.get_wealth(last_price_test) for agent in pool]
    with open(directory + '/report.txt', 'w') as file:
        p.out_report(file, alg, statistics_opt, max_opt, min_opt, test_result, 'Stock')
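

# Assumed entry point (not shown in the original section): invoke main() when the
# script is run directly.
if __name__ == "__main__":
    main()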
parser.add_argument('-lr', '--lrate', type=float, help='learning rate', required=True)
args = parser.parse_args()

layers = [int(item) for item in args.layers.split(',')]
sim_id = args.id
lr = args.lrate

myModel = CNN2DModel(num_gpus=1, sim_id=sim_id)
myModel.build_model(blocks=layers)
myModel.compile_model(lr=lr, verbose=True)
myModel.build_callbacks('./logs' + str(sim_id))

# Without generators: load the full training arrays into memory.
data = Data()
X_data, Y_data = data.load_XY(pathX='./train/X_data.npz', pathY='./train/Y_data.npz')

# Load checkpoint.
myModel.load_checkpoint()
myModel.fit_model(X_data, Y_data, epochs=80)
import os
import pickle
from timeit import default_timer as timer

from src import Plotting as p
# Assumed import: Data provides get_pandas_dataframe (path mirrors the Plotting import above).
from src import Data

n = 1000        # n: Number of agents
s = 8           # s: Number of strategies per agent
g_a = 60        # g_a: DGA rounds length
g_m = 0.5       # g_m: Mutation rate
g_p = 8         # g_p: Size of the communication pool
p_max = 10      # p_max: Max position held by each agent
ncycles = 100   # ncycles: Number of cycles the algorithm runs over the optimization period
max_t = 20      # Max value that Tm, Td and Ty can take inside the strategies

stock = 'NKY 225'
df = Data.get_pandas_dataframe(r'datasets/NKY225.csv', 'Close').drop(columns=['index'])
title = 'NKY 225'

prices = list(df.Price.values)
dates = list(df.Date.values)
Mt = list(df.Mt.values)
Dt = list(df.Dt.values)
Yt = list(df.Yt.values)

initial_date = '1993-03-17'
cut_date = '2005-05-12'
end_date = '2012-05-17'
initial_idx = dates.index(initial_date)
cut_idx = dates.index(cut_date)
end_idx = dates.index(end_date)