def check_examples():
    """Step through the pixel-example stream generated from the sample GIF.

    Sleeps half a second per batch so the stream can be inspected manually.
    """
    cfg = Config("../../../configs/config.json")
    window = 120
    pixels = generate_rbg_pixels(cfg["data_dir"] + "gifs/tenor.gif", window_size=window)
    for _examples in generate_pixel_examples(pixels):
        time.sleep(.5)
def text_sequence():
    """Build an example stream that pairs a text with itself shifted by one symbol."""
    config = Config("../configs/config.json")
    text_path = config["data_dir"] + "Texts/" + "emma.txt"
    inputs = sequence_nominal_text(text_path)
    targets = sequence_nominal_text(text_path)
    # advance targets once so every input symbol is paired with its successor
    next(targets)
    return from_sequences(((inputs, targets),))
def check_frames():
    """Write every downsampled grayscale frame of the sample GIF as a numbered PNG."""
    cfg = Config("../../../configs/config.json")
    window = 5
    gif_path = cfg["data_dir"] + "gifs/tenor.gif"
    source = Image.open(gif_path)
    # each output frame is shrunk by the window factor in both dimensions
    frame_width = source.width // window
    frame_height = source.height // window
    grayscale_frames = generate_grayscale_pixels(generate_rbg_pixels(gif_path, window_size=window))
    for index, frame_values in enumerate(grayscale_frames):
        print(f"processing frame {index}...")
        write_image(frame_values, frame_width, frame_height, "{:03d}.png".format(index))
def test_nominal_functionality():
    """Print the nominal functionality score over consecutive-symbol pairs of a text."""
    cfg = Config("../configs/config.json")
    symbols = sequence_nominal_text(cfg["data_dir"] + "Texts/pride_prejudice.txt")
    pairs = []
    previous = next(symbols)
    for current in symbols:
        # each example maps a symbol to the symbol that follows it
        pairs.append((previous, current))
        previous = current
    print(functionality_nominal(pairs))
def test_crypto_linear_functionality():
    """Score linear functionality on daily EOS/ETH rates and plot (value, delta) pairs."""
    cfg = Config("../configs/config.json")
    rates = sequence_rational_crypto(
        cfg["data_dir"] + "23Jun2017-23Jun2018-1m/EOSETH.csv", 60 * 60 * 24)
    examples = []
    previous = next(rates)
    for current in rates:
        # pair each rate with its change to the following rate
        examples.append((previous, current - previous))
        previous = current
    print(functionality_rational_linear(examples))
    pyplot.plot(*zip(*examples))
    pyplot.show()
def exchange_rate_sequence(start_stamp: int, end_stamp: int, look_ahead: int, in_cryptos: Sequence[str], out_crypto: str):
    """Return an example stream pairing merged input exchange rates with a look-ahead target.

    Inputs are per-minute rate sequences for each crypto in *in_cryptos*; the target
    marks whether *out_crypto*'s rate rises by at least 2% within *look_ahead* steps.
    """
    config = Config("../configs/config.json")
    data_dir = config["data_dir"] + "binance/"
    interval_seconds = 60

    def _rates(symbol: str):
        # per-minute rate sequence for one <symbol>/ETH pair inside the time window
        return sequence_rational_crypto(
            data_dir + "{:s}ETH.csv".format(symbol.upper()),
            interval_seconds, start_val=start_stamp, end_val=end_stamp)

    input_sequence = merge_iterators(tuple(_rates(symbol) for symbol in in_cryptos))
    raw_targets = greater_or_equal_than_before(_rates(out_crypto), .02, look_ahead)
    # targets are wrapped in 1-tuples to match the expected example shape
    target_sequence = ((value,) for value in raw_targets)
    return from_sequences(((input_sequence, target_sequence),))
# coding=utf-8 from _framework.setup import Setup from _framework.streams.linear.rational.implementations.exchange_rate_stream import ExchangeRateStream from _framework.systems.predictors.rational.implementations.rational_average_predictor import RationalAverage from _framework.systems.predictors.rational.implementations.rational_regression_predictor import RationalLinearRegression from _framework.systems.predictors.rational.implementations.rational_semiotic_predictor import RationalSemiotic from tools.load_configs import Config if __name__ == "__main__": config = Config("../../configs/config.json") path_dir = config["data_dir"] + "binance/" history_length = 1 input_file_paths = path_dir + "EOSETH.csv", path_dir + "SNTETH.csv", path_dir + "QTUMETH.csv", path_dir + "BNTETH.csv" target_file_paths = path_dir + "BNBETH.csv", input_dimensions = history_length * len(input_file_paths) output_dimensions = len(target_file_paths) exchange_parameters = { "input_file_paths": input_file_paths, "target_file_paths": target_file_paths, "start_time": "2017-08-09T09:00:00+00:00", "end_time": "2018-07-25T08:30:00+00:00", "interval_seconds": 60, "offset_seconds": 60 * 60 * 1, "history_length": history_length, } experiments = ( {
def controlled_grid_interaction(predictor: Predictor, iterations: int = 500000):
    """Run a Sarsa-controlled agent in a grid world while fitting *predictor* online.

    Each step the predictor is asked to predict the next sensor reading from the
    previous (sensor, motor) pair and is then fitted on the observed outcome.
    Running averages of reward, prediction error, and fit duration are streamed
    to Visualize roughly 1000 times over the run.

    :param predictor: model with predict/fit/get_state, keyed into plots by class name
    :param iterations: number of interaction steps to run
    """
    c = Config("../configs/config.json")
    data_dir = c["data_dir"] + "grid_worlds/"
    grid_world = GridWorldLocal(data_dir + "square.txt", rotational=True)
    # grid_world = GridWorldLocal(data_dir + "simple.txt", rotational=False)
    # grid_world = GridWorldGlobal(data_dir + "sutton.txt", rotational=False)
    controller = SarsaController(grid_world.get_motor_range(), alpha=.8, gamma=.1, epsilon=.1)
    # controller = RandomController(grid_world.get_motor_range())
    # no previous observation/action exists before the first step
    last_sensor = None
    last_motor = None
    # bootstrap: a None action yields the initial sensor reading and reward
    sensor, reward = grid_world.react_to(None)
    # emit ~1000 visualization points regardless of the iteration count
    visualization_steps = iterations // 1000
    average_reward = .0
    average_error = .0
    average_duration = .0
    for t in range(iterations):
        # get data
        this_time = time.time()
        # single-example batches: trailing commas build 1-tuples
        concurrent_inputs = (last_sensor, last_motor),
        concurrent_outputs = predictor.predict(concurrent_inputs)
        concurrent_targets = sensor,
        concurrent_examples = (concurrent_inputs[0], concurrent_targets[0]),
        predictor.fit(concurrent_examples)
        # d measures only predict+fit wall-clock time, not the controller step
        d = time.time() - this_time
        # query controller
        perception = predictor.get_state(), last_sensor
        motor = controller.react_to(perception, reward)
        # fraction of mispredicted targets (0. or 1. here, since batch size is 1)
        error = sum(
            float(_o != _t)
            for _o, _t in zip(concurrent_outputs, concurrent_targets)) / len(
            concurrent_targets)
        # incremental running means over all steps so far
        average_reward = (average_reward * t + reward) / (t + 1)
        average_error = (average_error * t + error) / (t + 1)
        average_duration = (average_duration * t + d) / (t + 1)
        if (t + 1) % visualization_steps == 0:
            Visualize.append("reward", predictor.__class__.__name__, average_reward)
            Visualize.append("error", predictor.__class__.__name__, average_error)
            Visualize.append("duration", predictor.__class__.__name__, average_duration)
        # roll state forward: current observation/action become "last", then act
        last_sensor = sensor
        last_motor = motor
        sensor, reward = grid_world.react_to(motor)
        if Timer.time_passed(2000):
            print("Finished {:05.2f}%...".format(100. * t / iterations))
    Visualize.finalize("reward", predictor.__class__.__name__)
    Visualize.finalize("error", predictor.__class__.__name__)
    Visualize.finalize("duration", predictor.__class__.__name__)
def _run_model(predictor, gif_path, window_size, iterations, outputs_train, outputs_test):
    """Train and test one predictor on the GIF pixel streams, then finalize its target plots.

    :param predictor: model instance passed straight to setup()
    :param gif_path: path to the source GIF
    :param window_size: downsampling window for the pixel generators
    :param iterations: number of setup iterations
    :param outputs_train: plot names to finalize against "target train"
    :param outputs_test: plot names to finalize against "target test"
    """
    # NOTE(review): train and test streams share one generator, as in the original —
    # they consume alternating batches from the same GIF; confirm this is intended
    pixel_generator = generate_grayscale_pixels(
        generate_rbg_pixels(gif_path, window_size=window_size))
    train_streams = generate_pixel_examples(pixel_generator)
    test_streams = generate_pixel_examples(pixel_generator)
    setup(predictor, train_streams, test_streams, 1, iterations=iterations)
    for _each_output in outputs_train:
        Visualize.finalize(_each_output, "target train")
    for _each_output in outputs_test:
        Visualize.finalize(_each_output, "target test")


def experiment(iterations: int = 500):
    """Compare semiotic, regression, and moving-average predictors on GIF pixel streams.

    Initializes the shared Visualize plots, then runs each of the three models in
    turn via _run_model, and finally shows the combined visualization.

    :param iterations: number of training/testing iterations per model
    """
    size = 120
    out_dim = 1
    no_ex = 4
    # one shared name set instead of repeating the three-literal set five times
    model_names = {RationalSemioticModel.__name__, Regression.__name__, MovingAverage.__name__}
    plots = {
        "error train": set(model_names),
        "error test": set(model_names),
        "duration": set(model_names),
    }
    outputs_train = {
        f"output train{_o:02d}/{_e:02d}": model_names | {"target train"}
        for _o in range(out_dim) for _e in range(no_ex)
    }
    outputs_test = {
        f"output test{_o:02d}/{_e:02d}": model_names | {"target test"}
        for _o in range(out_dim) for _e in range(no_ex)
    }
    plots.update(outputs_train)
    plots.update(outputs_test)
    Visualize.init("gif", plots, x_range=iterations, refresh_rate=40)

    config = Config("../configs/config.json")
    gif_path = config["data_dir"] + "gifs/tenor.gif"

    # the three runs previously duplicated the stream/setup/finalize sequence verbatim
    _run_model(
        RationalSemioticModel(
            input_dimension=1, output_dimension=out_dim, no_examples=no_ex,
            alpha=100, sigma=.5, drag=100, trace_length=1),
        gif_path, size, iterations, outputs_train, outputs_test)

    print("Generating regression model...")
    _run_model(
        Regression(input_dimension=1, output_dimension=out_dim, drag=100, no_examples=no_ex),
        gif_path, size, iterations, outputs_train, outputs_test)

    print("Generating average model...")
    _run_model(
        MovingAverage(output_dimension=out_dim, drag=100, no_examples=no_ex),
        gif_path, size, iterations, outputs_train, outputs_test)

    print("done!")
    Visualize.show()