Example 1
import random
from DataStore import DataStore  # project-local cache class (see the other examples)


def randomize(graph):
    """Replace every node of the graph with a node keyed by a random coordinate in [0, 1)."""
    i = 0
    # Snapshot the node and neighbor lists: the graph is mutated inside the loop.
    for n in list(graph.nodes()):
        i += 1
        randomloc = float(random.randint(0, 999999999)) / 1000000000
        newnode = (randomloc,)
        neighbors = list(graph.neighbors(n))
        graph.add_node(newnode, id=i, ds=DataStore(100000))
        for neighbor in neighbors:
            graph.add_edge(newnode, neighbor)
        graph.remove_node(n)
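A minimal, hypothetical way to exercise the function; NetworkX and the importability of the project's DataStore module are assumptions here, not part of the original snippet:

import networkx as nx  # assumed: the snippet mutates a NetworkX-style graph

g = nx.path_graph(4)             # toy graph: 0 - 1 - 2 - 3
randomize(g)                     # every original node is replaced in place
print(g.number_of_nodes())       # still 4 nodes, now keyed by random coordinates
print(list(g.nodes(data=True)))  # each node carries the id and ds attributes set above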
Example 2
    def __init__(self, mentors=None, mentees=None, person_dict=None):
        """Load mentors, mentees and the person dict for the current round from the
        DataStore unless they are passed in explicitly."""
        self.ds = DataStore.DataStore()
        self.matching = None
        self.matching_list = None

        if mentors is None and mentees is None and person_dict is None:
            self.mentors, self.mentees, self.person_dict = self.ds.load_data(
                "initial_data_{}".format(global_vars.ROUND))
        else:
            self.mentors = mentors
            self.mentees = mentees
            self.person_dict = person_dict

        self.assignment_matrix = None
        self.now = datetime.now()  # current date and time
        self.now_str = self.now.strftime("%Y%m%d_%H_%M_%S")
Example 3
    def init_DS_list(self):
        """
        Init a list of empty DSs (Data Stores == caches)
        """
        self.DS_list = [
            DataStore.DataStore(
                ID=i,
                size=self.DS_size,
                bpe=self.bpe,
                mr1_estimation_window=self.estimation_window,
                max_fpr=self.max_fpr,
                max_fnr=self.max_fnr,
                verbose=self.verbose,
                uInterval=self.uInterval,
                num_of_insertions_between_estimations=self.num_of_insertions_between_estimations,
            ) for i in range(self.num_of_DSs)
        ]
Example 4
import DataStore as ds  # Trading comes from the surrounding project (module not shown in this snippet)

if __name__ == "__main__":
    # Make sure this grid size matches the value used for training

    batch_size = 25  # 50
    sequence_length = 250  # 500

    features_list = list(range(1, 33))  ## FULL
    features_list = list(range(1, 6))  ## SHORT!! -- overrides the FULL list above

    training_days = 1
    testing_days = 1
    max_memory = 500000

    training_store = ds.DataStore(training_days=training_days,
                                  features_list=features_list,
                                  sequence_length=sequence_length)
    testing_store = ds.DataStore(training_days=training_days,
                                 testing_days=testing_days,
                                 features_list=features_list,
                                 sequence_length=sequence_length,
                                 mean=training_store.mean,
                                 std=training_store.std)

    features_length = training_store.get_features_length()

    env = Trading(data_store=testing_store,
                  sequence_length=sequence_length,
                  features_length=features_length)
    num_actions = env.get_action_count()  # [sell, buy, flat], provided by Trading
Example 5
    def execute(self):
        # parameters
        epsilon = .5  # exploration
        epsilon_decay = 0.95
        epsilon_min = 0.1

        epoch = 4000  # number of training cycles
        max_memory = 2000  # needs to hold at least one full trading day

        batch_size = 50  # 50
        sequence_length = 250  # 500
        discount = 0.95

        training_days = 1
        testing_days = 1

        features_list = list(range(1, 33))  ## FULL
        features_list = list(range(1, 6))  ## SHORT!! -- overrides the FULL list above

        training_store = ds.DataStore(training_days=training_days,
                                      features_list=features_list,
                                      sequence_length=sequence_length)
        features_length = training_store.get_features_length()
        env = Trading(data_store=training_store,
                      sequence_length=sequence_length,
                      features_length=features_length)

        num_actions = env.get_action_count()  # [sell, buy, flat], provided by Trading

        #testing_store = ds.DataStore(training_days=training_days, testing_days=10, features_list=features_list, sequence_length=sequence_length)

        mo = Models()
        rms = RMSprop(lr=0.0001, rho=0.9, epsilon=1e-06)

        use_ufcnn = True
        if use_ufcnn:
            model = mo.model_ufcnn_concat(sequence_length=sequence_length,
                                          features=features_length,
                                          nb_filter=15,
                                          filter_length=5,
                                          output_dim=num_actions,
                                          optimizer=rms,
                                          loss='mse',
                                          batch_size=batch_size,
                                          init="normal")
            base_model_name = "ufcnn"
        else:
            model = mo.atari_conv_model(output_dim=num_actions,
                                        features=features_length,
                                        loss='mse',
                                        sequence_length=sequence_length,
                                        optimizer=rms,
                                        batch_size=batch_size,
                                        init="normal")
            base_model_name = "atari"

        testing_store = ds.DataStore(training_days=training_days,
                                     testing_days=testing_days,
                                     features_list=features_list,
                                     sequence_length=sequence_length,
                                     mean=training_store.mean,
                                     std=training_store.std)

        test_env = Trading(data_store=testing_store,
                           sequence_length=sequence_length,
                           features_length=features_length)

        #model = mo.atari_conv_model(regression=False, output_dim=num_actions, features=features_length, nb_filter=50,
        #                           loss='mse', sequence_length=sequence_length, optimizer=rms, batch_size=batch_size)

        # If you want to continue training from a previous model, just uncomment the line below
        #mo.load_model("ufcnn_rl_training")

        # Define environment/game

        # Initialize experience replay object

        start_time = time.time()
        best_pnl = -99999.
        best_rndless_pnl = -99999.

        exp_replay = ExperienceReplay(max_memory=max_memory,
                                      env=env,
                                      sequence_dim=(sequence_length,
                                                    features_length),
                                      discount=discount)
        lineindex = 0

        # Train
        for e in range(epoch):
            loss = 0.
            game_over = False

            total_reward = 0

            win_cnt = 0
            loss_cnt = 0
            random_cnt = 0
            no_random_cnt = 0

            ### loop over days
            for i in range(training_days):
                input_t = env.reset()
                game_over = False  # reset at the start of every trading day

                j = 0
                while not game_over:  # game_over == end of trading day
                    input_tm1 = input_t
                    #print("INPUT ",input_tm1)
                    # get next action
                    if np.random.rand() <= epsilon:
                        action = np.random.randint(0, num_actions, size=1)[0]
                        random_cnt += 1
                        #print("RANDOM")
                    else:
                        q = model.predict(exp_replay.resize_input(input_tm1))
                        action = np.argmax(q[0])
                        no_random_cnt += 1
                        #print("SELECT")
                        ##action = np.argmax(q)

                    # apply action, get rewards and new state
                    input_t, reward, game_over, idays, lineindex = env.act(action)

                    if reward > 0:
                        win_cnt += 1

                    if reward < 0:
                        loss_cnt += 1

                    total_reward += reward

                    # clip the reward stored for learning to [-1, 1]
                    if reward > 1.:
                        reward = 1.
                    if reward < -1.:
                        reward = -1.

                    # store experience
                    exp_replay.remember([action, reward, idays, lineindex - 1],
                                        game_over)

                    # adapt model

                    if j > batch_size:  # do not run exp_rep if the store is empty...
                        inputs, targets = exp_replay.get_batch(
                            model, batch_size=batch_size)
                        curr_loss = model.train_on_batch(
                            exp_replay.resize_input(inputs), targets)
                        loss += curr_loss

                    j += 1

            rndless_pnl = self.get_randomless_pnl(test_env=test_env,
                                                  model=model,
                                                  testing_days=testing_days)

            secs = time.time() - start_time
            print(
                "Epoch {:05d}/{} | Time {:7.1f} | Loss {:11.4f} | Win trades {:5d} | Loss trades {:5d} | Total PnL {:8.2f} | Rndless PnL {:8.2f} | Eps {:.4f} | Rnd: {:5d}| No Rnd: {:5d}  "
                .format(e, epoch, secs, loss, win_cnt, loss_cnt, total_reward,
                        rndless_pnl, epsilon, random_cnt, no_random_cnt),
                flush=True)
            if epsilon > epsilon_min:
                epsilon *= epsilon_decay
            # Save trained model weights and architecture, this will be used by the visualization code

            if total_reward > best_pnl:
                mo.save_model(model, base_model_name + "_rl_best")
                best_pnl = total_reward
            else:
                mo.save_model(model, base_model_name + "_rl_training")

            if rndless_pnl > best_rndless_pnl:
                mo.save_model(model, base_model_name + "_rl_rndless_best")
                best_rndless_pnl = rndless_pnl
Example 6
import DataStore as ds

features_list = list(range(1, 33))  ## FULL
training_store = ds.DataStore(training_days=10, features_list=features_list)
print("XDF : ", training_store.Xdf)
print("XDF-len : ", training_store.Xdf.count())
print("STD : ", training_store.std)
print("Mean: ", training_store.mean)

print("XDF Mean: ", training_store.Xdf.mean())
print("XDF Std : ", training_store.Xdf.std())

testing_store = ds.DataStore(training_days=10,
                             testing_days=20,
                             features_list=features_list,
                             mean=training_store.mean,
                             std=training_store.std)
print("Test XDF: ", testing_store.Xdf)
print("Test XDF Mean: ", testing_store.Xdf.mean())
print("Test XDF Std : ", testing_store.Xdf.std())

print("A Sequence ", testing_store.get_sequence(1, 700))

numdays = testing_store.get_number_days()
for i in range(1, numdays):
    print("Day ", i, ", ", testing_store.get_day(i), ", Elements: ",
          testing_store.get_day_length(i))

print("TESTING GET SEQ")
print(testing_store.get_sequence(3, 888))