Example #1
def myMainFunc(market, hidden_size, l2Reg, Files_Folder, learning_rate_grid, epoch_grid):

    # objective function and optimizer
    objective = sharpeLoss
    curr_optimizer = tf.train.AdamOptimizer

    # data parameters
    lookback = 30
    lookahead = 1
    rolling_sd_window = 100
    network_activation = tf.nn.tanh
    test_start_date = 20070418
    random_start_indicies = np.arange(1, 11, 1)
    nRandom_start = len(random_start_indicies)
    batch_size = 100

    # loading data
    datadir = 'C:/behrouz/Projects/DailyModels_new/NeuralNet/tf-SQ-only/data/%s_Commision-and-Slippage-limits.csv'

    data = pd.read_csv(datadir % market)

    curr_market_data = \
        dataProcessing.time_series_toMatrix(data,  test_start_date,
                                            lookback=lookback,
                                            look_ahead=lookahead, sd_window=rolling_sd_window)

    train = curr_market_data[:4]
    test = curr_market_data[4:]

    total_batches = train[0].shape[0] // batch_size
    decay_steps = total_batches
    decay_rate = 0.99

    for LR in learning_rate_grid:

        for training_epochs in epoch_grid:

            market_trainPred = np.zeros((train[0].shape[0], nRandom_start + 2))
            market_testPred = np.zeros((test[0].shape[0], nRandom_start + 2))
            total_loss_matrix = np.zeros((nRandom_start, 6))


            market_trainPred[:, 0] = train[3]  # date
            market_trainPred[:, 1] = train[2]  # 1 day return
            market_testPred[:, 0] = test[3]
            market_testPred[:, 1] = test[2]

            for R in range(len(random_start_indicies)):
                print('Hidden Size =', hidden_size, 'Learning rate=', LR,
                      'TrainingEpochs=', training_epochs, 'L2 Reg=', l2Reg, 'Random Start=', R)

                weights = {
                    'h1': initializers.xavier_from_tf_initializer([lookback, hidden_size], name='W_1'),
                    'out': initializers.xavier_from_tf_initializer([hidden_size, 1], name='W_out')
                }
                biases = {
                    'b1': initializers.bias_initializer([hidden_size], name='B_1')
                    #, 'out': initializers.bias_initializer([1], name='B_out')
                }
                # placeholders
                x = tf.placeholder(tf.float32, [None, lookback])
                y = tf.placeholder(tf.float32, [None])

                optimizer, output, sharpe_loss, sharpe_plus_l2_loss, l2Loss, ema = \
                    MLP_1layerFixedOutBias(x, y, weights, biases, curr_optimizer,
                                           objective, network_activation, l2Reg, l2Reg,
                                           LR, decay_steps, decay_rate)

                # Getting EMA var names:
                ema_dict = {}
                for var in tf.trainable_variables():
                    ema_var_name = ema.average_name(var)
                    ema_dict[ema_var_name] = var
                saver = tf.train.Saver(ema_dict)
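                # Restoring through this {shadow-name: variable} map loads the exponential-
                # moving-average values stored in the checkpoint directly into the live
                # weights, so the evaluation below runs on the EMA-smoothed parameters.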

                #saver = tf.train.Saver()

                with tf.Session() as sess:
                    try:
                        source_model_loc = 'C:/behrouz/Projects/DailyModels_new/NeuralNet/' \
                                           'tf-SQ-only/%s/' % Files_Folder

                        saver.restore(sess,
                                      source_model_loc + 'MLP-checkpointFiles/' + str(R + 1) +
                                      '/run%d-s-%d-LR-%.6f-epoch-%d-l2-%.5f.ckpt'
                                      % (R + 1, hidden_size, LR, training_epochs, l2Reg))
                        #print(weights['h1'].eval())

                    # a missing checkpoint surfaces as NotFoundError/ValueError in TF 1.x, not IOError
                    except (IOError, tf.errors.NotFoundError, ValueError):
                        print('Could not find the checkpoint file; filling in with the previous model..')

                    trainPred, train_loss, train_total_loss, trainL2_loss = \
                        sess.run([output, sharpe_loss, sharpe_plus_l2_loss, l2Loss],
                                 feed_dict={x: train[0], y: train[1]})
                    trainPred = trainPred[:, 0]

                    testPred, test_loss, test_total_loss, test_l2_loss = \
                        sess.run([output, sharpe_loss, sharpe_plus_l2_loss, l2Loss],
                                 feed_dict={x: test[0], y: test[1]})

                    testPred = testPred[:, 0]

                    market_trainPred[:, R + 2] = trainPred
                    market_testPred[:, R + 2] = testPred
                    total_loss_matrix[R, 0:3] = train_loss, trainL2_loss, train_total_loss
                    total_loss_matrix[R, 3:] = test_loss, test_l2_loss, test_total_loss

                tf.reset_default_graph()

            total_loss_matrix_colnames = ['train_loss', 'train_l2_loss', 'train_total_loss',
                                          'test_loss', 'test_l2_loss', 'test_total_loss']
            total_loss_matrix = pd.DataFrame(total_loss_matrix, columns=total_loss_matrix_colnames)

            total_loss_matrix.to_csv('./Results/%s-loss-s-%d-LR-%.6f-epoch-%d-l2-%.5f.csv'
                                     % (market, hidden_size, LR, training_epochs, l2Reg),
                                     index=False)

            predsCols = ['dtStart', '%s-y-true' % market]
            predsCols.extend(['%s-pred%d' % (market, j) for j in range(1, nRandom_start + 1, 1)])

            market_trainPred = pd.DataFrame(market_trainPred, columns=predsCols)
            market_trainPred.to_csv('./Results/%s-trainPreds-s1-%d-LR-%.6f-epoch-%d-l2-%.5f.csv'
                                    % (market, hidden_size, LR, training_epochs, l2Reg),
                                    index=False)

            market_testPred = pd.DataFrame(market_testPred, columns=predsCols)
            market_testPred.to_csv('./Results/%s-testPreds-s1-%d-LR-%.6f-epoch-%d-l2-%.5f.csv'
                                   % (market, hidden_size, LR, training_epochs, l2Reg),
                                   index=False)
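
Every example in this file passes a sharpeLoss callable as the training objective, but its definition lives elsewhere in the project. Below is a minimal sketch of what such a loss might look like, assuming it takes the network output and the realized returns and is simply the negative Sharpe ratio of the resulting strategy returns; the real implementation (annualization, transaction costs, argument order) may differ.

import tensorflow as tf

def sharpeLoss(output, y):
    # hypothetical sketch, not the project's actual objective:
    # negative Sharpe ratio of daily strategy returns (position * realized return),
    # so that minimizing the loss maximizes the Sharpe ratio
    strat_returns = tf.reshape(output, [-1]) * y
    mean, var = tf.nn.moments(strat_returns, axes=[0])
    return -mean / (tf.sqrt(var) + 1e-8)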
Example #2
test_basket_sharpes_array = np.zeros(training_epochs)

train_indiv_sharpes = np.zeros((len(markets), training_epochs, 3))
test_indiv_sharpes = np.zeros((len(markets), training_epochs, 3))

for R in range(nRandom_start):

    print('RUN %d optimization begins..' % (R + 1))

    weights = {
        'out':
        initializers.xavier_from_tf_initializer([2 * hidden2_size, 1],
                                                name='W_out')
    }

    biases = {'out': initializers.bias_initializer([1], name='B_out')}

    x = tf.placeholder(tf.float32, [None, input_feats])
    y = tf.placeholder(tf.float32, [None])
    learning_rate = tf.placeholder(tf.float32)
    keep_prob = tf.placeholder(tf.float32)

    optimizer, output, sharpe_plus_l2_loss = bidirectional_attention_LSTM(
        x,
        y,
        weights,
        biases,
        keep_prob,
        curr_optimizer,
        learning_rate,
        objective,
Example #3
def myMainFunc(rand_start, hidden_size, l2Reg, learning_rate_grid, epoch_grid):

    #random.seed(seeds[rand_start - 1])
    #np.random.seed(seeds[rand_start - 1])
    #tf.set_random_seed(seeds[rand_start - 1])
    market = 'SQ'

    if not os.path.exists('./MLP-checkpointFiles/' + str(rand_start)):
        os.makedirs('./MLP-checkpointFiles/' + str(rand_start))

    # objective function
    objective = sharpeLoss
    curr_optimizer = tf.train.AdamOptimizer
    network_activation = tf.nn.tanh

    # data parameters
    lookback = 30
    lookahead = 1
    rolling_sd_window = 100

    # training parameters:
    batch_size = 100
    test_start_date = 20070418

    patience = 20  # stop training if the train loss does not improve for 20 consecutive epochs
    counter = 0
    best_train_loss = np.inf
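    # note: counter and best_train_loss are created once here, outside the grid loops
    # below, so the early-stopping state and the "best loss so far" carry over across
    # learning-rate/epoch configurations; a checkpoint is only written when a
    # configuration improves on the best loss seen so far in this call.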

    # loading data
    datadir = 'C:/behrouz/Projects/DailyModels_new/NeuralNet/tf-SQ-only/data/%s_Commision-and-Slippage-limits.csv'

    # get the common dates and then merge each data making sure they have common dates:
    data = pd.read_csv(datadir % market)

    curr_market_data = \
            dataProcessing.time_series_toMatrix(data, test_start_date, lookback=lookback,
                                                look_ahead=lookahead, sd_window=rolling_sd_window)
    trainX, trainY, train_ret_1day, train_dates = curr_market_data[:4]

    total_batches = trainX.shape[0] // batch_size
    rem = trainX.shape[0] % batch_size

    decay_steps = total_batches
    decay_rate = 1.0

    for LR in learning_rate_grid:

        for training_epochs in epoch_grid:
            print('Hidden Size =', hidden_size, 'Learning rate=', LR,
                  'TrainingEpochs=', training_epochs, 'L2 Reg=', l2Reg)

            weights = {
                'h1':
                initializers.xavier_from_tf_initializer(
                    [lookback, hidden_size], name='W_1'),
                'out':
                initializers.xavier_from_tf_initializer([hidden_size, 1],
                                                        name='W_out')
            }

            biases = {
                'b1': initializers.bias_initializer([hidden_size], name='B_1')
                #, 'out': initializers.bias_initializer([1], name='B_out')
            }
            # placeholders
            x = tf.placeholder(tf.float32, [None, lookback])
            y = tf.placeholder(tf.float32, [None])

            train_op, output, sharpe_plus_l2_loss, classification_loss = MLP_1layer_fixedBiasOut_sigmoid(
                x, y, weights, biases, curr_optimizer, objective,
                network_activation, l2Reg, l2Reg, LR, decay_steps, decay_rate)

            # initialize all tensors- to be run in Session!
            init = tf.global_variables_initializer()

            # saver for writing model checkpoints that can be restored later
            saver = tf.train.Saver()

            # launch default graph:
            with tf.Session() as sess:

                sess.run(init)

                # training cycle:
                for epoch in range(training_epochs):

                    # shuffle the training data at the beginning of each epoch
                    # (disable this shuffle to get fully reproducible results)
                    a = np.arange(trainX.shape[0])
                    np.random.shuffle(a)
                    trainX = trainX[a, :]
                    trainY = trainY[a]
                    # loop over all batches:
                    for batch_number in range(total_batches):
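                        # when batch_size does not divide the data evenly, the final
                        # iteration trains on the last batch_size rows (a full batch),
                        # so the rem rows just before that window are skipped this
                        # epoch; the per-epoch shuffle changes which rows those are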

                        if (batch_number + 1) == total_batches and rem != 0:
                            xBatch = trainX[(total_batches - 1) * batch_size +
                                            rem:, :]
                            trainY_batch = trainY[(total_batches - 1) *
                                                  batch_size + rem:]

                        else:
                            xBatch = trainX[batch_number *
                                            batch_size:(batch_number + 1) *
                                            batch_size, :]
                            trainY_batch = trainY[batch_number *
                                                  batch_size:(batch_number +
                                                              1) * batch_size]

                        # run optimization
                        _ = sess.run(train_op,
                                     feed_dict={
                                         x: xBatch,
                                         y: trainY_batch
                                     })

                    #curr_loss = sess.run(sharpe_plus_l2_loss, feed_dict={x: trainX, y: trainY})
                    curr_loss, curr_classification_loss = sess.run(
                        [sharpe_plus_l2_loss, classification_loss],
                        feed_dict={
                            x: trainX,
                            y: trainY
                        })
                    print('=' * 20)
                    print('Epoch=', epoch, 'Current Train Loss=', curr_loss,
                          'Best Train Loss=', best_train_loss)
                    print('Epoch=', epoch, 'Classification Loss=',
                          curr_classification_loss)

                    if curr_loss < best_train_loss:
                        counter = 0
                        best_train_loss = curr_loss
                        saver.save(
                            sess, './MLP-checkpointFiles/' + str(rand_start) +
                            '/run%d-s-%d-LR-%.6f-epoch-%d-l2-%.5f.ckpt' %
                            (rand_start, hidden_size, LR, training_epochs,
                             l2Reg))

                    else:
                        counter += 1
                    if counter >= patience:
                        break

            # reset the graph so it can be rebuilt in the next iteration of the loop
            tf.reset_default_graph()
    print('RUN %d optimization begins..' % (j + 1))
    weights = {
        'h1':
        initializers.tanh_uniform_weight_initializer(
            [input_feats, hidden1_size]),
        'h2':
        initializers.tanh_uniform_weight_initializer(
            [hidden1_size, hidden2_size]),
        'out':
        initializers.tanh_uniform_weight_initializer([hidden2_size, 1],
                                                     name='W_out')
    }

    biases = {
        'b1': initializers.bias_initializer([hidden1_size]),
        'b2': initializers.bias_initializer([hidden2_size]),
        'out': initializers.bias_initializer([1], name='B_out')
    }

    x = tf.placeholder(tf.float32, [None, input_feats])
    y = tf.placeholder(tf.float32, [None])
    learning_rate = tf.placeholder(tf.float32)

    optimizer, output, sharpe_plus_l2_loss = MLP(x,
                                                 y,
                                                 weights,
                                                 biases,
                                                 tf.train.AdamOptimizer,
                                                 learning_rate,
                                                 activation=tf.nn.tanh,
Example #5
def myparralelFunc(LR_gird, l2Reg, dates, objective, results_path):
    for i in range(0, len(markets), 1):
        data = pd.read_csv(datadir % markets[i])
        # make sure the data from all markets share exactly the same set of dates
        data = pd.merge(data, dates, on='dtStart', how='inner')
        curr_market_data = dataProcessing.time_series_toMatrix(
            data, date_train_end)
        if i == 0:
            trainX = curr_market_data[0]
            trainY = curr_market_data[1]
            train_Y_op_opD = curr_market_data[2]
            train_dates = curr_market_data[3]
            testX = curr_market_data[4]
            testY = curr_market_data[5]
            test_Y_op_opD = curr_market_data[6]
            test_dates = curr_market_data[7]
        else:
            trainX = np.append(trainX,
                               copy.deepcopy(curr_market_data[0]),
                               axis=0)
            trainY = np.dstack((trainY, copy.deepcopy(curr_market_data[1])))
            train_Y_op_opD = np.dstack(
                (train_Y_op_opD, copy.deepcopy(curr_market_data[2])))
            testX = np.append(testX,
                              copy.deepcopy(curr_market_data[4]),
                              axis=0)
            testY = np.dstack((testY, copy.deepcopy(curr_market_data[5])))
            test_Y_op_opD = np.dstack(
                (test_Y_op_opD, copy.deepcopy(curr_market_data[6])))

    trainY = np.transpose(trainY, [2, 1, 0])
    trainY = np.reshape(trainY, trainY.shape[:2])
    train_Y_op_opD = np.transpose(train_Y_op_opD, [2, 1, 0])
    train_Y_op_opD = np.reshape(train_Y_op_opD, train_Y_op_opD.shape[:2])
    testY = np.transpose(testY, [2, 1, 0])
    testY = np.reshape(testY, testY.shape[:2])
    test_Y_op_opD = np.transpose(test_Y_op_opD, [2, 1, 0])
    test_Y_op_opD = np.reshape(test_Y_op_opD, test_Y_op_opD.shape[:2])

    print(trainX.shape, trainY.shape)
    print(testX.shape, testY.shape)
    print('====')
    train_loss_mat = np.zeros((len(LR_gird), training_epochs))
    for i in range(len(LR_gird)):
        init_lr = LR_gird[i]
        random.seed(12345)
        np.random.seed(12345)
        tf.set_random_seed(12345)

        weights = {
            'out':
            initializers.xavier_from_tf_initializer([hidden1_size, 1],
                                                    name='W_out')
        }
        biases = {'out': initializers.bias_initializer([1], name='B_out')}
        # placeholders
        x = tf.placeholder(tf.float32, [len(markets), None, features])
        y = tf.placeholder(tf.float32, [len(markets), None])
        learning_rate = tf.placeholder(tf.float32)
        keep_prob = tf.placeholder(tf.float32)

        optimizer, output, sharpe_plus_l2_loss = vanilla_RNN(
            x,
            y,
            weights,
            biases,
            keep_prob,
            curr_optimizer,
            learning_rate,
            objective,
            markets=markets,
            activation=tf.nn.tanh,
            l2Reg=l2Reg,
            hidden_size=hidden1_size,
            n_layers=1)
        # initialize all tensors- to be run in Session!
        init = tf.global_variables_initializer()
        # saver for writing model checkpoints that can be restored later
        saver = tf.train.Saver()

        # launch default graph:
        with tf.Session() as sess:
            sess.run(init)
            # training cycle:
            decay_exponent = 1
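            # hold the learning rate at init_lr for the first stationary_epochs epochs,
            # then decay it by a factor of decya_fact once per epoch (manual exponential
            # decay fed through the learning_rate placeholder)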
            for epoch in range(training_epochs):
                if epoch >= stationary_epochs:
                    LR = init_lr * (decya_fact**decay_exponent)
                    decay_exponent += 1
                    feed_dict = {
                        x: trainX,
                        y: train_Y_op_opD,
                        learning_rate: LR,
                        keep_prob: dropout
                    }
                else:
                    feed_dict = {
                        x: trainX,
                        y: train_Y_op_opD,
                        learning_rate: init_lr,
                        keep_prob: dropout
                    }

                _ = sess.run(optimizer, feed_dict=feed_dict)

                train_loss = sess.run(sharpe_plus_l2_loss,
                                      feed_dict={
                                          x: trainX,
                                          y: train_Y_op_opD,
                                          keep_prob: 1.
                                      })
                print('L2 reg=', l2Reg, 'Epoch=', epoch, 'TrainLoss= ',
                      train_loss)
                train_loss_mat[i, epoch] = train_loss

            saver.save(
                sess, results_path + '/checkpointFiles/' + str(l2Reg) +
                '/checkPoint-LR-%.6f-l2-%.4f.ckpt' % (init_lr, l2Reg))

        # reset the graph so it can be rebuilt in the next iteration of the loop
        tf.reset_default_graph()
    return train_loss_mat
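
Every snippet builds its variables through an initializers helper module that is not shown here. The sketch below indicates what the two helpers used above might look like in TF 1.x; the function names match the calls, but the bodies are assumptions (the real module may use tf.get_variable, a different bias constant, etc.).

import tensorflow as tf

def xavier_from_tf_initializer(shape, name=None):
    # weight tensor initialized with TF's built-in Xavier / Glorot-uniform scheme
    return tf.Variable(tf.glorot_uniform_initializer()(shape), name=name)

def bias_initializer(shape, name=None):
    # bias vector initialized to a small constant (the 0.1 value is an assumption)
    return tf.Variable(tf.constant(0.1, shape=shape), name=name)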
Example #6
                        weights = {
                            'h1':
                            initializers.xavier_from_tf_initializer(
                                [lookback, hidden_size], name='W_1'),
                            'h2':
                            initializers.xavier_from_tf_initializer(
                                [hidden_size, hidden_size], name='W_2'),
                            'out':
                            initializers.xavier_from_tf_initializer(
                                [hidden_size, 1], name='W_out')
                        }

                        biases = {
                            'b1':
                            initializers.bias_initializer([hidden_size],
                                                          name='B_1'),
                            'b2':
                            initializers.bias_initializer([hidden_size],
                                                          name='B_2'),
                            'out':
                            initializers.bias_initializer([1], name='B_out')
                        }

                        x = tf.placeholder(tf.float32, [None, input_feats])
                        y = tf.placeholder(tf.float32, [None])
                        learning_rate = tf.placeholder(tf.float32)
                        keep_prob = tf.placeholder(tf.float32)

                        optimizer, output, sharpe_plus_l2_loss = \
                            MLP(x, y, weights, biases, keep_prob, curr_optimizer, learning_rate, objective,
                                batch_size=batch_size, markets=markets,
def train_parallel(l2Reg, init_lr, results_path, objective, train_data,
                   test_data, transCost_dict):
    display_steps = 10
    total_evals = int(training_epochs // display_steps)
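    # metrics are recorded every display_steps epochs, so total_evals is the number of
    # evaluation points reserved in the Sharpe-tracking arrays below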
    for R in range(nRandom_start):
        ind = 0
        train_basket_sharpes_array = np.zeros(total_evals)
        test_basket_sharpes_array = np.zeros(total_evals)

        train_indiv_sharpes = np.zeros((len(markets), total_evals, 3))
        test_indiv_sharpes = np.zeros((len(markets), total_evals, 3))
        print('RUN %d optimization begins..' % (R + 1))
        weights = {
            'out':
            initializers.xavier_from_tf_initializer([hidden1_size, 1],
                                                    name='W_out')
        }
        biases = {'out': initializers.bias_initializer([1], name='B_out')}
        x = tf.placeholder(tf.float32, [len(markets), None, features])
        y = tf.placeholder(tf.float32, [len(markets), None])
        learning_rate = tf.placeholder(tf.float32)
        keep_prob = tf.placeholder(tf.float32)

        optimizer, output, sharpe_plus_l2_loss = vanilla_RNN(
            x,
            y,
            weights,
            biases,
            keep_prob,
            curr_optimizer,
            learning_rate,
            objective,
            markets=markets,
            activation=tf.nn.tanh,
            l2Reg=l2Reg,
            hidden_size=hidden1_size,
            n_layers=n_layers)
        # initialize all tensors- to be run in Session!
        init = tf.global_variables_initializer()
        # saver for writing model checkpoints that can be restored later
        saver = tf.train.Saver()
        # launch default graph:
        with tf.Session() as sess:
            sess.run(init)
            # training cycle:
            decay_exponent = 1
            for epoch in range(training_epochs):
                if epoch >= stationary_epochs:
                    LR = init_lr * (decya_fact**decay_exponent)
                    decay_exponent += 1
                    feed_dict = {
                        x: train_data[0],
                        y: train_data[2],
                        learning_rate: LR,
                        keep_prob: dropout
                    }
                else:
                    LR = init_lr
                    feed_dict = {
                        x: train_data[0],
                        y: train_data[2],
                        learning_rate: LR,
                        keep_prob: dropout
                    }
                _ = sess.run(optimizer, feed_dict=feed_dict)
                ######################################################################################################
                if epoch % display_steps == 0:
                    train_preds_all_markets = sess.run(output,
                                                       feed_dict={
                                                           x: train_data[0],
                                                           keep_prob: 1.0
                                                       })
                    test_preds_all_markets = sess.run(output,
                                                      feed_dict={
                                                          x: test_data[0],
                                                          keep_prob: 1.0
                                                      })

                    train_basket_sharpe = losses_and_metrics.basket_trading_sharpeMetrics(
                        train_preds_all_markets, train_data[1], markets,
                        transCost_dict)
                    train_basket_sharpes_array[ind] = train_basket_sharpe
                    test_basket_sharpe = losses_and_metrics.basket_trading_sharpeMetrics(
                        test_preds_all_markets, test_data[1], markets,
                        transCost_dict)
                    test_basket_sharpes_array[ind] = test_basket_sharpe
                    print('EPOCH %d- learning rate=' % epoch,
                          LR, 'Train basket sharpe=',
                          round(train_basket_sharpe, 3), 'Test Basket Sharpe=',
                          round(test_basket_sharpe, 3))
                    # Individual market sharpes :training data:
                    for m in range(len(markets)):
                        train_indiv_sharpes[
                            m, ind, :] = losses_and_metrics.sharpe_likeModo(
                                train_preds_all_markets[m, :],
                                train_data[1][m, :],
                                transCost_dict[markets[m]])
                        test_indiv_sharpes[
                            m, ind, :] = losses_and_metrics.sharpe_likeModo(
                                test_preds_all_markets[m, :],
                                test_data[1][m, :], transCost_dict[markets[m]])
                    ind += 1
            print(' saving model graph of all tensors to file')
            saver.save(
                sess, results_path + str(l2Reg) +
                '/checkpointFiles/checkPoint-run%d.ckpt' % (R + 1))
            print('Optimization finished!')
        # reset the graph so it can be rebuilt in the next iteration of the loop
        tf.reset_default_graph()
        fig = plot_sharpes(train_basket_sharpes_array,
                           test_basket_sharpes_array, train_indiv_sharpes,
                           test_indiv_sharpes)
        fig.savefig(results_path + str(l2Reg) +
                    '/Learning-dynamics-plot/%d-N-l2Reg-%.3f-run%d.png' %
                    (hidden1_size, l2Reg, R + 1),
                    bbox_inches='tight')
        plt.close()
Example #8
def myMainFunc(random_start_indicies):
    markets = ('SQ', 'MQ', 'NQ', 'DQ', 'RN')

    # objective function
    # objective = losses_and_metrics.sum_sharpeLoss
    objective = losses_and_metrics.sharpeLoss
    # objective = losses_and_metrics.basket_trading_pos_size_sharpeLoss

    curr_optimizer = tf.train.AdamOptimizer
    # curr_optimizer = tf.train.RMSPropOptimizer

    # data parameters
    Y_toUse = 1  # 1: scaled return, 2:1-day return
    lookback = 30
    lookahead = 1
    rolling_sd_window = 100

    # training parameters:

    batch_size = 100
    # network parameters:
    network_activation = tf.nn.tanh
    dropout = 1.

    input_feats = lookback
    test_start_date = 20070418

    hidden_size_grid = [5, 10, 15, 20]
    learning_rate_grid = [0.001, 0.0005, 0.0001, 0.00005]
    epoch_grid = [50, 100, 150, 200, 300]
    l2_grid = [0, 0.01, 0.1, 1, 5]
    valid_frac = 0.2

    # loading data
    datadir = 'C:/behrouz/Projects/DailyModels_new/NeuralNet/hyper-param-optimization/tf-hyperParam-opt/data/%s_Commision-and-Slippage-limits.csv'

    # get the common dates, then merge each market's data so they all share the same dates:

    data = pd.read_csv(datadir % markets[0])
    for i in range(1, len(markets), 1):
        data1 = pd.read_csv(datadir % markets[i])
        data = pd.merge(data, data1, on='dtStart', how='inner')

    dates = data[['dtStart']]

    # take a random subset of the training indices (common to all markets) to use as validation

    test_start_ind = int(np.where(dates.values == test_start_date)
                         [0]) - rolling_sd_window - lookback - lookahead
    inds = np.arange(test_start_ind)

    valid_inds = pd.read_csv('Validation_indicies.csv').values
    valid_inds = valid_inds.flatten()

    #valid_inds = np.random.choice(inds, size=int(valid_frac * test_start_ind), replace=False)
    #valid_inds = np.sort(valid_inds)
    # writing validation indicies to file
    # valid_inds_df = pd.DataFrame(valid_inds)
    #valid_inds_df.to_csv('Validation_indicies.csv', index=False)
    train_inds = [i for i in inds if i not in valid_inds]
    test_dict = {}
    train_dict = {}
    validation_dict = {}

    for i in range(0, len(markets), 1):
        data = pd.read_csv(datadir % markets[i])

        # make sure the data from all markets share exactly the same set of dates
        data = pd.merge(data, dates, on='dtStart', how='inner')

        curr_market_data = \
            dataProcessing.time_series_toMatrix(data, train_inds, valid_inds, test_start_date,
                                                lookback=lookback, look_ahead=lookahead,
                                                sd_window=rolling_sd_window)

        train_dict[markets[i]] = copy.deepcopy(curr_market_data[:4])
        validation_dict[markets[i]] = copy.deepcopy(curr_market_data[4:8])
        test_dict[markets[i]] = copy.deepcopy(curr_market_data[8:])

    total_batches = train_dict[markets[0]][0].shape[0] // batch_size
    rem = train_dict[markets[0]][0].shape[0] % batch_size

    print('TOTAL BATCHES+++++++++++++++++++++++', total_batches)

    for R in random_start_indicies:
        print('RUN %d optimization begins..' % R)

        for hidden_size in hidden_size_grid:

            for LR in learning_rate_grid:

                for training_epochs in epoch_grid:

                    for l2Reg in l2_grid:

                        print('Hidden Size =', hidden_size, 'Learning rate=',
                              LR, 'TrainingEpochs=', training_epochs,
                              'L2 Reg=', l2Reg)

                        weights = {
                            'h1':
                            initializers.xavier_from_tf_initializer(
                                [lookback, hidden_size], name='W_1'),
                            'h2':
                            initializers.xavier_from_tf_initializer(
                                [hidden_size, hidden_size], name='W_2'),
                            'out':
                            initializers.xavier_from_tf_initializer(
                                [hidden_size, 1], name='W_out')
                        }

                        biases = {
                            'b1':
                            initializers.bias_initializer([hidden_size],
                                                          name='B_1'),
                            'b2':
                            initializers.bias_initializer([hidden_size],
                                                          name='B_2'),
                            'out':
                            initializers.bias_initializer([1], name='B_out')
                        }
                        # placeholders
                        x = tf.placeholder(tf.float32, [None, input_feats])
                        y = tf.placeholder(tf.float32, [None])
                        learning_rate = tf.placeholder(tf.float32)
                        keep_prob = tf.placeholder(tf.float32)

                        optimizer, output, sharpe_plus_l2_loss = \
                            MLP_1layer(x, y, weights, biases, keep_prob, curr_optimizer, learning_rate, objective,
                                batch_size=batch_size,
                                markets=markets,
                                activation=network_activation, l2Reg=l2Reg, l2RegOutput=l2Reg * 1.,
                                l2Reg_biases=l2Reg * 1.)

                        # initialize all tensors- to be run in Session!

                        init = tf.global_variables_initializer()

                        # saver for writing model checkpoints that can be restored later

                        saver = tf.train.Saver()

                        # launch default graph:
                        with tf.Session() as sess:

                            sess.run(init)

                            # training cycle:
                            for epoch in range(training_epochs):

                                # shuffle the training data at the beginning of each epoch!

                                curr_train_dict = dataProcessing.shuffle_train_dict(
                                    train_dict, markets)

                                # loop over all batches:
                                for batch_number in range(total_batches):
                                    xBatch, trainY_batch = dataProcessing.next_batch_dict(
                                        curr_train_dict, batch_number,
                                        batch_size, rem, Y_toUse,
                                        total_batches, markets)
                                    # run optimization

                                    _ = sess.run(optimizer,
                                                 feed_dict={
                                                     x: xBatch,
                                                     y: trainY_batch,
                                                     learning_rate: LR,
                                                     keep_prob: dropout
                                                 })

                            #print(' Optimization finished! saving model graph of all tensors to file')

                            save_path = saver.save(
                                sess,
                                './MLP-checkpointFiles/run%d-s-%d-LR-%.6f-epoch-%d-l2-%.5f.ckpt'
                                % (R, hidden_size, LR, training_epochs, l2Reg))

                        # reset the graph so it can be rebuilt in the next iteration of the loop

                        tf.reset_default_graph()
    return random_start_indicies
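
The function above receives a list of random-start indices and returns it unchanged, which suggests it is intended to be mapped over chunks of indices by a separate driver process. The driver below is a hypothetical sketch, not part of the original code; the chunking and process count are assumptions.

if __name__ == '__main__':
    from multiprocessing import Pool

    all_starts = list(range(1, 11))            # random starts 1..10 (assumed)
    chunks = [all_starts[:5], all_starts[5:]]  # one chunk per worker process
    with Pool(processes=2) as pool:            # each process builds its own TF graphs
        pool.map(myMainFunc, chunks)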