Example #1
def test_indicator(file_name, start_date, end_date, figure_title,
                   candlestick_width):
    csv_data = utils.load_data(file_name, "%Y.%m.%d %H:%M:%S")

    plot_data = csv_data.copy()

    print('Sample data:')
    plot_data['SMA'] = simple_moving_average(plot_data['Close'], 10)

    print(plot_data.tail(20))

    subplot_df1 = plot_data.drop(['Volume', 'SMA'], axis=1).loc[start_date:end_date]
    subplot_df2 = plot_data['SMA'].loc[start_date:end_date]

    df1_tuples = utils.candlestick_tuples(subplot_df1)

    fig, ax = plt.subplots()
    plt.xticks(rotation=45)
    plt.title(figure_title)

    fplt.candlestick_ochl(ax,
                          df1_tuples,
                          width=candlestick_width,
                          colorup='g',
                          colordown='r')
    subplot_df2.plot(ax=ax)

    plt.show()
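The `simple_moving_average` helper used above isn't shown in this example; a minimal sketch, assuming it returns a pandas Series built from a rolling mean (the name and behaviour are assumptions, not the original implementation):

import pandas as pd


def simple_moving_average(values, period):
    # Assumed implementation: mean over a rolling window of `period` bars;
    # the first `period - 1` entries stay NaN until the window fills.
    return values.rolling(window=period).mean()
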
def test_indicator(file_name, start_date, end_date, figure_title,
                   candlestick_width):
    csv_data = utils.load_data(file_name, "%Y.%m.%d %H:%M:%S")

    print("Sample data:")
    print(csv_data.head(10))

    data_CHO = chaikin_oscillator(csv_data)

    print("")
    print(data_CHO.tail(20))

    ax1_data = data_CHO.drop(
        ['Volume', 'ADL', '3 day EMA of ADL', '10 day EMA of ADL', 'CHO'],
        axis=1)
    ax1_data = ax1_data.loc[start_date:end_date]

    ax2_data = data_CHO.drop(['Open', 'High', 'Low', 'Close', 'Volume', 'CHO'],
                             axis=1)
    ax2_data = ax2_data.loc[start_date:end_date]

    ax3_data = data_CHO['CHO'].loc[start_date:end_date]

    # fig1.suptitle('Hourly OHLC data') <- figure title

    df1_tuples = utils.candlestick_tuples(ax1_data)

    fig1, ax1 = plt.subplots(1, 1)
    ax1.xaxis_date()
    ax1.set_ylabel('OHLC', color='g')
    ax1.set_title(figure_title)
    for tick in ax1.get_xticklabels():
        tick.set_rotation(45)
    for t1 in ax1.get_yticklabels():
        t1.set_color('g')
    ax1.grid()
    fplt.candlestick_ochl(ax1,
                          df1_tuples,
                          width=candlestick_width,
                          colorup='g',
                          colordown='r')

    ax2 = ax1.twinx()
    ax2.plot(ax3_data, 'b-')
    ax2.set_ylabel('CHO', color='b')
    for t1 in ax2.get_yticklabels():
        t1.set_color('b')
    fig1.show()

    fig2 = plt.figure(2)
    plt.plot(ax2_data)
    plt.title('ADL vs EMAs')
    plt.legend(['ADL', '3 day EMA of ADL', '10 day EMA of ADL'])
    plt.xlabel('Time')
    plt.ylabel('ADL')
    plt.grid()
    plt.xticks(rotation=90)
    fig2.show()

    plt.close()
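The `chaikin_oscillator` helper isn't shown either; a sketch that would produce the columns dropped above ('ADL', '3 day EMA of ADL', '10 day EMA of ADL', 'CHO'), assuming the standard Chaikin oscillator definition with EMA spans of 3 and 10 taken from those column names:

import pandas as pd


def chaikin_oscillator(df):
    # Assumed implementation: money-flow multiplier * volume, accumulated
    # into the ADL, then the difference of a 3- and a 10-period EMA of the ADL.
    out = df.copy()
    mfm = (
        (out['Close'] - out['Low']) - (out['High'] - out['Close'])
    ) / (out['High'] - out['Low'])
    out['ADL'] = (mfm * out['Volume']).cumsum()
    out['3 day EMA of ADL'] = out['ADL'].ewm(span=3, adjust=False).mean()
    out['10 day EMA of ADL'] = out['ADL'].ewm(span=10, adjust=False).mean()
    out['CHO'] = out['3 day EMA of ADL'] - out['10 day EMA of ADL']
    return out
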
def main():
    args = get_args()
    arch_name = args.arch
    epochs = args.epochs

    image_datasets, dataloaders = load_data(args.data_directory)

    model = create_model(arch_name, args.hidden_units)

    device = create_device(args.gpu)
    train_model(device, model, dataloaders, args.learning_rate, epochs)

    model.cpu()
    model.arch_name = arch_name
    model.class_to_idx = image_datasets['Training'].class_to_idx
    model.epochs_count = epochs

    save_model(model, args.save_directory)
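`create_model`, `create_device`, `train_model`, and `save_model` are project helpers not shown here; as one small illustration, a sketch of what `create_device` might look like, assuming a PyTorch workflow where `args.gpu` is a boolean flag:

import torch


def create_device(use_gpu):
    # Assumed helper: use CUDA only when it is both requested and available.
    if use_gpu and torch.cuda.is_available():
        return torch.device('cuda')
    return torch.device('cpu')
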
def test_indicator(file_name, start_date, end_date, figure_title, candlestick_width):
    csv_data = utils.load_data(file_name, "%Y.%m.%d %H:%M:%S")

    # print("Sample data:")
    # print(csv_data.head(10))

    plot_data = williams_r(csv_data, 14)

    # print("")
    # print(plot_data.tail(20))

    subplot_df1 = plot_data.drop(['Volume', '%R', 'Highest high', 'Lowest low'], axis=1).loc[start_date:end_date]
    subplot_df2 = plot_data['%R'].loc[start_date:end_date]

    df1_tuples = utils.candlestick_tuples(subplot_df1)
    subplot_df2 = utils.reset_date_index(subplot_df2)

    # subplots
    fig, (ax1, ax2) = plt.subplots(2, 1, sharex='col')

    # fig.set_size_inches(8, 8)
    ax1.xaxis_date()
    # ax1.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M:%S'))
    ax1.set_title(figure_title)
    for tick in ax1.get_xticklabels():
        tick.set_rotation(45)
    ax1.grid()
    fplt.candlestick_ochl(ax1, df1_tuples, width=candlestick_width, colorup='g', colordown='r')

    subplot_df2.plot(ax=ax2)
    for tick in ax2.get_xticklabels():
        tick.set_rotation(90)
    ax2.grid(which='minor', linestyle='--')
    ax2.grid(which='major', linestyle='-')
    fig.subplots_adjust(hspace=0.0)
    fig.show()
    plt.close()
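`williams_r` is another project helper; a minimal sketch consistent with the columns used above ('Highest high', 'Lowest low', '%R') and the usual Williams %R formula (the implementation details are assumed):

import pandas as pd


def williams_r(df, period):
    # Assumed implementation:
    # %R = (highest high - close) / (highest high - lowest low) * -100
    out = df.copy()
    out['Highest high'] = out['High'].rolling(window=period).max()
    out['Lowest low'] = out['Low'].rolling(window=period).min()
    out['%R'] = (
        (out['Highest high'] - out['Close'])
        / (out['Highest high'] - out['Lowest low'])
        * -100
    )
    return out
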
Example #5
def test_indicator(file_name, start_date, end_date, figure_title, candlestick_width):
    csv_data = utils.load_data(file_name, "%Y.%m.%d %H:%M:%S")

    print("Sample data:")
    print(csv_data.head(10))

    plot_data = csv_data.copy()

    plot_data['ADL'] = accumulation_distribution_line(plot_data)

    print(plot_data.tail(10))

    subplot_df1 = plot_data.drop(['Volume', 'ADL'], axis=1).loc[start_date:end_date]
    subplot_df2 = plot_data['ADL'].loc[start_date:end_date]

    df1_tuples = utils.candlestick_tuples(subplot_df1)
    subplot_df2 = utils.reset_date_index(subplot_df2)

    # subplots
    fig, (ax1, ax2) = plt.subplots(2, 1, sharex='col')

    ax1.xaxis_date()
    ax1.set_title(figure_title)
    # for tick in ax1.get_xticklabels():
    #     tick.set_rotation(45)
    ax1.grid()
    fplt.candlestick_ochl(ax1, df1_tuples, width=candlestick_width, colorup='g', colordown='r')

    subplot_df2.plot(ax=ax2)
    for tick in ax2.get_xticklabels():
        tick.set_rotation(45)
    ax2.grid(which='minor', linestyle='--')
    ax2.grid(which='major', linestyle='-')
    fig.subplots_adjust(hspace=0.0)
    fig.show()
    plt.close()
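A hypothetical call for this example: the CSV name is borrowed from Example #8 below, while the date range, title, and candlestick width are purely illustrative values:

test_indicator('xauusd_m10_data1.csv',
               start_date='2019-01-02',
               end_date='2019-01-04',
               figure_title='Accumulation/Distribution Line',
               candlestick_width=0.005)
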
Example #6
            results["metrics"][name]["index"],
            results["metrics"][name]["values"],
            *args,
            **kwargs,
            label=name
        )


#%%

DATA_DIR = "data/"
N_JOBS = -1
results = dict()


full_k, real_img, mask_loc, final_mask = load_data(DATA_DIR, 2, monocoil=False)
final_k = np.squeeze(full_k * final_mask[np.newaxis])
square_mask = np.zeros(real_img.shape)
real_img_size = real_img.shape
img_size = [min(real_img.shape)] * 2
square_mask[
    real_img_size[0] // 2 - img_size[0] // 2 : real_img_size[0] // 2 + img_size[0] // 2,
    real_img_size[1] // 2 - img_size[1] // 2 : real_img_size[1] // 2 + img_size[1] // 2,
] = 1
trajectories = sp.io.loadmat("data/NCTrajectories.mat")

#%% md

# Offline reconstruction

#%%
    for idx, setup in enumerate(setups):
        data, problem, solver = (
            copy.deepcopy(setup["data"]),
            copy.deepcopy(setup["problem"]),
            copy.deepcopy(setup["solver"]),
        )
        e = Experience(data, problem, solver)
        print(f" == {idx}/{len(setups)}: {e.hex_hash()} == ")
        pprint(e.id())
        if not args.FORCE and e.hex_hash() in hash_list:
            print("already computed")
            continue

        if not args.DRY:
            # get data
            full_k, real_img, mask_loc, final_mask = load_data(
                DATA_DIR, **data)
            final_k = np.squeeze(full_k * final_mask[np.newaxis])
            # create the online problem
            ksp, fft, lin, prox = get_operators(kspace_data=final_k,
                                                loc=mask_loc,
                                                mask=final_mask,
                                                **problem)
            alg_name = solver.pop("algo")
            online_pb = OnlineReconstructor(fft,
                                            lin,
                                            regularizer_op=prox,
                                            opt=alg_name,
                                            verbose=0)
            # configure metrics
            metrics_config = create_cartesian_metrics(online_pb,
                                                      real_img,
Example #8
# arguments
period = 131   # exponential moving average period
ctrl_ticks = 0       # minimum number of ticks a triggered signal must pass before buying/selling
stop_loss = 10
take_profit = 10
bear_open = -20
bear_close = -80
bull_open = -50
bull_close = -20
init_budget = 10000  # initial user budget
weight = 15  # volume of item bought on trade
expenses = 0.01   # expenses on each trade

# load data from csv file
csv_data = utils.load_data('xauusd_m10_data1.csv', "%Y.%m.%d %H:%M:%S")
dates = pd.to_datetime(csv_data.index.values, format="%Y.%m.%d %H:%M:%S")

# apply williams %r indicator
wr_data = wr.williams_r(csv_data, 14)
wr_data = wr_data[['Close', '%R']]
wr_data['EMA'] = ema.exponential_moving_average(csv_data['Close'], period)

# opt_period, opt_bear_open, opt_bear_close, opt_bull_open, opt_bull_close = WR_strategy.optimize_strategy2(wr_data, dates, init_budget, weight, expenses)
# profits, bull_buys, bull_sales, bear_buys, bear_sales, stop_loss_trades, take_profit_trades = \
#     WR_strategy.wr_strategy(True, wr_data, dates, opt_period, ctrl_ticks, stop_loss, take_profit, opt_bear_open, opt_bear_close, opt_bull_open, opt_bull_close, init_budget, weight, expenses)
# print("Optimal period: ", opt_period, ", bear open: ", opt_bear_open, ", bear close: ", opt_bear_close, ", bull open: ", opt_bull_open, ", bull close: ", opt_bull_close)


profits, bull_buys, bull_sales, bear_buys, bear_sales, stop_loss_trades, take_profit_trades = \
    WR_strategy.wr_strategy(True, wr_data, dates, period, ctrl_ticks, stop_loss, take_profit,
Example #9
def run_experiment(lr=0.01, num_epochs=128, nkerns=[96, 192, 10], lambda_decay=1e-3, conv_arch=all_CNN_C, n_class=10,
                   batch_size=128, verbose=False, filter_size=(3,3)):
    """
    Wrapper function for testing the all convolutional networks implemented here

    :type lr: float
    :param lr: learning rate used (factor for the stochastic
    gradient)

    :type n_epochs: int
    :param n_epochs: maximal number of epochs to run the optimizer

    :type nkerns: list of ints
    :param nkerns: number of kernels on each layer

    :type batch_size: int
    :param batch_szie: number of examples in minibatch.

    :type verbose: boolean
    :param verbose: to print out epoch summary or not to.

    :type filter_size: tuple(int)
    :param filter_size: size of the filters.

    :type conv_arch: function
    :param verbose: Convolutional Network to run

    :type weight_decay: float
    :param weight_decay: L2 regularization parameter

    :type n_class: int
    :param n_class: Number of classes/output units of final layer (10 vs. 100)

    """
    datasets = load_data(simple=(n_class != 100))

    X_train, y_train = datasets[0]
    X_val, y_val = datasets[1]
    X_test, y_test = datasets[2]

    n_train_batches = X_train.get_value(borrow=True).shape[0]
    n_valid_batches = X_val.get_value(borrow=True).shape[0]
    n_test_batches = X_test.get_value(borrow=True).shape[0]
    n_train_batches //= batch_size
    n_valid_batches //= batch_size
    n_test_batches //= batch_size

    index = T.lscalar()  # index to a [mini]batch

    x = T.tensor4('x')
    y = T.ivector('y')

    channel = 3
    imsize = 32

    data_size = X_train.eval().shape[0]
    tdata_size = X_test.eval().shape[0]
    vdata_size = X_val.eval().shape[0]

    X_train = X_train.reshape((data_size, channel, imsize, imsize))
    X_test = X_test.reshape((tdata_size, channel, imsize, imsize))
    X_val = X_val.reshape((vdata_size, channel, imsize, imsize))

    # Build the network from the requested architecture (all_CNN_C by default)

    network = conv_arch(x, filter_size=filter_size, n_class=n_class)

    # Loss and prediction calculation
    # Training loss function used is Categorical Cross Entropy
    # which computes the categorical cross-entropy between predictions and targets.

    train_prediction = lasagne.layers.get_output(network)
    train_loss = lasagne.objectives.categorical_crossentropy(train_prediction, y)
    train_loss = train_loss.mean()

    # Regularization
    l2_penalty = lasagne.regularization.regularize_network_params(network, lasagne.regularization.l2)
    train_loss += lambda_decay * l2_penalty

    params = lasagne.layers.get_all_params(network, trainable=True)

    # Updates to the parameters are defined here

    updates = lasagne.updates.nesterov_momentum(
        train_loss, params, learning_rate=lr, momentum=0.9)

    val_prediction = lasagne.layers.get_output(network)
    val_loss = errors(val_prediction, y)

    test_prediction = lasagne.layers.get_output(network, deterministic=True)
    test_loss = errors(test_prediction, y)


    # Training, validation, and test functions are defined here

    train_fn = theano.function(
        [index],
        train_loss,
        updates=updates,
        givens={
            x: X_train[index * batch_size: (index + 1) * batch_size],
            y: y_train[index * batch_size: (index + 1) * batch_size]
        }
    )

    val_fn = theano.function(
        [index],
        val_loss,
        givens={
            x: X_val[index * batch_size: (index + 1) * batch_size],
            y: y_val[index * batch_size: (index + 1) * batch_size]
        }
    )

    test_fn = theano.function(
        [index],
        test_loss,
        givens={
            x: X_test[index * batch_size: (index + 1) * batch_size],
            y: y_test[index * batch_size: (index + 1) * batch_size]
        }
    )

    train_nn(train_fn, val_fn, test_fn,
             n_train_batches, n_valid_batches, n_test_batches, num_epochs,
             verbose=verbose)
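
The docstring above describes the parameters; a minimal, hypothetical invocation using the documented defaults (the values are illustrative and do not correspond to any results quoted here):

if __name__ == '__main__':
    run_experiment(lr=0.01,
                   num_epochs=128,
                   nkerns=[96, 192, 10],
                   conv_arch=all_CNN_C,
                   n_class=10,
                   batch_size=128,
                   verbose=True)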