Example #1
def get_input_data(self, skip_preprocessing, preprocessing_constant, normalization_method,
                   database_path, feature_names, datetime_interval, blockchain_indicators):
    # Load the dataset together with its descriptors, cache it on the instance and return it.
    data = md.get_dataset_with_descriptors(skip_preprocessing=skip_preprocessing,
                                           preproc_constant=preprocessing_constant,
                                           normalization_method=normalization_method,
                                           dataset_directory=database_path,
                                           feature_names=feature_names,
                                           datetime_interval=datetime_interval,
                                           blockchain_indicators=blockchain_indicators)

    self.data = data
    self.feature_names = feature_names

    return self.data
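
# Hypothetical usage of get_input_data. The enclosing class is not shown in the
# original snippet, so the class name DataPipeline and the argument values below
# are assumptions for illustration only:
pipeline = DataPipeline()
X = pipeline.get_input_data(skip_preprocessing=False,
                            preprocessing_constant=0.99,
                            normalization_method="rescale",
                            database_path='/home/catalin/databases/klines_2014-2018_15min/',
                            feature_names=['nlms_indicator', 'ATR_EMA'],
                            datetime_interval=None,        # placeholder; expected value not shown
                            blockchain_indicators=None)    # placeholder; expected value not shown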
Example #2
import matplotlib.pyplot as plt

# `md` and `SMA_EMA` are assumed to come from the project's own modules; their imports
# are not shown in the original snippet.


def plot_vertical_lines(data, desired_color='brown'):
    # even-indexed samples (odd indices zeroed out)
    data1 = [data[i] if (i % 2) == 0 else 0 for i in range(len(data))]
    # odd-indexed samples (even indices zeroed out)
    data2 = [data[i] if (i % 2) != 0 else 0 for i in range(len(data))]
    # plot both series in the same color so the alternating samples read as vertical spikes
    plt.plot(data1, color=desired_color)
    plt.plot(data2, color=desired_color)
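
# Quick illustration of plot_vertical_lines on a small dummy series (these values
# are made up for demonstration only):
plot_vertical_lines([1, 3, 2, 5, 4, 6], desired_color='brown')
plt.show()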


directory = '/home/catalin/databases/klines_2014-2018_15min/'
hard_coded_file_number = 0

data = md.get_dataset_with_descriptors(
    concatenate_datasets_preproc_flag=True,
    preproc_constant=0.99,
    normalization_method="rescale",
    dataset_directory=directory,
    hard_coded_file_number=hard_coded_file_number)
X = data['preprocessed_data']  ## this will be used for training
X_unprocessed = data['data']

close_prices = X_unprocessed[:, 0]
# counts of 15-minute candles (4 candles per hour)
one_day_in_15_min_candles = 4 * 24       # 96
twelve_hrs_in_15_min_candles = 4 * 12    # 48
six_hrs_in_15_min_candles = 4 * 6        # 24
three_hrs_in_15_min_candles = 4 * 3      # 12
one_hr_in_15_min_candles = 4
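
# SMA_EMA is called below but not defined in this snippet. The following is a minimal
# sketch of what such a helper could look like (an assumption, not the repository's
# actual implementation): it returns a trailing simple moving average and an
# exponential moving average computed over `period` samples.
import numpy as np

def SMA_EMA(values, period):
    values = np.asarray(values, dtype=float)
    sma = np.empty_like(values)
    ema = np.empty_like(values)
    alpha = 2.0 / (period + 1)              # standard EMA smoothing factor
    ema[0] = values[0]
    for i in range(len(values)):
        window = values[max(0, i - period + 1):i + 1]
        sma[i] = window.mean()              # trailing simple moving average
        if i > 0:
            ema[i] = alpha * values[i] + (1 - alpha) * ema[i - 1]
    return sma, ema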

SMA_12_day_values, EMA_12_day_values = SMA_EMA(close_prices, 12 * 1)
SMA_26_day_values, EMA_26_day_values = SMA_EMA(close_prices, 26 * 1)
Example #3
feature_names = [
    # ... (earlier entries omitted in the original snippet)
    # 'slope_VBP_smooth_24',
    'nlms_indicator',
    'nlms_smoothed_indicator',
    # 'rls_indicator_error',
    # 'rls_smoothed_indicator',
    'ATR_EMA',
    'ATR_EMA_Wilder',
    'CMF_12h',
    'CMF_12h_2',
    # 'sentiment_indicator_positive',
    # 'sentiment_indicator_negative'
]

data = md.get_dataset_with_descriptors(skip_preprocessing=False,
                                       preproc_constant=0.99,
                                       normalization_method="rescale",
                                       dataset_directory=directory,
                                       feature_names=feature_names)
X = data['preprocessed_data']  ## this will be used for training
X_unprocessed = data['data']
#sys.exit()
start_date = md.get_date_from_UTC_ms(data['dataset_dict']['UTC'][0])
end_date = md.get_date_from_UTC_ms(data['dataset_dict']['UTC'][-1])

#import random
#
#noise = np.array([np.array([random.uniform(0,1) for _ in range(X.shape[0])]) for i in range(5)])
#
#X = np.concatenate([X, noise.T], axis = 1)
#X_unprocessed = np.concatenate([X_unprocessed, noise.T], axis = 1)
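
# md.get_date_from_UTC_ms is part of the project's `md` module and is not shown here.
# A plausible equivalent (an assumption, not the actual implementation) would convert
# a UTC timestamp given in milliseconds to a timezone-aware datetime:
from datetime import datetime, timezone

def get_date_from_UTC_ms(utc_ms):
    # kline timestamps are typically milliseconds since the Unix epoch, in UTC
    return datetime.fromtimestamp(utc_ms / 1000.0, tz=timezone.utc)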