def setUpClass(self):
    """Build the fixtures shared by this test class: data access,
    preprocessing and cross-validation helpers."""
    # NOTE(review): unittest expects setUpClass to be a @classmethod taking
    # cls — confirm how this is registered with the test runner.
    self.DEBUG, self.METRICS = False, False
    # path prefix of the data directory, relative to this test file
    self.data_api_impl = DataApi('../../../data/')
    self.preprocessor_impl = Preprocessor()
    self.cross_validator_impl = CrossValidator()
def setUpClass(self):
    """Initialize the shared fixtures for this test class."""
    # NOTE(review): setUpClass is conventionally a @classmethod taking cls —
    # confirm the decorator is present on the enclosing class.
    self.DEBUG, self.METRICS = False, False
    # DataApi is rooted at the data directory (path relative to this file)
    self.data_api_impl = DataApi('../../../data/')
    self.distance_functions_impl = DistanceFunctions()
def __init__(self):
    """Set up default flags, data access, and empty experiment context."""
    self.DEBUG, self.VERBOSE = True, False
    # data layer, rooted at the repo data directory (relative path)
    self.data_api_impl = DataApi('../../../data/')
    # experiment context — presumably filled in later by the caller; confirm
    self.data_set = None
    self.algorithm_name = None
    # prediction-type flags; classification is the default mode
    self.CLASSIFICATION = True
    self.REGRESSION = False
def __init__(self):
    """Wire together every collaborator the demo run needs."""
    # logger tagged 'DEMO'; VERBOSE is the most detailed logging level
    self.logger = Logger('DEMO')
    # data layer: reads csv files and turns them into raw data frames
    self.datalayer = DataApi('../../data/')
    # everything for preprocessing data frames
    self.preprocessor = Preprocessor()
    # builds the cross-validation partitions
    self.cross_validator = CrossValidator()
    # grab-bag of miscellaneous helpers
    self.utils = Utils()
def __init__(self):
    """Extend KNN with clustering collaborators and convergence knobs."""
    KNN.__init__(self)
    self.DEBUG, self.VERBOSE = True, False
    self.data_api_impl = DataApi('../../data/')
    self.utilities_impl = Utilities()
    self.distance_functions_impl = DistanceFunctions()
    # stop iterating once consecutive centroids differ by less than this
    self.CONVERGENCE_THRESHOLD = 0.25
    # hard cap on clustering iterations before returning the current answer
    self.MAX_ITERATIONS = 5
def __init__(self):
    """Instantiate every collaborator an experiment run needs."""
    self.DEBUG = False
    # data access / preparation layer
    self.data_api_impl = DataApi('../../data/')
    self.preprocessor_impl = Preprocessor()
    self.cross_validator_impl = CrossValidator()
    self.parameter_tuner_impl = ParameterTuner()
    # the algorithm implementations under test
    self.knn_impl = KNN()
    self.enn_impl = EditedKNN()
    self.cnn_impl = CondensedKNN()
    self.kmeans_knn_impl = KMeansClustering()
    self.k_medoids_clustering_impl = KMedoidsClustering()
    # result aggregation
    self.results_processor_impl = Results()
    # prediction-type flags; presumably toggled per data set — TODO confirm
    self.CLASSIFICATION = self.REGRESSION = False
def __init__(self):
    """Initialize the KNN-derived class with debug output enabled."""
    # run the base-class setup first (it may establish shared state)
    KNN.__init__(self)
    self.DEBUG = True
    # DataApi rooted at the project data directory (path relative to this file)
    self.data_api_impl = DataApi('../../data/')
        # --- tail of an editing loop; the enclosing def is outside this chunk ---
        print("Number of Previous Edits: ")
        print(number_of_edits_previous)
        loopcounter += 1
        # NOTE(review): the loop count is labeled but never printed — presumably
        # print(loopcounter) was intended after this label; confirm upstream.
        print("Number of While Loops: ")
    # hand back the edited training set with a clean 0..n-1 index
    return edited_train_set.reset_index(drop=True)


# EXECUTE SCRIPT
if __name__ == '__main__':
    print('running edited knn...')
    edited_knn = EditedKNN()
    data_api_impl = DataApi('../../data/')
    cross_validator_impl = CrossValidator()
    preprocessor_impl = Preprocessor()
    # NOTE(review): variables are named 'wine' but the data set loaded is
    # 'segmentation' — confirm which data set this script is meant to use.
    wine_data = data_api_impl.get_raw_data_frame('segmentation')
    prep_wine_data = preprocessor_impl.preprocess_raw_data_frame(
        wine_data, 'segmentation')
    wine_data_train_set = cross_validator_impl.get_training_set(
        prep_wine_data, test_set_number=3)
    print('wine_data_train_set.shape: ' + str(wine_data_train_set.shape))
    # NOTE(review): test_set_number and indexes_list are undefined at script
    # scope — this call raises NameError as written; likely meant the same
    # test_set_number=3 used above. Confirm and fix.
    wine_data_test_set = cross_validator_impl.get_test_set(
        prep_wine_data, test_set_number, indexes_list)
    # NOTE(review): k is also undefined at script scope — confirm intended value.
    edited_knn.enn(wine_data_train_set, wine_data_test_set, prep_wine_data, k)
def __init__(self):
    """Set up data access; debug logging off by default."""
    # DataApi rooted at the project data directory (path relative to this file)
    self.data_api_impl = DataApi('../../data/')
    self.DEBUG = False
# Build the looper back-test configuration and run the double-MA strategy on it.
looper_settings = {
    "PATTERN": "looper",
    "LOOPER": {
        "initial_capital": 100000,
        # per-contract trading parameters for rb2010 on CTP
        "margin_ratio": {"rb2010.CTP": 0.00003},
        "commission_ratio": {"rb2010.CTP": {"close": 0.00001}},
        "size_map": {"rb2010.CTP": 10},
    },
}
app.config.from_mapping(looper_settings)

ma_strategy = DoubleMaStrategy("ma")
tick_source = DataApi()
# tick data for the back-test window
tick_data = tick_source.get_tick("rb2010", start_date="2020-04-10",
                                 end_date="2020-07-21", today=False)
# alternative data source, kept for reference:
# data = data_support.get_future_min("rb2010.SHFE", frq="1min", start="2019-10-01", end="2020-07-15")

app.add_data(tick_data)
app.add_extension(ma_strategy)
app.start()
result = app.get_result(report=True, auto_open=True)
def __init__(self):
    """Extend KNN with edited- and condensed-KNN collaborators."""
    KNN.__init__(self)
    self.DEBUG = False
    self.data_api_impl = DataApi('../../data/')
    # training-set reduction variants used alongside plain k-NN
    self.enn_impl = EditedKNN()
    self.cnn_impl = CondensedKNN()
def __init__(self):
    """Initialize data access only; debug logging disabled."""
    # DataApi takes the path prefix of the data directory, relative to this file
    self.data_api_impl = DataApi('../../data/')
    self.DEBUG = False
        trading_record: format is [(1, datetime1)] where
            1 = open long, 2 = open short, -1 = close long, -2 = close short
        """
        self.data.setdefault(local_symbol, {})["record"] = trading_record
        # flatten each kline bar into [time, open, high, low, close, volume]
        self.data.setdefault(local_symbol, {})["kline"] = [[
            str(kline.datetime), kline.open_price, kline.high_price,
            kline.low_price, kline.close_price, kline.volume
        ] for kline in klines]

    def render(self, path):
        """Render every stored symbol's klines and trade record to `path`."""
        for local_symbol, obj in self.data.items():
            # NOTE(review): opening `path` with mode "w" inside the loop means
            # each symbol overwrites the previous one's output — confirm intent.
            with open(path, "w") as f:
                # NOTE(review): debug print left in — consider removing
                print(obj)
                kline_string = kline_template.render(draw_klines=obj["kline"],
                                                     bs=obj["record"])
                f.write(kline_string)


if __name__ == '__main__':
    plot = Plot("some")
    from data_api import DataApi
    code = "rb2105.SHFE"
    data_api = DataApi(uri="http://192.168.1.239:8124")
    kline = data_api.get_n_min_bar(code, 1, "2021-04-15", "2021-04-16")
    plot.add_kline(code, klines=kline, trading_record=[])
    plot.render("x.html")
def __init__(self):
    """Create the data-access and utility helpers; debug logging off."""
    self.data_api_impl = DataApi('../../data/')
    self.utilities_impl = Utilities()
    self.DEBUG = False