def job_delete(self, job_name):
    """Delete a stored job by name.

    Returns the deleted Job object, or False when no record with
    that name exists in the database.
    """
    record = self.db.read('job', job_name)
    if not record:
        return False
    # Rehydrate the job's owning repository and scenario from the record
    # so the Job can be constructed with its full context before deletion.
    repo = Repository(record['repo'], db=self.db)
    scenario = Scenario(record['scenario'], repo=repo, db=self.db)
    doomed = Job(job_name, repo, scenario, db=self.db)
    doomed.delete()
    return doomed
async def job_run(self, repo, scenario):
    """Create a fresh job, execute it, and record it as the latest job.

    Both the repository and the scenario are updated (and saved) to point
    at the newly executed job. Returns the Job instance.
    """
    new_job = Job(None, repo, scenario, db=self.db)
    await new_job.execute(scenario.data)
    # Stamp the new job onto both owners, repo first, matching the
    # original update order.
    for owner in (repo, scenario):
        owner.latest_job = new_job.name
        owner.save()
    return new_job
def test_case_5(self):
    """job data mirrors its source once a material is assigned"""
    job = Job(source_filename=self.file1)
    job.material = self.material
    assert job.data == job.source
def test_case_3(self):
    """job source is an etree element"""
    job = Job(source_filename=self.file1)
    # load() must succeed before job.source is populated
    assert job.load()
    log.info(type(job.source))
    # isinstance is the idiomatic type check; `type(x) == T` rejects
    # subclasses and is flagged as an anti-pattern.
    assert isinstance(job.source, etree._ElementTree)
def test_case_2(self):
    """job loaded source file"""
    # NOTE: a dead local (`filename = 'box.svg'`) was removed — the Job
    # is constructed from self.file1, so the literal was never used and
    # only misled readers about which file is loaded.
    job = Job(source_filename=self.file1)
    log.info(job.messages)
    assert job.load()
def test_case_1(self):
    """material width and length"""
    job = Job(source_filename=self.file1)
    job.material = self.material
    # Compare both dimensions in one tuple assertion; pass/fail outcome
    # is identical to the original pair of asserts.
    assert (job.material.width, job.material.length) == (38.1, 4572)
'open': _sym+'_Open', 'high': _sym+'_High', 'low': _sym+'_Low', 'close': _sym, 'volume': _sym+'_Volume' }) #bchn = s.get_dataset(DatasetType.BLOCKCHAIN) # correlation(bchn.corr(), 'data/result/blockchain-corr.png', figsize=(32,18)) # pat = s.get_dataset(DatasetType.OHLCV_PATTERN) # bchn.to_csv('data/result/block1chain-dataset.csv', sep=',', encoding='utf-8', index=True, index_label='Date') # pat.to_csv('data/result/ohlcv_pattern.csv', sep=',', encoding='utf-8', index=True, index_label='Date') # fourier_transform(bchn['CapMVRVCur']) m = MNBModel() s = s.time_slice('2018-01-01', '2018-02-27', format='%Y-%m-%d') j = Job(symbol=s, model=m) reports = j.grid_search(x_type=DatasetType.DISCRETE_TA, y_type=DatasetType.DISCRETE_TA, multiprocessing=False, discretize=False, variance_threshold=0.01, params={'fit_prior': False, 'alpha': 0.01}) # Common if isinstance(reports, list): c = ReportCollection(reports) df = c.to_dataframe() print(df.head()) br = min(reports) print('Best config:\n\t{} accuracy: {} mse: {} profit: {}%'.format(str(br), str(br.accuracy()), str(br.mse()), br.profit())) else:
def test_delete():
    """delete() on an existing job reports success"""
    existing = Job('present_name', REPO, SCENARIO, db=DB)
    assert_equals(existing.delete(), True)
def test_dump():
    """dump() serializes an existing job into a dict"""
    dumped = Job('present_name', REPO, SCENARIO, db=DB).dump()
    assert_equals(type(dumped), dict)
def test_load_present():
    """loading an existing job populates its stored attributes"""
    loaded = Job('present_name', REPO, SCENARIO, db=DB)
    loaded.load()
    assert_equals(loaded.attribute, 51)
def test_save():
    """save() returns the persisted record as a dict"""
    job = Job('present_name', REPO, SCENARIO, db=DB)
    job.attribute = 51
    saved = job.save()
    assert_equals(type(saved), dict)
def test_load_absent():
    """loading a missing job marks it as non-existent"""
    missing = Job('missing_name', REPO, SCENARIO, db=DB)
    missing.load()
    assert_equals(missing.exists, False)
log_level=logging.DEBUG, logger='job_test') df = pd.read_csv("./data/result/ohlcv.csv", sep=',', encoding='utf-8', index_col='Date', parse_dates=True) btc = pd.read_csv("./data/coinmetrics.io/btc.csv", sep=',', encoding='utf-8', index_col='date', parse_dates=True) s = Symbol('BTC', ohlcv=df, blockchain=btc, column_map={ 'open': 'BTC_Open', 'high': 'BTC_High', 'low': 'BTC_Low', 'close': 'BTC', 'volume': 'BTC_Volume' }) m = ARIMAModel() s = s.time_slice('2016-12-01', '2016-12-31', format='%Y-%m-%d') j = Job(symbol=s, model=m) r = j.holdout(x_type=DatasetType.OHLCV_PCT, y_type=DatasetType.OHLCV_PCT, univariate_column='close') #r = min(reports) print('Best config: {} mse: {}'.format(str(r), str(r.mse())))
'high': _sym + '_High', 'low': _sym + '_Low', 'close': _sym, 'volume': _sym + '_Volume' }) # bchn = s.get_dataset(DatasetType.BLOCKCHAIN) # correlation(bchn.corr(), 'data/result/blockchain-corr.png', figsize=(32,18)) # pat = s.get_dataset(DatasetType.OHLCV_PATTERN) # bchn.to_csv('data/result/block1chain-dataset.csv', sep=',', encoding='utf-8', index=True, index_label='Date') # pat.to_csv('data/result/ohlcv_pattern.csv', sep=',', encoding='utf-8', index=True, index_label='Date') # fourier_transform(bchn['CapMVRVCur']) m = ModelFactory.create_model('mlp') #s = s.add_lag(7) s = s.time_slice('2018-01-01', '2018-02-27', format='%Y-%m-%d') j = Job(symbol=s, model=m) reports = j.grid_search( x_type=DatasetType.CONTINUOUS_TA, y_type=DatasetType.DISCRETE_TA, #undersample=True, #multiprocessing=False, #discretize=False, #variance_threshold=0.01, ) # Common if isinstance(reports, list): c = ReportCollection(reports) df = c.to_dataframe() print(df.head()) br = min(reports)