def test_dask_local_hyper():
    task = NewRun().with_hyper_params({'p1': [5, 2, 3]}, 'max.accuracy')
    spec = tag_test(task, 'test_dask_local_hyper')
    run = new_function(command='dask://').run(spec, handler=my_func)
    verify_state(run)
    assert len(run.status.iterations) == 3 + 1, 'hyper parameters test failed'
    pprint(run.to_dict())
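# Illustrative sketch only (not the mlrun implementation): a hyper-param dict
# such as {'p1': [5, 2, 3]} expands into one child run per point in the
# parameter grid (three here), and the 'max.accuracy' selector then marks the
# child with the highest 'accuracy' result as the best run. A stand-in grid
# expansion, assuming a plain cross-product:
from itertools import product


def expand_hyper_params(hyper_params):
    # yield one params dict per combination of the listed values
    keys = list(hyper_params)
    for values in product(*(hyper_params[k] for k in keys)):
        yield dict(zip(keys, values))

# list(expand_hyper_params({'p1': [5, 2, 3]}))
# -> [{'p1': 5}, {'p1': 2}, {'p1': 3}]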
# the head of this import was truncated in the source; it presumably reads
# from conftest, like the conftest imports elsewhere in this suite
from conftest import out_path, tag_test, verify_state

from mlrun import new_function, NewRun, RunObject, get_run_db
from mlrun.utils import run_keys, update_in


def my_func(context, p1=1, p2='a-string'):
    print(f'Run: {context.name} (uid={context.uid})')
    print(f'Params: p1={p1}, p2={p2}\n')
    print('file\n{}\n'.format(context.get_input('infile.txt').get()))
    context.log_result('accuracy', p1 * 2)
    context.log_metric('loss', 7)
    context.log_artifact('chart', body='abc')


base_spec = NewRun(params={'p1': 8}, out_path=out_path)
base_spec.spec.inputs = {'infile.txt': 'infile.txt'}

s3_spec = base_spec.copy().with_secrets('file', 'secrets.txt')
s3_spec.spec.inputs = {'infile.txt': 's3://yarons-tests/infile.txt'}


def test_noparams():
    result = new_function().run(handler=my_func)
    assert result.output('accuracy') == 2, 'failed to run'
    assert result.status.artifacts[0].get('key') == 'chart', 'failed to run'


def test_with_params():
    spec = tag_test(base_spec, 'test_with_params')
    # assumed completion (body truncated in the source), following the
    # test_noparams pattern above; with p1=8, my_func logs accuracy = 16
    result = new_function().run(spec, handler=my_func)
    assert result.output('accuracy') == 16, 'failed to run'
    assert result.status.artifacts[0].get('key') == 'chart', 'failed to run'
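# Illustrative sketch: the 'file' secrets source used by s3_spec above points
# at a local file, conventionally KEY=VALUE lines (e.g. AWS credentials for
# the s3:// input). A minimal stand-in parser for that assumed format:
def read_env_file(path):
    secrets = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            # skip blank lines and comments
            if line and not line.startswith('#'):
                key, _, value = line.partition('=')
                secrets[key.strip()] = value.strip()
    return secrets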
import json
from os import listdir
from tempfile import mktemp

import pytest
import yaml
from conftest import has_secrets, out_path, rundb_path

from mlrun.artifacts import ChartArtifact, TableArtifact
from mlrun import NewRun, new_function
from mlrun.utils import run_keys

run_spec = NewRun(params={'p1': 5},
                  out_path=out_path,
                  outputs=['model.txt', 'chart.html']).set_label('tests', 'kfp')


def my_job(context, p1=1, p2='a-string'):
    # access input metadata, values, files, and secrets (passwords)
    print(f'Run: {context.name} (uid={context.uid})')
    print(f'Params: p1={p1}, p2={p2}')
    print('accesskey = {}'.format(context.get_secret('ACCESS_KEY')))
    print('file\n{}\n'.format(context.get_input('infile.txt').get()))

    # RUN some useful code e.g. ML training, data prep, etc.

    # log scalar result values (job result metrics)
import json
from copy import deepcopy
from os import listdir
from tempfile import mktemp

import pytest
import yaml
from conftest import has_secrets, out_path, rundb_path

from mlrun.artifacts import ChartArtifact, TableArtifact
from mlrun import NewRun, run_start
from mlrun.utils import run_keys

run_spec = NewRun(params={'p1': 5}, out_path=out_path).set_label('tests', 'kfp')


def my_job(context, p1=1, p2='a-string'):
    # access input metadata, values, files, and secrets (passwords)
    print(f'Run: {context.name} (uid={context.uid})')
    print(f'Params: p1={p1}, p2={p2}')
    print('accesskey = {}'.format(context.get_secret('ACCESS_KEY')))
    print('file\n{}\n'.format(context.get_object('infile.txt').get()))

    # RUN some useful code e.g. ML training, data prep, etc.

    # log scalar result values (job result metrics)
    context.log_result('accuracy', p1 * 2)
    context.log_result('loss', p1 * 3)
    time.sleep(1)

    # log scalar values (KFP metrics)
    ctx.log_result('accuracy', p1 * 2)
    ctx.log_result('latency', p1 * 3)

    # log various types of artifacts (and set UI viewers)
    ctx.log_artifact('test.txt', body=b'abc is 123')
    ctx.log_artifact('test.html', body=b'<b> Some HTML <b>', viewer='web-app')

    ctx.logger.info('run complete!')
    return ctx.to_json()


base_spec = NewRun(params={'p1': 8}, out_path=out_path)
base_spec.spec.inputs = {'infile.txt': 'infile.txt'}


def verify_state(result: RunObject):
    state = result.status.state
    assert state == 'completed', 'wrong state ({}) {}'.format(
        state, result.status.error)


def test_simple_function():
    # Thread(target=create_function, args=(myfunction, 4444)).start()
    _thread.start_new_thread(create_function, (myfunction, 4444))
    time.sleep(2)
    spec = tag_test(base_spec, 'simple_function')
def test_dask_local():
    spec = tag_test(NewRun(params={'p1': 3, 'p2': 'vv'}), 'test_dask_local')
    run = new_function(command='dask://',
                       rundb=rundb_path).run(spec, handler=my_func)
    verify_state(run)
    pprint(run.to_dict())
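# For comparison, a minimal sketch of running the same handler in-process
# (no dask scheduler), mirroring the local-runtime pattern of test_noparams
# above; the test name and tag here are illustrative, not from the source:
def test_local_equivalent():
    spec = tag_test(NewRun(params={'p1': 3, 'p2': 'vv'}), 'test_local_equivalent')
    run = new_function(rundb=rundb_path).run(spec, handler=my_func)
    verify_state(run)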