Code example #1
def _deploy_v0_1(project_graph=''):
    """Deploy every function defined in a v0_1 project graph.

    For each entry in ``project_graph['project']['functions']`` this builds a
    function object via ``composer``, configures its base image, pip
    libraries, replica counts, optional GPU limits, v3io stream triggers,
    step configuration, environment variables and v3io volume mounts, then
    deploys it and prints the deployment URL.

    Args:
        project_graph: Parsed project definition (nested dict).
            NOTE(review): the default ``''`` is not a usable value — the
            function indexes it as a dict, so callers must always pass a
            real graph. Kept as-is for interface compatibility.
    """
    for function in project_graph['project']['functions']:
        fn = composer(project_graph['apiVersion'],
                      function['function_name'],
                      project=project_graph['project']['name'])

        # Container image and pip dependencies for the build.
        fn.spec.base_spec['spec']['build']['baseImage'] = function[
            'docker_image']
        fn.spec.build.commands = format_pip_libraries(function)

        fn.spec.min_replicas = function['minReplicas']
        fn.spec.max_replicas = function['maxReplicas']

        # Request GPU resources only when the function asks for them.
        if bool(function['gpu']):
            fn.spec.base_spec['spec']['resources'] = {
                'limits': {'nvidia.com/gpu': function['num_gpus']}
            }

        fn.set_env('V3IO_ACCESS_KEY', os.getenv('V3IO_ACCESS_KEY'))
        # Dashes are replaced because they are not valid in consumer-group
        # names used in the v3io stream trigger URL below.
        consumer_group = function['function_name'].replace('-', '_')

        _input_streams = function['input_streams']
        for _stream, _stream_opts in _input_streams.items():
            # Stream location is defined once at project level.
            _stream_def = project_graph['project']['v3io_streams'][_stream]
            _container = _stream_def['container']
            _stream_path = _stream_def['path']

            _maxWorkers = _stream_opts['maxWorkers']

            # Optional per-stream settings: catch only the missing-key case
            # (a bare ``except:`` would also swallow SystemExit and
            # KeyboardInterrupt) and fall back to defaults.
            try:
                _v3io_access_key = _stream_opts['v3io_access_key']
            except KeyError:
                print("Using default v3io_access_key from environment")
                _v3io_access_key = os.getenv('V3IO_ACCESS_KEY')

            try:
                _pollingIntervalMs = _stream_opts['pollingIntervalMs']
            except KeyError:
                print('Using default pollingIntervalMs')
                _pollingIntervalMs = 500

            try:
                _seekTo = _stream_opts['seekTo']
            except KeyError:
                print('Using default seek to latest')
                _seekTo = 'latest'

            try:
                _readBatchSize = _stream_opts['readBatchSize']
            except KeyError:
                print('Using default readBatchSize 100')
                _readBatchSize = 100

            trigger_spec = {
                'kind': 'v3ioStream',
                'url': "http://%s/%s/%s" % (
                    'v3io-webapi:8081', _container,
                    f'{_stream_path}@{consumer_group}'),
                "password": _v3io_access_key,
                "maxWorkers": _maxWorkers,
                'attributes': {
                    "pollingIntervalMs": _pollingIntervalMs,
                    "seekTo": _seekTo,
                    "readBatchSize": _readBatchSize,
                }
            }
            fn.add_trigger(_stream, trigger_spec)

        # These should be defined in your YAML.
        _step_config = {
            'MODULE_PATHS': function['module_paths'],
            'IMPORT_MODULES': function['import_modules'],
            'CLASS_LOAD_FUNCTION': function['class_load_function'],
            'PROCESSING_FUNCTION': function['processing_function'],
            'STEP_NAME': function['function_name'],
            'OUTPUT_STREAM_CONTAINER': function['output_stream_container'],
            'OUTPUTS': function['outputs']
        }

        fn.set_env("STEP_CONFIG", json.dumps(_step_config))
        if 'env_custom' in function:
            for env_var in function['env_custom']:
                fn.set_env(env_var['name'], env_var['value'])

        # Mount v3io volumes declared at project level.
        if 'v3io_volumes' in project_graph['project']:
            _volumes = project_graph['project']['v3io_volumes']
            for volume in _volumes.keys():
                fn.apply(
                    mount_v3io(name=volume,
                               remote=_volumes[volume]['remote'],
                               mount_path=_volumes[volume]['mount_path']))

        if 'class_init' in function:
            fn.set_env("CLASS_INIT", json.dumps(function['class_init']))

        if 'loggerSinks' in function:
            fn.spec.base_spec['spec']['loggerSinks'] = function['loggerSinks']

        deployment_url = fn.deploy(project=project_graph['project']['name'])
        print(f'Function deployed: {deployment_url}')
Code example #2
def _deploy_v1alpha1(project_graph=''):
    """Deploy every function defined in a v1alpha1 project graph.

    For each entry in ``project_graph['project']['functions']`` this builds a
    function object via ``composer``, configures its base image, replicas,
    optional GPU limits, a single v3io stream trigger and the step
    environment variables, then deploys it and prints the deployment URL.

    Args:
        project_graph: Parsed project definition (nested dict).
            NOTE(review): the default ``''`` is not a usable value — the
            function indexes it as a dict, so callers must always pass a
            real graph. Kept as-is for interface compatibility.
    """
    for function in project_graph['project']['functions']:
        fn = composer(project_graph['apiVersion'],
                      function['function_name'],
                      project=project_graph['project']['name'])

        fn.with_http(workers=1).apply(mount_v3io())

        GPU = bool(function['gpu'])
        fn.spec.base_spec['spec']['build']['baseImage'] = function[
            'docker_image']
        fn.spec.build.commands = ['pip install v3io==0.4.0']

        fn.spec.min_replicas = function['minReplicas']
        fn.spec.max_replicas = function['maxReplicas']

        # Request GPU resources only when the function asks for them.
        if GPU:
            fn.spec.base_spec['spec']['resources'] = {
                'limits': {'nvidia.com/gpu': function['num_gpus']}
            }

        fn.set_env('V3IO_ACCESS_KEY', os.getenv('V3IO_ACCESS_KEY'))
        INPUT_STREAM = function['input_stream']
        # Dashes are replaced because they are not valid in consumer-group
        # names used in the v3io stream trigger URL below.
        consumer_group = function['function_name'].replace('-', '_')

        maxWorkers = function['maxWorkers']

        trigger_spec = {
            'kind': 'v3ioStream',
            'url': "http://%s/%s/%s" % (
                'v3io-webapi:8081', function['input_stream_container'],
                f'{INPUT_STREAM}@{consumer_group}'),
            "password": os.getenv('V3IO_ACCESS_KEY'),
            "maxWorkers": maxWorkers,
            'attributes': {
                "pollingIntervalMs": 500,
                "seekTo": "latest",
                "readBatchSize": 100,
            }
        }
        fn.add_trigger('input-stream', trigger_spec)

        # These should be defined in your YAML.
        fn.set_env('MODULE_PATHS', function['module_paths'])
        fn.set_env('IMPORT_MODULES', function['import_modules'])
        fn.set_env('CLASS_LOAD_FUNCTION', function['class_load_function'])
        fn.set_env('PROCESSING_FUNCTION', function['processing_function'])
        fn.set_env('STEP_NAME', function['function_name'])
        fn.set_env('POST_PROCESS_FUNCTION', function['post_process_function'])
        fn.set_env('OUTPUT_STREAM', function['output_stream'])
        fn.set_env('OUTPUT_STREAM_CONTAINER',
                   function['output_stream_container'])

        if 'env_custom' in function:
            for env_var in function['env_custom']:
                fn.set_env(env_var['name'], env_var['value'])

        # NOTE(review): mount_v3io() was already applied above via
        # with_http(...).apply(...) — this second apply looks redundant but
        # is kept to preserve the original behavior; confirm before removing.
        fn.apply(mount_v3io())

        # Report where the function was deployed (consistent with the other
        # deployers); previously the result was assigned but never used.
        addr = fn.deploy(project=project_graph['project']['name'])
        print(f'Function deployed: {addr}')
Code example #3
File: apiv2alpha3.py  Project: marcelonyc/iguazioig
def _deploy_v2alpha3(project_graph=''):
    """Deploy every function defined in a v2alpha3 project graph.

    For each entry in ``project_graph['project']['functions']`` this builds a
    function object via ``composer``, configures its base image, replicas,
    optional GPU limits, a single v3io stream trigger, the JSON step
    configuration, environment variables and v3io volume mounts, then
    deploys it and prints the deployment URL.

    Args:
        project_graph: Parsed project definition (nested dict).
            NOTE(review): the default ``''`` is not a usable value — the
            function indexes it as a dict, so callers must always pass a
            real graph. Kept as-is for interface compatibility.
    """
    for function in project_graph['project']['functions']:
        fn = composer(project_graph['apiVersion'],
                      function['function_name'],
                      project=project_graph['project']['name'])

        # Container image and pinned v3io client for the build.
        fn.spec.base_spec['spec']['build']['baseImage'] = function[
            'docker_image']
        fn.spec.build.commands = ['pip install v3io==0.5.0']

        fn.spec.min_replicas = function['minReplicas']
        fn.spec.max_replicas = function['maxReplicas']

        # Request GPU resources only when the function asks for them.
        if bool(function['gpu']):
            fn.spec.base_spec['spec']['resources'] = {
                'limits': {'nvidia.com/gpu': function['num_gpus']}
            }

        fn.set_env('V3IO_ACCESS_KEY', os.getenv('V3IO_ACCESS_KEY'))
        INPUT_STREAM = function['input_stream']
        # Dashes are replaced because they are not valid in consumer-group
        # names used in the v3io stream trigger URL below.
        consumer_group = function['function_name'].replace('-', '_')

        maxWorkers = function['maxWorkers']

        trigger_spec = {
            'kind': 'v3ioStream',
            'url': "http://%s/%s/%s" % (
                'v3io-webapi:8081', function['input_stream_container'],
                f'{INPUT_STREAM}@{consumer_group}'),
            "password": os.getenv('V3IO_ACCESS_KEY'),
            "maxWorkers": maxWorkers,
            'attributes': {
                "pollingIntervalMs": 500,
                "seekTo": "earliest",
                "readBatchSize": 100,
            }
        }
        fn.add_trigger('input-stream', trigger_spec)

        # These should be defined in your YAML.  Built as a single literal
        # (matches _deploy_v0_1) instead of nine incremental assignments.
        _step_config = {
            'MODULE_PATHS': function['module_paths'],
            'IMPORT_MODULES': function['import_modules'],
            'CLASS_LOAD_FUNCTION': function['class_load_function'],
            'PROCESSING_FUNCTION': function['processing_function'],
            'STEP_NAME': function['function_name'],
            'OUTPUT_STREAM_CONTAINER': function['output_stream_container'],
            'OUTPUTS': function['outputs'],
        }

        fn.set_env("STEP_CONFIG", json.dumps(_step_config))
        if 'env_custom' in function:
            for env_var in function['env_custom']:
                fn.set_env(env_var['name'], env_var['value'])

        # Mount v3io volumes declared at project level.
        if 'v3io_volumes' in project_graph['project']:
            _volumes = project_graph['project']['v3io_volumes']
            for volume in _volumes.keys():
                fn.apply(
                    mount_v3io(name=volume,
                               remote=_volumes[volume]['remote'],
                               mount_path=_volumes[volume]['mount_path']))

        if 'class_init' in function:
            fn.set_env("CLASS_INIT", json.dumps(function['class_init']))

        # Report where the function was deployed (consistent with the other
        # deployers); previously the result was assigned but never used.
        addr = fn.deploy(project=project_graph['project']['name'])
        print(f'Function deployed: {addr}')