Example #1
def on_build_serving_model(**kwargs):
    """
    1. give model image name
    2. feed (mount tracking dir to /model, model name, model version, docker repo) and run seldon python model wrapper docker command
    3. go build dir run docker image build command
    4. login dockerhub with username and password
    5. push docker image to dockerhub
    """
    working_dir = wu.make_task_working_dir(**kwargs)
    # Collect the downloaded model output and the serving code into this
    # task's working dir, which is mounted at /model for the wrapper below.
    download_working_dir = wu.get_xcom_value(kwargs['ti'], 'working_dir',
                                             'op_download_train_output_model')
    wu.cpdir(download_working_dir, working_dir)
    model_files = os.path.join(train_code_dir, '*')
    wu.copy_hdfs_to_local(model_files, working_dir)

    # generate build files
    cmd = 'docker run --rm -v {}:/model deeplearningrepo/core-python-wrapper:0.7 /model {} {} {} --force'.format(
        working_dir, model_name, model_version, docker_repo)
    sp.check_call(cmd, shell=True)

    build_dir = os.path.join(working_dir, 'build')
    model_image = '{}/{}:{}'.format(docker_repo, model_name.lower(),
                                    model_version)
    cmd = 'docker build --force-rm=true -t {} .'.format(model_image)
    sp.check_call(cmd, cwd=build_dir, shell=True)

    # Note: passing the password via -p exposes it on the command line.
    cmd = 'docker login -u {} -p {}'.format(docker_username, docker_password)
    sp.check_call(cmd, shell=True)

    cmd = 'docker push {}'.format(model_image)
    sp.check_call(cmd, cwd=build_dir, shell=True)
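
These on_* callables read XCom through kwargs['ti'], which implies they run as Airflow PythonOperator tasks. A minimal wiring sketch, assuming Airflow 1.x; the dag_id, schedule, and start_date are assumptions invented for illustration:

from datetime import datetime

from airflow import DAG
from airflow.operators.python_operator import PythonOperator

# Hypothetical DAG wiring -- only the task_ids are taken from the examples.
dag = DAG('serving_model_pipeline', schedule_interval=None,
          start_date=datetime(2019, 1, 1))

op_download_train_output_model = PythonOperator(
    task_id='op_download_train_output_model',
    python_callable=on_download_train_output_model,
    provide_context=True,  # makes kwargs['ti'] available for XCom access
    dag=dag)

op_build_serving_model = PythonOperator(
    task_id='op_build_serving_model',
    python_callable=on_build_serving_model,
    provide_context=True,
    dag=dag)

op_download_train_output_model >> op_build_serving_model
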
Example #2
def download_offline_binary(host=hdfs_host):
    # Refresh the local jar dir, then pull the offline jars down from HDFS.
    shutil.rmtree(jar_local_path, ignore_errors=True)
    jar_files = os.path.join(jar_hdfs_path, '*')
    mkdir(jar_local_path)
    wu.copy_hdfs_to_local(jar_files, jar_local_path)
    return jar_local_path
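
The examples above (and the next one) all rely on wu.copy_hdfs_to_local, whose implementation is not part of this listing. A plausible stand-in, assuming it simply shells out to the HDFS CLI (the real helper may differ):

import subprocess as sp

def copy_hdfs_to_local(hdfs_path, local_dir):
    # Hypothetical sketch of wu.copy_hdfs_to_local: fetch one or more HDFS
    # files (globs such as ".../*" are expanded by the hdfs CLI) into local_dir.
    sp.check_call('hdfs dfs -get {} {}'.format(hdfs_path, local_dir), shell=True)
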
Example #3
def on_download_train_output_model(**kwargs):
    working_dir = wu.make_task_working_dir(**kwargs)

    # The training task publishes its output dir via XCom; final_eval_model.txt
    # in that dir records the path of the final model.
    train_output_dir = wu.get_xcom_value(kwargs['ti'], 'train_output_dir',
                                         'op_train_model')
    final_eval_model_file_hdfs = os.path.join(train_output_dir,
                                              "final_eval_model.txt")
    wu.copy_hdfs_to_local(final_eval_model_file_hdfs, working_dir)

    final_eval_model_file_local = os.path.join(working_dir,
                                               "final_eval_model.txt")
    with open(final_eval_model_file_local, 'r') as f:
        final_model_path = f.read().strip()
    log.info("final model path %s", final_model_path)

    # Write a checkpoint file pointing at the final model so the files copied
    # below can be resolved by name from the local working dir.
    checkpoint_path = os.path.join(working_dir, "checkpoint")
    with open(checkpoint_path, 'w') as f:
        final_model_name = final_model_path.split('/')[-1]
        f.write("model_checkpoint_path: \"%s\"" % final_model_name)

    train_output_files = final_model_path + '*'
    log.info("copy from %s to local %s", train_output_files, working_dir)
    wu.copy_hdfs_to_local(train_output_files, working_dir)
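
The checkpoint file written above follows TensorFlow's CheckpointState text format, so once the model files land in working_dir a later step can resolve them by name. A small sketch of that follow-up, assuming the training output really is a TensorFlow checkpoint (the format of the checkpoint file suggests this, but the examples do not show the training code):

import tensorflow as tf

# Hypothetical follow-up step: with the "checkpoint" file and the copied
# final_model files now in working_dir, latest_checkpoint() returns the
# full local prefix of the final model.
latest = tf.train.latest_checkpoint(working_dir)
log.info("restoring serving model from %s", latest)
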