def install_local_livy(args):
    """Download a local Livy server and register it as a systemd service.

    Idempotent: does nothing when the ~/.ensure_dir/local_livy_ensured
    marker already exists on the remote host.
    """
    marker = f'/home/{args.os_user}/.ensure_dir/local_livy_ensured'
    if exists(conn, marker):
        return
    archive = f'/opt/livy-server-{args.livy_version}.zip'
    # fetch and unpack the Livy distribution under /opt/livy/
    conn.sudo(f'wget http://archive.cloudera.com/beta/livy/livy-server-'
              f'{args.livy_version}.zip -O {archive}')
    conn.sudo(f'unzip {archive} -d /opt/')
    conn.sudo(f'mv /opt/livy-server-{args.livy_version}/ /opt/livy/')
    # runtime and log directories owned by the service user
    owner = f'{args.os_user}:{args.os_user}'
    conn.sudo('mkdir -p /var/run/livy')
    conn.sudo('mkdir -p /opt/livy/logs')
    conn.sudo(f'chown {owner} -R /var/run/livy')
    conn.sudo(f'chown {owner} -R /opt/livy/')
    # install the systemd unit files
    conn.put(templates_dir + 'livy-server-cluster.service',
             '/tmp/livy-server-cluster.service')
    conn.sudo('mv /tmp/livy-server-cluster.service /opt/')
    conn.put(templates_dir + 'livy-server.service',
             '/tmp/livy-server.service')
    conn.sudo(f"sed -i 's|OS_USER|{args.os_user}|' /tmp/livy-server.service")
    conn.sudo("chmod 644 /tmp/livy-server.service")
    conn.sudo(
        'cp /tmp/livy-server.service /etc/systemd/system/livy-server.service'
    )
    conn.sudo("systemctl daemon-reload")
    conn.sudo("systemctl enable livy-server")
    conn.sudo(f'touch {marker}')
# Example #2
def _get_latest_source(c, site_dir):
    """Ensure *site_dir* on the server holds the repo, reset to local HEAD.

    c: Fabric connection; site_dir: pathlib-style path to the checkout.
    Clones on first deploy, otherwise fetches, then hard-resets the server
    checkout to the commit currently checked out on the local machine.
    """
    if exists(c, site_dir / ".git"):
        c.run(f"cd {site_dir} && git fetch")
    else:
        c.run(f"git clone {REPO_URL} {site_dir}")
    # strip the trailing newline from git's output so the reset command
    # is a single clean shell line (matches the other deploy helpers)
    current_commit = c.local("git log -n 1 --format=%H").stdout.strip()
    c.run(f"cd {site_dir} && git reset --hard {current_commit}")
def configure_local_spark_kernels(args, python_venv_path):
    """Render the local Spark interpreter config for Zeppelin and restart it.

    Fills the interpreter_spark.json template with the endpoint URL, OS
    user, driver memory and Python venv path, installs it under
    /opt/zeppelin/conf, then restarts the zeppelin-notebook service.
    The template work is skipped (restart still happens) when the
    local_spark_kernel_ensured marker file already exists.
    """
    if not exists(
            conn, '/home/' + args.os_user +
            '/.ensure_dir/local_spark_kernel_ensured'):
        conn.put(templates_dir + 'interpreter_spark.json',
                 '/tmp/interpreter.json')
        # substitute the template placeholders in place
        conn.sudo('sed -i "s|ENDPOINTURL|' + args.endpoint_url +
                  '|g" /tmp/interpreter.json')
        conn.sudo('sed -i "s|OS_USER|' + args.os_user +
                  '|g" /tmp/interpreter.json')
        spark_memory = get_spark_memory()
        conn.sudo(
            'sed -i "s|DRIVER_MEMORY|{}m|g" /tmp/interpreter.json'.format(
                spark_memory))
        conn.sudo(
            'sed -i "s|PYTHON_VENV_PATH|{}|g" /tmp/interpreter.json'.format(
                python_venv_path))
        update_zeppelin_interpreters(args.multiple_clusters, r_enabled,
                                     'local')
        conn.sudo(
            'cp -f /tmp/interpreter.json /opt/zeppelin/conf/interpreter.json')
        conn.sudo('chown ' + args.os_user + ':' + args.os_user +
                  ' -R /opt/zeppelin/')
        conn.sudo('touch /home/' + args.os_user +
                  '/.ensure_dir/local_spark_kernel_ensured')
    # full stop/start cycle so the new interpreter settings take effect
    conn.sudo("systemctl stop zeppelin-notebook")
    conn.sudo("systemctl daemon-reload")
    conn.sudo("systemctl enable zeppelin-notebook")
    conn.sudo("systemctl start zeppelin-notebook")
def create_user():
    """Create args.os_user on the remote host with sudo and SSH access.

    Connects as the bootstrap account, creates the new user in the sudo
    group, grants passwordless sudo, and copies the bootstrap account's
    authorized_keys to the new user.  Idempotent via the
    ~/.ssh_user_ensured marker.  Exits the process on failure.
    """
    initial_user = '******'
    sudo_group = 'sudo'
    with Connection(host=args.hostname, user=initial_user,
                    connect_kwargs={'key_filename': args.pkey}) as conn:
        try:
            if not exists(conn,
                          '/home/{}/.ssh_user_ensured'.format(initial_user)):
                conn.sudo('useradd -m -G {1} -s /bin/bash {0}'
                          .format(args.os_user, sudo_group))
                # only one placeholder in the template; drop the unused
                # second format argument the original passed
                conn.sudo(
                    'bash -c \'echo "{} ALL = NOPASSWD:ALL" >> /etc/sudoers\''
                    .format(args.os_user))
                conn.sudo('mkdir /home/{}/.ssh'.format(args.os_user))
                # temporarily owned by the bootstrap user so it can copy keys
                conn.sudo('chown -R {0}:{0} /home/{1}/.ssh/'
                          .format(initial_user, args.os_user))
                conn.sudo('cat /home/{0}/.ssh/authorized_keys > '
                          '/home/{1}/.ssh/authorized_keys'
                          .format(initial_user, args.os_user))
                # hand ownership over to the new user with strict perms
                conn.sudo(
                    'chown -R {0}:{0} /home/{0}/.ssh/'.format(args.os_user))
                conn.sudo('chmod 700 /home/{0}/.ssh'.format(args.os_user))
                conn.sudo('chmod 600 /home/{0}/.ssh/authorized_keys'
                          .format(args.os_user))
                conn.sudo(
                    'touch /home/{}/.ssh_user_ensured'.format(initial_user))
        except Exception as err:
            # %s placeholder: logging does not concatenate extra args and
            # the original call raised an internal formatting error
            logging.error('Failed to create new os_user: %s', str(err))
            sys.exit(1)
# Example #5
def _update_settings(c, source_dir):
    """Adjust Django settings for production and install a secret key.

    Disables DEBUG, pins ALLOWED_HOSTS/SITE_DOMAIN to the deploy host,
    forces https, and generates brynweb/secret_key.py on first deploy.
    """
    settings_path = source_dir / "brynweb/settings.py"

    # (pattern, replacement) pairs applied in order with sed -i
    substitutions = (
        ("DEBUG = True", "DEBUG = False"),
        ("ALLOWED_HOSTS =.\\+$", f'ALLOWED_HOSTS = ["{c.host}"]'),
        ("SITE_SCHEME =.\\+$", 'SITE_SCHEME = "https"'),
        ("SITE_DOMAIN =.\\+$", f'SITE_DOMAIN = "{c.host}"'),
    )
    for pattern, replacement in substitutions:
        c.run(f"sed -i 's/{pattern}/{replacement}/g' {settings_path}")

    # Generate the secret key once, then import it from settings
    secret_key_path = source_dir / "brynweb/secret_key.py"
    if not exists(c, secret_key_path):
        alphabet = "abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)"
        rng = random.SystemRandom()
        key = "".join(rng.choice(alphabet) for _ in range(50))
        append(c, secret_key_path, f'SECRET_KEY = "{key}"')
    append(c, settings_path, "\nfrom .secret_key import SECRET_KEY")
def configure_local_livy_kernels(args):
    """Render the Livy interpreter config for Zeppelin and start Livy.

    Fills the interpreter_livy.json template, finds the first free port
    starting at 8998 for the Livy server, writes the port into both the
    interpreter config and /opt/livy/conf/livy.conf, then starts the
    livy-server and zeppelin-notebook services.  The configuration work
    is skipped when the local_livy_kernel_ensured marker already exists.
    """
    if not exists(
            conn, '/home/' + args.os_user +
            '/.ensure_dir/local_livy_kernel_ensured'):
        port_number_found = False
        default_port = 8998
        livy_port = ''
        conn.put(templates_dir + 'interpreter_livy.json',
                 '/tmp/interpreter.json')
        # substitute the template placeholders in place
        conn.sudo('sed -i "s|ENDPOINTURL|' + args.endpoint_url +
                  '|g" /tmp/interpreter.json')
        conn.sudo('sed -i "s|OS_USER|' + args.os_user +
                  '|g" /tmp/interpreter.json')
        spark_memory = get_spark_memory()
        conn.sudo(
            'sed -i "s|DRIVER_MEMORY|{}m|g" /tmp/interpreter.json'.format(
                spark_memory))
        # probe ports upward from 8998: nmap reports "closed" for a port
        # with no listener, so grep succeeding (echo 0) means it is free
        while not port_number_found:
            port_free = conn.sudo(
                'nmap -p ' + str(default_port) +
                ' localhost | grep "closed" > /dev/null; echo $?').stdout
            port_free = port_free[:1]
            if port_free == '0':
                livy_port = default_port
                port_number_found = True
            else:
                default_port += 1
        conn.sudo('sed -i "s|LIVY_PORT|' + str(livy_port) +
                  '|g" /tmp/interpreter.json')
        update_zeppelin_interpreters(args.multiple_clusters, r_enabled,
                                     'local')
        conn.sudo(
            'cp -f /tmp/interpreter.json /opt/zeppelin/conf/interpreter.json')
        conn.sudo('echo "livy.server.port = ' + str(livy_port) +
                  '" >> /opt/livy/conf/livy.conf')
        conn.sudo(
            '''echo "SPARK_HOME='/opt/spark/'" >> /opt/livy/conf/livy-env.sh'''
        )
        # comment out every line of the blacklist so Livy accepts all
        # Spark settings
        if exists(conn, '/opt/livy/conf/spark-blacklist.conf'):
            conn.sudo('sed -i "s/^/#/g" /opt/livy/conf/spark-blacklist.conf')
        conn.sudo("systemctl start livy-server")
        conn.sudo('chown ' + args.os_user + ':' + args.os_user +
                  ' -R /opt/zeppelin/')
        conn.sudo('touch /home/' + args.os_user +
                  '/.ensure_dir/local_livy_kernel_ensured')
    conn.sudo("systemctl daemon-reload")
    conn.sudo("systemctl start zeppelin-notebook")
# Example #7
def install_cntk(os_user, cntk_version):
    """Install the CNTK GPU wheel for python3.5 (marker-file idempotent)."""
    marker = f'/home/{os_user}/.ensure_dir/cntk_ensured'
    if exists(datalab.fab.conn, marker):
        return
    # keep yum from pulling 32-bit packages alongside the 64-bit ones
    datalab.fab.conn.sudo('echo "exclude=*.i386 *.i686" >> /etc/yum.conf')
    manage_pkg('clean', 'remote', 'all')
    manage_pkg('update-minimal --security -y', 'remote', '')
    manage_pkg('-y install --nogpgcheck', 'remote', 'openmpi openmpi-devel')
    datalab.fab.conn.sudo(
        f'pip3.5 install https://cntk.ai/PythonWheel/GPU/cntk-{cntk_version}'
        f'-cp35-cp35m-linux_x86_64.whl --no-cache-dir')
    datalab.fab.conn.sudo(f'touch {marker}')
# Example #8
def update_source(c):
    """Clone or fetch the repo on the server, then reset it to local HEAD.

    Hard-resets the server checkout to the commit currently checked out
    on the local machine, so only committed work is deployed.
    """
    source_dir = get_source_dir(c)
    if exists(c, source_dir + '/.git'):
        c.run('cd %s && git fetch' % source_dir)
    else:
        c.run('git clone %s %s' % (REPO_URL, source_dir))
    # strip the trailing newline from git's output before interpolating
    # it into the reset command
    current_commit = c.local('git log -n 1 --format=%H', hide=True).stdout.strip()
    c.run(f'cd {source_dir} && git reset --hard {current_commit}')
# Example #9
def install_mxnet(os_user, mxnet_version):
    """Install mxnet-cu101 and opencv-python (marker-file idempotent)."""
    marker = f'/home/{os_user}/.ensure_dir/mxnet_ensured'
    if exists(datalab.fab.conn, marker):
        return
    datalab.fab.conn.sudo(
        f'pip3 install mxnet-cu101=={mxnet_version} opencv-python'
        ' --no-cache-dir')
    datalab.fab.conn.sudo(f'touch {marker}')
# Example #10
def ensure_dir_endpoint():
    """Create ~/.ensure_dir for args.os_user if missing.

    Exits the process on failure after logging the traceback.
    """
    try:
        if not exists(conn, '/home/{}/.ensure_dir'.format(args.os_user)):
            conn.sudo('mkdir /home/{}/.ensure_dir'.format(args.os_user))
    except Exception as err:
        # %s placeholder: logging does not concatenate extra positional args
        logging.error('Failed to create ~/.ensure_dir/: %s', str(err))
        traceback.print_exc()
        sys.exit(1)
# Example #11
def ensure_jre_jdk(os_user):
    """Install OpenJDK 8 JRE and JDK via yum (marker-file idempotent).

    Exits the process on installation failure.
    """
    if not exists(datalab.fab.conn,'/home/' + os_user + '/.ensure_dir/jre_jdk_ensured'):
        try:
            manage_pkg('-y install', 'remote', 'java-1.8.0-openjdk')
            manage_pkg('-y install', 'remote', 'java-1.8.0-openjdk-devel')
            datalab.fab.conn.sudo('touch /home/' + os_user + '/.ensure_dir/jre_jdk_ensured')
        except Exception:
            # narrowed from bare except so SystemExit/KeyboardInterrupt
            # propagate instead of being converted to exit(1)
            sys.exit(1)
# Example #12
def ensure_sbt(os_user):
    """Register the Bintray sbt repo and install sbt (marker-file idempotent).

    Exits the process on installation failure.
    """
    if not exists(datalab.fab.conn,'/home/{}/.ensure_dir/sbt_ensured'.format(os_user)):
        try:
            datalab.fab.conn.sudo('curl https://bintray.com/sbt/rpm/rpm | sudo tee /etc/yum.repos.d/bintray-sbt-rpm.repo')
            manage_pkg('-y install', 'remote', 'sbt')
            datalab.fab.conn.sudo('touch /home/{}/.ensure_dir/sbt_ensured'.format(os_user))
        except Exception:
            # narrowed from bare except so SystemExit/KeyboardInterrupt
            # propagate instead of being converted to exit(1)
            sys.exit(1)
# Example #13
def _get_latest_source(c):
    """Fetch or clone the repo in the remote cwd and reset to local HEAD."""
    c.run('pwd')
    # clone only on first deploy; afterwards just refresh the remotes
    if not exists(c, '.git'):
        c.run(f'git clone {REPO_URL} .')
    else:
        c.run('git fetch')
    latest = lrun('git log -n 1 --format=%H')
    c.run(f'git reset --hard {latest.stdout.strip()}')
# Example #14
def create_key_dir_endpoint():
    """Create ~/keys for args.os_user if missing.

    Exits the process on failure after logging the traceback.
    """
    try:
        if not exists(conn, '/home/{}/keys'.format(args.os_user)):
            conn.run('mkdir /home/{}/keys'.format(args.os_user))
    except Exception as err:
        # %s placeholder: logging does not concatenate extra positional args
        logging.error('Failed create keys directory as ~/keys: %s', str(err))
        traceback.print_exc()
        sys.exit(1)
# Example #15
def ensure_scala(scala_link, scala_version, os_user):
    """Download and install the Scala rpm (marker-file idempotent).

    Exits the process on installation failure.
    """
    if not exists(datalab.fab.conn,'/home/' + os_user + '/.ensure_dir/scala_ensured'):
        try:
            datalab.fab.conn.sudo('wget {}scala-{}.rpm -O /tmp/scala.rpm'.format(scala_link, scala_version))
            datalab.fab.conn.sudo('rpm -i /tmp/scala.rpm')
            datalab.fab.conn.sudo('touch /home/' + os_user + '/.ensure_dir/scala_ensured')
        except Exception:
            # narrowed from bare except so SystemExit/KeyboardInterrupt
            # propagate instead of being converted to exit(1)
            sys.exit(1)
# Example #16
def configure_notebook(keyfile, hoststring):
    """Stage Zeppelin dataengine interpreter configs on the notebook node.

    Uploads the Livy or Spark interpreter template (chosen by the
    notebook_multiple_clusters env var) plus spark-defaults, injects the
    executor memory, and installs the dataengine config script and the
    datalab python package if they are not present yet.

    keyfile: SSH key passed through to get_spark_memory.
    hoststring: unused here — kept for the caller's signature.
    """
    templates_dir = '/root/templates/'
    scripts_dir = '/root/scripts/'
    conn.run('mkdir -p /tmp/{}/'.format(args.cluster_name))
    # pick the interpreter template matching the cluster mode
    if os.environ['notebook_multiple_clusters'] == 'true':
        conn.put(
            templates_dir + 'dataengine_interpreter_livy.json',
            '/tmp/{}/dataengine_interpreter.json'.format(args.cluster_name))
    else:
        conn.put(
            templates_dir + 'dataengine_interpreter_spark.json',
            '/tmp/{}/dataengine_interpreter.json'.format(args.cluster_name))
    conn.put(
        templates_dir + 'notebook_spark-defaults_local.conf',
        '/tmp/{}/notebook_spark-defaults_local.conf'.format(args.cluster_name))
    # host part of spark://host:port
    spark_master_ip = args.spark_master.split('//')[1].split(':')[0]
    spark_memory = get_spark_memory(True, args.os_user, spark_master_ip,
                                    keyfile)
    conn.run(
        'sed -i "s|EXECUTOR_MEMORY|{}m|g " /tmp/{}/dataengine_interpreter.json'
        .format(spark_memory, args.cluster_name))
    conn.run(
        'echo "spark.executor.memory {0}m" >> /tmp/{1}/notebook_spark-defaults_local.conf'
        .format(spark_memory, args.cluster_name))
    # install the config-generation script once
    if not exists(conn,
                  '/usr/local/bin/zeppelin_dataengine_create_configs.py'):
        conn.put(scripts_dir + 'zeppelin_dataengine_create_configs.py',
                 '/tmp/zeppelin_dataengine_create_configs.py')
        conn.sudo(
            'cp /tmp/zeppelin_dataengine_create_configs.py /usr/local/bin/zeppelin_dataengine_create_configs.py'
        )
        conn.sudo(
            'chmod 755 /usr/local/bin/zeppelin_dataengine_create_configs.py')
    # ship the datalab package to the node if missing; the tar is built
    # on the machine running this script (conn.local), then uploaded
    if not exists(conn, '/usr/lib/python3.8/datalab/'):
        conn.sudo('mkdir -p /usr/lib/python3.8/datalab/')
        conn.local(
            'cd  /usr/lib/python3.8/datalab/; tar -zcvf /tmp/datalab.tar.gz *')
        conn.put('/tmp/datalab.tar.gz', '/tmp/datalab.tar.gz')
        conn.sudo(
            'tar -zxvf /tmp/datalab.tar.gz -C /usr/lib/python3.8/datalab/')
        conn.sudo('chmod a+x /usr/lib/python3.8/datalab/*')
        # mirror into lib64 on distros that use it
        if exists(conn, '/usr/lib64'):
            conn.sudo('mkdir -p /usr/lib64/python3.8')
            conn.sudo(
                'ln -fs /usr/lib/python3.8/datalab /usr/lib64/python3.8/datalab'
            )
# Example #17
    def __enter__(self):
        """Activate the virtualenv for subsequent commands on the connection.

        Creates the env when missing and self._create is set, recreates it
        when self._recreate is set, and raises OSError when the env is
        missing and creation is disabled.  Appends the activate script to
        the connection's command prefixes so later commands run inside
        the env.
        """
        activate = posixpath.join(self._path, 'bin/activate')
        c = self._c
        path = self._path

        if not exists(c, activate) and not self._create:
            raise OSError("Cannot activate virtualenv %s" % path)

        elif not exists(c, activate) and self._create:
            create_virtual_env(self._c, self._path)

        elif self._recreate:
            # env exists but a clean rebuild was requested
            remove_virtual_env(self._c, self._path)
            create_virtual_env(self._c, self._path)

        self._c.command_prefixes.append('. %s' % activate)
        return self
# Example #18
 def remote_exists(self, file):
     """Return whether *file* exists on the remote host.

     Accepts a str or pathlib.Path; a Path is resolved and converted
     to str before the check.
     """
     self.check_remote_conn()
     # files.exists only accepts strings
     if isinstance(file, Path):
         file = str(file.resolve())
     return files.exists(self.conn, file)
# Example #19
def upload_template_and_reload(c, name):
    """
    Uploads a template only if it has changed, and if so, reload the
    related service.

    Looks up *name* in the module-level ``templates`` mapping, renders the
    local template with values from ``env``, compares it with the remote
    copy and, when different, uploads it and applies the configured owner,
    mode and reload command.

    Raises Exception when the template references an ``env`` attribute
    that is not set.
    """
    template = templates[name]
    local_path = template["local_path"]
    if not os.path.exists(local_path):
        # fall back to a path relative to this file
        project_root = os.path.dirname(os.path.abspath(__file__))
        local_path = os.path.join(project_root, local_path)
    remote_path = template["remote_path"]
    reload_command = template.get("reload_command")
    owner = template.get("owner")
    mode = template.get("mode")
    remote_data = ""
    if exists(c, remote_path):
        remote_data = sudo(c, f"cat {remote_path}", show=False).stdout

    with open(local_path, "r") as f:
        local_data = f.read()
        # Escape all non-string-formatting-placeholder occurrences of '%':
        local_data = re.sub(r"%(?!\(\w+\)s)", "%%", local_data)
        if "%(db_pass)s" in local_data:
            env.db_pass = db_pass()

        # raw-string patterns: the original non-raw "%\(...\)s" relied on
        # invalid escape sequences (a DeprecationWarning, later an error)
        for v in re.finditer(r"%\((\w+)\)s", local_data):
            env_attr = v.group(1)
            if not hasattr(env, env_attr):
                raise Exception(f"env missing attribute '{env_attr}'")
            local_data = re.sub(rf"%\({env_attr}\)s",
                                getattr(env, env_attr), local_data)

    def clean(s):
        # normalize line endings/whitespace for the change comparison
        return s.replace("\n", "").replace("\r", "").strip()

    if clean(remote_data) == clean(local_data):
        return

    temp_path = local_path + ".temp"
    # context manager guarantees the handle is closed before the upload
    with open(temp_path, "w") as f:
        f.write(local_data)
    upload_template(c, temp_path, remote_path, env, use_sudo=True, backup=False)
    os.unlink(temp_path)
    if owner:
        sudo(c, f"chown {owner} {remote_path}")
    if mode:
        sudo(c, f"chmod {mode} {remote_path}")
    if reload_command:
        sudo(c, reload_command)
def ensure_docker_endpoint():
    """Install pinned docker-ce on Ubuntu and configure its daemon.

    Adds Docker's apt repository, installs args.docker_version, renders
    daemon.json with the registry address and (on AWS) the resolver DNS
    server, adds args.os_user to the docker group and restarts the
    service.  Idempotent via the docker_ensured marker; exits the process
    on failure.
    """
    try:
        if not exists(conn, '/home/{}/.ensure_dir/docker_ensured'.format(args.os_user)):
            conn.sudo("bash -c "
                      "'curl -fsSL https://download.docker.com/linux/ubuntu/gpg"
                      " | apt-key add -'")
            conn.sudo('add-apt-repository "deb [arch=amd64] '
                      'https://download.docker.com/linux/ubuntu '
                      '$(lsb_release -cs) stable"')
            conn.sudo('apt-get update')
            conn.sudo('apt-cache policy docker-ce')
            conn.sudo('apt-get install -y docker-ce={}'
                      .format(args.docker_version))
            if not exists(conn, '{}/tmp'.format(args.dlab_path)):
                conn.run('mkdir -p {}/tmp'.format(args.dlab_path))
            conn.put('./daemon.json',
                     '{}/tmp/daemon.json'.format(args.dlab_path))
            conn.sudo('sed -i "s|REPOSITORY|{}:{}|g" {}/tmp/daemon.json'
                      .format(args.repository_address,
                              args.repository_port,
                              args.dlab_path))
            if args.cloud_provider == "aws":
                # read the resolver's DNS server so the docker daemon
                # keeps resolving inside the VPC
                dns_ip_resolve = (conn.run("systemd-resolve --status "
                                           "| grep -A 5 'Current Scopes: DNS' "
                                           "| grep 'DNS Servers:' "
                                           "| awk '{print $3}'")
                                  .stdout.rstrip("\n\r"))
                conn.sudo('sed -i "s|DNS_IP_RESOLVE|\"dns\": [{0}],|g" {1}/tmp/daemon.json'
                          .format(dns_ip_resolve, args.dlab_path))
            elif args.cloud_provider == "gcp":
                # GCP needs no custom DNS entry; drop the placeholder
                dns_ip_resolve = ""
                conn.sudo('sed -i "s|DNS_IP_RESOLVE||g" {1}/tmp/daemon.json'
                          .format(dns_ip_resolve, args.dlab_path))
            conn.sudo('mv {}/tmp/daemon.json /etc/docker'
                      .format(args.dlab_path))
            conn.sudo('usermod -a -G docker ' + args.os_user)
            conn.sudo('update-rc.d docker defaults')
            conn.sudo('update-rc.d docker enable')
            conn.sudo('service docker restart')
            conn.sudo('touch /home/{}/.ensure_dir/docker_ensured'
                      .format(args.os_user))
    except Exception as err:
        # %s placeholder: logging does not concatenate extra positional args
        logging.error('Failed to install Docker: %s', str(err))
        traceback.print_exc()
        sys.exit(1)
# Example #21
    def setup_git_env(self):
        """Clone the app repo if absent, then reset it to the stage branch."""
        print("\n\n==> setup_git_env\n\n")
        if not exists(self.cnx, self.appDir + '/.git'):
            self.rc.clone(branch=self.stage['branch'],
                          repo=self.app['repo'],
                          pattern=self.appDir)

        # NOTE(review): `result` is never used — presumably reset() is
        # called only for its side effect; confirm before removing
        result = self.rc.reset(branch=self.stage['branch'],
                               pattern=self.appDir)
# Example #22
def _get_latest_source(c):
    """Fetch or clone the repo in the cwd, then reset to the local HEAD."""
    print('Getting latest source...')
    if exists(c, '.git'):
        c.run('git fetch')
    else:
        c.run(f'git clone {REPO_URL} .')
    # NOTE(review): `run` is unqualified here (not c.run) — presumably a
    # local-run helper returning the locally checked-out commit; confirm
    current_commit = run("git log -n 1 --format=%H", hide=True).stdout.strip()
    c.run(f'git reset --hard {current_commit}')
    print('Done\n')
# Example #23
def ensure_java(user):
    """Install OpenJDK 8 devel via yum (marker-file idempotent).

    Exits the process on installation failure.
    """
    try:
        if not exists(datalab.fab.conn,
                      '/home/{}/.ensure_dir/java_ensured'.format(user)):
            manage_pkg('-y install', 'remote', 'java-1.8.0-openjdk-devel')
            datalab.fab.conn.sudo(
                'touch /home/{}/.ensure_dir/java_ensured'.format(user))
    except Exception:
        # narrowed from bare except so SystemExit/KeyboardInterrupt
        # propagate instead of being converted to exit(1)
        sys.exit(1)
# Example #24
def ensure_matplot(os_user):
    """Install matplotlib (and numpy for tensor/deeplearning notebooks).

    Versions come from the notebook_* environment variables.  Idempotent
    via the matplot_ensured marker; exits the process on failure.
    """
    if not exists(datalab.fab.conn,'/home/{}/.ensure_dir/matplot_ensured'.format(os_user)):
        try:
            datalab.fab.conn.sudo('python3.5 -m pip install matplotlib=={} --no-cache-dir'.format(os.environ['notebook_matplotlib_version']))
            if os.environ['application'] in ('tensor', 'deeplearning'):
                datalab.fab.conn.sudo('python3.8 -m pip install -U numpy=={} --no-cache-dir'.format(os.environ['notebook_numpy_version']))
            datalab.fab.conn.sudo('touch /home/{}/.ensure_dir/matplot_ensured'.format(os_user))
        except Exception:
            # narrowed from bare except so SystemExit/KeyboardInterrupt
            # propagate instead of being converted to exit(1)
            sys.exit(1)
# Example #25
def bootstrap(ctx):
    """Bootstraps a repository your project."""

    project = ctx.project
    git_marker = Path(ctx.paths.remote.repo) / '.git'

    # nothing to do when the checkout is already in place
    if exists(ctx, git_marker):
        return

    sys_git.clone(ctx,
                  path_base=ctx.paths.remote.project.home,
                  repo_url=project.repo,
                  dir_target='.')
def ensure_jre_jdk(os_user):
    """Install the default JRE and JDK via apt (marker-file idempotent).

    Exits the process on installation failure.
    """
    if not exists(conn,'/home/' + os_user + '/.ensure_dir/jre_jdk_ensured'):
        try:
            conn.sudo('mkdir -p /home/' + os_user + '/.ensure_dir')
            conn.sudo('apt-get update')
            conn.sudo('apt-get install -y default-jre')
            conn.sudo('apt-get install -y default-jdk')
            conn.sudo('touch /home/' + os_user + '/.ensure_dir/jre_jdk_ensured')
        except Exception:
            # narrowed from bare except so SystemExit/KeyboardInterrupt
            # propagate instead of being converted to exit(1)
            sys.exit(1)
# Example #27
def remove(conn, c=None):
    """
    Blow away the current project.

    Removes the virtualenv, project directory, repo checkout and every
    uploaded template, then drops the project's database and role.
    """
    c = connect(c)
    if exists(c, env.venv_path):
        run(c, f"rm -rf {env.venv_path}")
    if exists(c, env.proj_path):
        run(c, f"rm -rf {env.proj_path}")
    for template in templates.values():
        remote_path = template["remote_path"]
        if exists(c, remote_path):
            sudo(c, f"rm {remote_path}")
    if exists(c, env.repo_path):
        # the original string was missing the f prefix, so the literal
        # text "rm -rf {env.repo_path}" was run and the repo never removed
        run(c, f"rm -rf {env.repo_path}")
    sudo(c, "supervisorctl update")
    psql(c, f"DROP DATABASE IF EXISTS {env.proj_name};")
    psql(c, f"DROP USER IF EXISTS {env.proj_name};")
# Example #28
def install_livy_dependencies(os_user):
    """Install system and pip packages Livy needs (marker-file idempotent)."""
    marker = '/home/' + os_user + '/.ensure_dir/livy_dependencies_ensured'
    if exists(datalab.fab.conn, marker):
        return
    manage_pkg('-y install', 'remote', 'libkrb5-dev')
    datalab.fab.conn.sudo(
        'pip3 install cloudpickle requests requests-kerberos flake8 flaky pytest --no-cache-dir'
    )
    datalab.fab.conn.sudo('touch ' + marker)
# Example #29
def _setup_repo(conn: Connection, repo_dir: str, repo_name: str):
    """Ensure *repo_dir* exists and contains a clone of *repo_name*."""
    directory(conn, repo_dir, group=WEBADMIN_GROUP, sudo=True)

    # already cloned — nothing more to do
    if exists(conn, f'{repo_dir}/.git'):
        return

    # first-time setup: register a deploy key with GitHub, then clone
    if not verify_access_token():
        raise Exception(
            "Unable to access GitHub account. Run 'auth' to fix this")
    create_key(conn, repo_name, WEBADMIN_GROUP)
    add_repo_key(conn, repo_name)
    clone(conn, repo_name, repo_dir, skip_strict_key_checking=True)
# Example #30
def configure_notebook(keyfile):
    """Stage deeplearning dataengine config files on the notebook node.

    Uploads the sparkmagic and spark-defaults templates, installs the
    dataengine config-generation script and the datalab python package
    if they are not present yet.

    keyfile: SSH key path; currently unused here (the spark-memory code
    that consumed it is commented out below).
    """
    templates_dir = '/root/templates/'
    files_dir = '/root/files/'
    scripts_dir = '/root/scripts/'
    datalab.fab.conn.run('mkdir -p /tmp/{}/'.format(args.cluster_name))
    datalab.fab.conn.put(templates_dir + 'sparkmagic_config_template.json',
                         '/tmp/sparkmagic_config_template.json')
    #put(templates_dir + 'pyspark_dataengine_template.json', '/tmp/{}/pyspark_dataengine_template.json'.format(args.cluster_name))
    #put(templates_dir + 'r_dataengine_template.json', '/tmp/{}/r_dataengine_template.json'.format(args.cluster_name))
    #put(templates_dir + 'toree_dataengine_template.json','/tmp/{}/toree_dataengine_template.json'.format(args.cluster_name))
    # conn.put(files_dir + 'toree_kernel.tar.gz', '/tmp/{}/toree_kernel.tar.gz'.format(args.cluster_name))
    # conn.put(templates_dir + 'toree_dataengine_template.json', '/tmp/{}/toree_dataengine_template.json'.format(args.cluster_name))
    # conn.put(templates_dir + 'run_template.sh', '/tmp/{}/run_template.sh'.format(args.cluster_name))
    datalab.fab.conn.put(
        templates_dir + 'notebook_spark-defaults_local.conf',
        '/tmp/{}/notebook_spark-defaults_local.conf'.format(args.cluster_name))
    # host part of spark://host:port (kept for the commented-out memory code)
    spark_master_ip = args.spark_master.split('//')[1].split(':')[0]
    # spark_memory = get_spark_memory(True, args.os_user, spark_master_ip, keyfile)
    # conn.run('echo "spark.executor.memory {0}m" >> /tmp/{1}/notebook_spark-defaults_local.conf'.format(spark_memory, args.cluster_name))
    # install the config-generation script once
    if not exists(datalab.fab.conn,
                  '/usr/local/bin/deeplearning_dataengine_create_configs.py'):
        datalab.fab.conn.put(
            scripts_dir + 'deeplearning_dataengine_create_configs.py',
            '/tmp/deeplearning_dataengine_create_configs.py')
        datalab.fab.conn.sudo(
            'cp -f /tmp/deeplearning_dataengine_create_configs.py /usr/local/bin/deeplearning_dataengine_create_configs.py'
        )
        datalab.fab.conn.sudo(
            'chmod 755 /usr/local/bin/deeplearning_dataengine_create_configs.py'
        )
    # ship the datalab package to the node if missing; the tar is built
    # on the machine running this script (conn.local), then uploaded
    if not exists(datalab.fab.conn, '/usr/lib/python3.8/datalab/'):
        datalab.fab.conn.sudo('mkdir -p /usr/lib/python3.8/datalab/')
        datalab.fab.conn.local(
            'cd  /usr/lib/python3.8/datalab/; tar -zcvf /tmp/datalab.tar.gz *')
        datalab.fab.conn.put('/tmp/datalab.tar.gz', '/tmp/datalab.tar.gz')
        datalab.fab.conn.sudo(
            'tar -zxvf /tmp/datalab.tar.gz -C /usr/lib/python3.8/datalab/')
        datalab.fab.conn.sudo('chmod a+x /usr/lib/python3.8/datalab/*')
        # mirror into lib64 on distros that use it
        if exists(datalab.fab.conn, '/usr/lib64'):
            datalab.fab.conn.sudo('mkdir -p /usr/lib64/python3.8')
            datalab.fab.conn.sudo(
                'ln -fs /usr/lib/python3.8/datalab /usr/lib64/python3.8/datalab'
            )