def install_marathon(ctx, ssh):
    project = Project.from_dir(path=ctx.obj['cwd'])
    click.echo('Step 1/3: Zookeeper')
    project.salt('state.sls', args=['cdh5.zookeeper'], target='master', ssh=ssh)
    click.echo('Step 2/3: Mesos')
    project.salt('state.sls', args=['mesos.cluster'], target='*', ssh=ssh)
    click.echo('Step 3/3: Marathon')
    project.salt('state.sls', args=['mesos.marathon'], target='master', ssh=ssh)
def install_impala(ctx, ssh):
    project = Project.from_dir(path=ctx.obj['cwd'])
    click.echo('Step 1/4: Zookeeper')
    project.salt('state.sls', args=['cdh5.zookeeper'], target='master', ssh=ssh)
    click.echo('Step 2/4: HDFS')
    project.salt('state.sls', args=['cdh5.hdfs.cluster'], target='*', ssh=ssh)
    click.echo('Step 3/4: Hive Metastore')
    project.salt('state.sls', args=['cdh5.hive.metastore'], target='master', ssh=ssh)
    click.echo('Step 4/4: Impala')
    project.salt('state.sls', args=['cdh5.impala.cluster'], target='*', ssh=ssh)
def install_spark(ctx, ssh):
    project = Project.from_dir(path=ctx.obj['cwd'])
    click.echo('Step 1/4: Zookeeper')
    project.salt('state.sls', args=['cdh5.zookeeper'], target='master', ssh=ssh)
    click.echo('Step 2/4: HDFS')
    project.salt('state.sls', args=['cdh5.hdfs.cluster'], target='*', ssh=ssh)
    click.echo('Step 3/4: Mesos')
    project.salt('state.sls', args=['mesos.cluster'], target='*', ssh=ssh)
    click.echo('Step 4/4: Spark on Mesos')
    project.salt('state.sls', args=['mesos.spark'], target='master', ssh=ssh)
def ssh(ctx, node):
    project = Project.from_dir(path=ctx.obj['cwd'])
    node = project.cluster.instances[node]
    ip = node.ip
    username = node.username
    keypair = os.path.expanduser(node.keypair)
    cmd = ['ssh', username + '@' + ip]
    cmd = cmd + ['-i', keypair]
    cmd = cmd + ['-oStrictHostKeyChecking=no']
    click.echo(' '.join(cmd))
    subprocess.call(cmd)
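# For illustration only: with a hypothetical node (username 'ubuntu', IP 1.2.3.4,
# keypair ~/.ssh/dsb.pem), the command assembled and echoed above would be:
#   ssh ubuntu@1.2.3.4 -i /home/ubuntu/.ssh/dsb.pem -oStrictHostKeyChecking=no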
def sync(ctx, continuous, skip):
    """Rsync the salt formulas and pillars to the cluster; optionally keep watching for changes."""
    project = Project.from_dir(path=ctx.obj['cwd'])
    handler = RsyncHandler()
    handler.project = project
    if not skip:
        click.echo('Syncing salt formulas and pillars')
        handler.sync_all()
    if continuous:
        click.echo('Waiting for changes on the file system')
        sync_loop(project, handler)
def new_func(ctx, settingsfile, log_level, *args, **kwargs):
    if 'log_level' not in ctx.obj:
        if log_level == 'info':
            log_level = logging.INFO
        elif log_level == 'debug':
            log_level = logging.DEBUG
        elif log_level == 'warning':
            log_level = logging.WARNING
        elif log_level == 'error':
            log_level = logging.ERROR
        setup_logging(log_level)
        ctx.obj['log_level'] = log_level
    if 'project' not in ctx.obj:
        if settingsfile:
            project = Project.from_file(settingsfile)
        else:
            project = Project.from_dir(path=ctx.obj['cwd'])
        ctx.obj['project'] = project
    return ctx.invoke(func, *args, **kwargs)
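# The function above references `func` from an enclosing scope, so it only makes
# sense as the inner function of a decorator that configures logging and stashes
# the Project in ctx.obj before invoking the wrapped click command. A minimal
# sketch of how such a wrapper could be wired up (the decorator name
# `pass_project` is hypothetical, not taken from the source):
from functools import update_wrapper

import click

def pass_project(func):
    @click.pass_context
    def new_func(ctx, settingsfile, log_level, *args, **kwargs):
        ...  # body as defined above: set up logging, build ctx.obj['project']
        return ctx.invoke(func, *args, **kwargs)
    return update_wrapper(new_func, func)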
def test_generate_salt_ssh_master_conf(tmpdir):
    path = tmpdir.dirname
    p = Project(path=path)
    master = salt.generate_salt_ssh_master_conf(p)
    assert master['root_dir'] == os.path.join(p.settings_dir)
    assert master['cachedir'] == os.path.join(p.settings_dir, 'var', 'cache', 'salt')
    assert master['file_roots']['base'] == [os.path.join(p.settings_dir, 'salt')]
    assert master['pillar_roots']['base'] == [os.path.join(p.settings_dir, 'pillar')]
def install_salt(ctx):
    """Bootstrap the salt master on the head node and salt minions on every node, then sync formulas."""
    project = Project.from_dir(path=ctx.obj['cwd'])
    pillar_template = """pillar='{"salt": {"master": {"ip": "%s"}, "minion": {"roles": %s } } }' """

    click.echo('Installing salt master in the head')
    project.salt('state.sls', args=['salt.master'], target='master', ssh=True)

    click.echo('Installing salt minion in the head')
    roles_txt = ['"%s"' % role for role in master_roles]
    roles_txt = '[%s]' % ', '.join(roles_txt)
    pillars = pillar_template % (project.cluster.master.ip, roles_txt)
    project.salt('state.sls', args=['salt.minion', pillars], target='master', ssh=True)

    if len(project.cluster) > 1:
        click.echo('Installing salt minion in the compute nodes')
        roles_txt = ['"%s"' % role for role in minion_roles]
        roles_txt = '[%s]' % ', '.join(roles_txt)
        pillars = pillar_template % (project.cluster.master.ip, roles_txt)
        project.salt('state.sls', args=['salt.minion', pillars], target='minion*', ssh=True)

    click.echo('Syncing formulas')
    ctx.invoke(sync)
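# For reference, a sketch of how pillar_template renders. The IP and roles below
# are hypothetical; the real values come from project.cluster.master.ip and the
# master/minion role lists:
#   roles_txt = '["head", "conda"]'
#   pillars = pillar_template % ('10.0.0.1', roles_txt)
#   # -> pillar='{"salt": {"master": {"ip": "10.0.0.1"}, "minion": {"roles": ["head", "conda"] } } }'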
def up(ctx, salt):
    """Create the cluster, save its metadata, verify SSH access and optionally bootstrap salt."""
    project = Project.from_dir(path=ctx.obj['cwd'])
    click.echo('Creating cluster')
    project.create_cluster()
    click.echo('Creating metadata')
    project.save_instances()
    project.update()

    click.echo('Checking SSH Connection')
    ssh_status = project.cluster.check_ssh()
    if all(ssh_status.values()):
        click.echo('SSH connection to all nodes OK')
    else:
        click.echo('SSH connection to some nodes did not work.', err=True)
        click.echo('This might just be the cloud provider being slow; wait a while and try again', err=True)
        click.echo(ssh_status, err=True)
        sys.exit(1)

    if salt:
        click.echo('Installing salt (master)')
        ctx.invoke(install_salt)
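# check_ssh() returns a mapping with one boolean per node, so
# all(ssh_status.values()) is True only when every node accepted the SSH probe.
# Illustrative shape (the exact keys depend on how the cluster labels its instances):
#   {'master': True, 'minion-1': True, 'minion-2': False}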
def install_notebook(ctx, ssh):
    project = Project.from_dir(path=ctx.obj['cwd'])
    click.echo('Step 1/1: Jupyter Notebook')
    project.salt('state.sls', args=['ipython.notebook'], target='master', ssh=ssh)
def cmd(ctx, command, ssh):
    project = Project.from_dir(path=ctx.obj['cwd'])
    project.salt('cmd.run', args=[command], ssh=ssh)
def install_miniconda(ctx, ssh, target):
    project = Project.from_dir(path=ctx.obj['cwd'])
    project.salt('state.sls', args=['miniconda'], target=target, ssh=ssh)
    if not ssh:
        project.salt('saltutil.sync_all', target=target)
def destroy(ctx):
    click.echo('Destroying cluster')
    project = Project.from_dir(path=ctx.obj['cwd'])
    project.destroy()
def salt(ctx, target, module, args, ssh):
    project = Project.from_dir(path=ctx.obj['cwd'])
    project.salt(module, args=args, target=target, ssh=ssh)
import os

from datasciencebox.core import salt
from datasciencebox.core.project import Project
from datasciencebox.core.cloud.cluster import Cluster
from datasciencebox.core.cloud.instance import Instance

cluster = Cluster()
cluster.instances.append(
    Instance(ip='0.0.0.0', username='******', keypair='/home/ubuntu/.ssh/id_rsa'))
cluster.instances.append(
    Instance(ip='1.1.1.1:2222', username='******', keypair='/home/ubuntu/.ssh/id_rsa2'))
cluster.instances.append(
    Instance(ip='2.2.2.2', port='3333', username='******', keypair='/home/ubuntu/.ssh/id_rsa3'))

project = Project()
project.cluster = cluster

head_roles = ['head', 'head2', 'conda']
compute_roles = ['minion2', 'conda']
salt.HEAD_ROLES = head_roles
salt.COMPUTE_ROLES = compute_roles


def test_generate_salt_ssh_master_conf(tmpdir):
    path = tmpdir.dirname
    p = Project(path=path)
    master = salt.generate_salt_ssh_master_conf(p)
    assert master['root_dir'] == os.path.join(p.settings_dir)
    assert master['cachedir'] == os.path.join(p.settings_dir, 'var', 'cache', 'salt')
    assert master['file_roots']['base'] == [os.path.join(p.settings_dir, 'salt')]
def install_pkg(ctx, pkg, ssh, target):
    project = Project.from_dir(path=ctx.obj['cwd'])
    args = [pkg]
    project.salt('pkg.install', args=args, target=target, ssh=ssh)
def install_conda(ctx, pkg, ssh, target):
    project = Project.from_dir(path=ctx.obj['cwd'])
    project.salt('conda.install', args=[pkg], kwargs={'user': project.settings['USERNAME']},
                 target=target, ssh=ssh)
def open_hdfs(ctx):
    project = Project.from_dir(path=ctx.obj['cwd'])
    url = 'http://%s:50070' % project.cluster.master.ip
    webbrowser.open(url, new=2)
def update(ctx):
    project = Project.from_dir(path=ctx.obj['cwd'])
    project.update()
def install_hdfs(ctx, ssh):
    project = Project.from_dir(path=ctx.obj['cwd'])
    click.echo('Step 1/1: HDFS')
    project.salt('state.sls', args=['cdh5.hdfs.cluster'], target='*', ssh=ssh)
import pytest

from datasciencebox.core.project import Project
from datasciencebox.core.settings import Settings
from datasciencebox.core.cloud.cluster import Cluster

settings = Settings()
settings['USERNAME'] = '******'
settings['KEYPAIR'] = '~/.ssh/id_rsa'

_ = [{'id': 0, 'ip': '0.0.0.0'}, {'id': 1, 'ip': '1.1.1.1'}, {'id': 2, 'ip': '2.2.2.2'}]
cluster = Cluster.from_list(_, settings)

project = Project()
project.settings = settings
project.cluster = cluster
def open_notebook(ctx):
    project = Project.from_dir(path=ctx.obj['cwd'])
    url = 'http://%s:8888' % project.cluster.master.ip
    webbrowser.open(url, new=2)