def main():
    """CLI entrypoint: mark cli mode, set up logging, and dispatch commands.

    Exits with status 1 on a failed assertion, printing the message in red.
    """
    globals()['is_cli'] = True
    shell.ignore_closed_pipes()
    util.log.setup(format='%(message)s')
    try:
        wants_stream = util.hacks.override('--stream')
        # a MagicMock stands in as a do-nothing context manager when not streaming
        stream_ctx = shell.set_stream() if wants_stream else mock.MagicMock()
        with stream_ctx:
            shell.dispatch_commands(globals(), __name__)
    except AssertionError as e:
        if e.args:
            logging.info(util.colors.red(e.args[0]))
        sys.exit(1)
def main():
    """CLI entrypoint: mark cli mode, set up logging, and dispatch commands.

    Exits with status 1 on a failed assertion, printing the message in red.
    """
    globals()['is_cli'] = True
    shell.ignore_closed_pipes()
    util.log.setup(format='%(message)s')
    # the aws client libraries are noisy at INFO level; silence them for cli use
    with util.log.disable('botocore', 'boto3'):
        try:
            wants_stream = util.hacks.override('--stream')
            # a MagicMock stands in as a do-nothing context manager when not streaming
            stream_ctx = shell.set_stream() if wants_stream else mock.MagicMock()
            with stream_ctx:
                shell.dispatch_commands(globals(), __name__)
        except AssertionError as e:
            if e.args:
                logging.info(util.colors.red(e.args[0]))
            sys.exit(1)
def main():
    """CLI entrypoint: mark cli mode, set up logging, and dispatch commands.

    Commands run inside the ec2 region taken from the `region` environment
    variable (if set). Exits with status 1 on a failed assertion.
    """
    globals()['is_cli'] = True
    shell.ignore_closed_pipes()
    util.log.setup(format='%(message)s')
    # the aws client libraries are noisy at INFO level; silence them for cli use
    with util.log.disable('botocore', 'boto3'):
        try:
            wants_stream = util.hacks.override('--stream')
            # a MagicMock stands in as a do-nothing context manager when not streaming
            stream_ctx = shell.set_stream() if wants_stream else mock.MagicMock()
            with stream_ctx:
                with aws.ec2._region(os.environ.get('region')):
                    shell.dispatch_commands(globals(), __name__)
        except AssertionError as e:
            if e.args:
                # assertion text is debug-level here; the assertion itself still exits
                logging.debug(util.colors.red(e.args[0]))
            sys.exit(1)
def ec2_name():
    """Yield a unique instance name, then terminate any leftover instances on exit.

    Generator fixture: yields a uuid4 string to use as an ec2 instance name,
    and on teardown looks up any instances still carrying that name and
    removes them so test runs don't leak instances.
    """
    with sh.set_stream():
        name = str(uuid.uuid4())
        try:
            yield name
        finally:
            # best-effort cleanup: find any instance ids still tagged with this name.
            # except Exception (not bare except) so ctrl-c / SystemExit during
            # teardown is not swallowed.
            try:
                ids = run('aws-ec2-id', name).splitlines()
            except Exception:
                # lookup failed or nothing matched - nothing to clean up
                pass
            else:
                print('cleaning up left over ec2 ids:', *ids)
                run('aws-ec2-rm -y', *ids)
def new(name: 'name of all instances',
        arg: 'one instance per arg, and that arg is str formatted into cmd, pre_cmd, and tags as "arg"' = None,
        label: 'one label per arg, to use as ec2 tag since arg is often inapproriate, defaults to arg if not provided' = None,
        pre_cmd: 'optional cmd which runs before cmd is backgrounded. will be retried on failure. format with %(arg)s.' = None,
        cmd: 'cmd which is run in the background. format with %(arg)s.' = None,
        tag: 'tag to set as "<key>=<value>' = None,
        no_rm: 'stop instance instead of terminating when done' = False,
        chunk_size: 'how many args to launch at once' = 50,
        bucket: 's3 bucket to upload logs to' = shell.conf.get_or_prompt_pref('launch_logs_bucket', __file__, message='bucket for launch_logs'),
        spot: 'spot price to bid' = None,
        key: 'key pair name' = shell.conf.get_or_prompt_pref('key', aws.ec2.__file__, message='key pair name'),
        ami: 'ami id' = shell.conf.get_or_prompt_pref('ami', aws.ec2.__file__, message='ami id'),
        sg: 'security group name' = shell.conf.get_or_prompt_pref('sg', aws.ec2.__file__, message='security group name'),
        type: 'instance type' = shell.conf.get_or_prompt_pref('type', aws.ec2.__file__, message='instance type'),
        vpc: 'vpc name' = shell.conf.get_or_prompt_pref('vpc', aws.ec2.__file__, message='vpc name'),
        zone: 'ec2 availability zone' = None,
        gigs: 'gb capacity of primary disk' = 8):
    """Launch one ec2 instance per arg and run cmd on each in the background.

    Launch parameters are recorded as params.json under a unique launch id in
    s3, instances are started in chunks of chunk_size, and each instance is
    tagged with its label, chunk number, and the launch id. Returns the
    'launch=<uuid>' string identifying this launch. Exits with status 1 if any
    per-instance command fails.
    """
    # these flags may legitimately stay None; every other flag is required
    optional = ['no_rm', 'zone', 'spot', 'tag', 'pre_cmd', 'label']
    for k, v in locals().items():
        assert v is not None or k in optional, 'required flag missing: --' + k.replace('_', '-')
    tags, args, labels = tuple(tag or ()), tuple(arg or ()), tuple(label or ())
    args = [str(a) for a in args]
    if labels:
        assert len(args) == len(labels), 'there must be an equal number of args and labels, %s != %s' % (len(args), len(labels))
    else:
        # labels default to the args themselves when not provided
        labels = args
    labels = [_tagify(x) for x in labels]
    for tag in tags:
        assert '=' in tag, 'tags should be "<key>=<value>", not: %s' % tag
    for label, arg in zip(labels, args):
        if label == arg:
            logging.info('going to launch arg: %s', arg)
        else:
            logging.info('going to launch label: %s, arg: %s', label, arg)
    # pre_cmd and cmd may be given as paths to script files instead of inline text
    if pre_cmd and os.path.exists(pre_cmd):
        logging.info('reading pre_cmd from file: %s', os.path.abspath(pre_cmd))
        with open(pre_cmd) as f:
            pre_cmd = f.read()
    if os.path.exists(cmd):
        logging.info('reading cmd from file: %s', os.path.abspath(cmd))
        with open(cmd) as f:
            cmd = f.read()
    # probe s3 for an unused launch id; `aws s3 ls` failing means the path is
    # free, so break. the for/else fires only if all 10 candidates collided.
    for _ in range(10):
        launch = str(uuid.uuid4())
        path = 's3://%(bucket)s/launch_logs/launch=%(launch)s' % locals()
        try:
            shell.run('aws s3 ls', path)
        except:
            break
    else:
        assert False, 'failed to generate a unique launch id. clean up: s3://%(bucket)s/launch_logs/' % locals()
    logging.info('launch=%s', launch)
    # snapshot of all launch parameters, persisted to s3 for reproducibility
    data = json.dumps({'name': name, 'args': args, 'labels': labels, 'pre_cmd': pre_cmd, 'cmd': cmd, 'tags': tags, 'no_rm': no_rm, 'bucket': bucket, 'spot': spot, 'key': key, 'ami': ami, 'sg': sg, 'type': type, 'vpc': vpc, 'gigs': gigs})
    if 'AWS_LAUNCH_RUN_LOCAL' in os.environ:
        # debug escape hatch: run every arg's commands locally instead of on ec2
        for arg in args:
            with shell.tempdir(), shell.set_stream():
                shell.run(pre_cmd % {'arg': arg})
                shell.run(cmd % {'arg': arg})
    else:
        shell.run('aws s3 cp - s3://%(bucket)s/launch_logs/launch=%(launch)s/params.json' % locals(), stdin=data)
        tags += ('launch=%s' % launch,)
        for i, (args_chunk, labels_chunk) in enumerate(zip(chunk(args, chunk_size), chunk(labels, chunk_size))):
            logging.info('launching chunk %s of %s, chunk size: %s', i + 1, len(args) // chunk_size + 1, chunk_size)
            instance_ids = aws.ec2.new(name, spot=spot, key=key, ami=ami, sg=sg, type=type, vpc=vpc, zone=zone, gigs=gigs, num=len(args_chunk))
            errors = []
            def run_cmd(instance_id, arg, label):
                # returns a thunk for pool.thread.wait; failures are collected
                # into `errors` rather than raised so one bad instance doesn't
                # abort the rest of the chunk.
                def fn():
                    try:
                        if pre_cmd:
                            # pre_cmd is retried on failure; cmd below is not
                            aws.ec2._retry(aws.ec2.ssh)(instance_id, yes=True, cmd=pre_cmd % {'arg': arg}, prefixed=True)
                        aws.ec2.ssh(instance_id, no_tty=True, yes=True, cmd=_cmd(arg, cmd, no_rm, bucket), prefixed=True)
                        instance = aws.ec2._ls([instance_id])[0]
                        # NOTE: fn closes over the loop variable `i`; this is safe
                        # only because pool.thread.wait below blocks until the
                        # whole chunk finishes before `i` advances.
                        aws.ec2._retry(instance.create_tags)(Tags=[{'Key': k, 'Value': v} for tag in tags + ('label=%s' % label, 'chunk=%s' % i) for [k, v] in [tag.split('=', 1)]])
                        logging.info('tagged: %s', aws.ec2._pretty(instance))
                        logging.info('ran cmd against %s for label %s', instance_id, label)
                    except:
                        errors.append(traceback.format_exc())
                return fn
            pool.thread.wait(*map(run_cmd, instance_ids, args_chunk, labels_chunk), max_threads=10)
            if errors:
                logging.info(util.colors.red('errors:'))
                for e in errors:
                    logging.info(e)
                sys.exit(1)
    return 'launch=%s' % launch
def new(name: 'name of all instances',
        arg: 'one instance per arg, and that arg is str formatted into cmd, pre_cmd, and tags as {arg}' = None,
        label: 'one label per arg, to use as ec2 tag since arg is often inapproriate, defaults to arg if not provided' = None,
        pre_cmd: 'optional cmd which runs before cmd is backgrounded. will be retried on failure. format with {arg}.' = None,
        cmd: 'cmd which is run in the background. format with {arg}.' = None,
        tag: 'tag to set as "<key>=<value>' = None,
        no_rm: 'stop instance instead of terminating when done' = False,
        chunk_size: 'how many args to launch at once' = 50,
        bucket: 's3 bucket to upload logs to' = shell.conf.get_or_prompt_pref('launch_logs_bucket', __file__, message='bucket for launch_logs'),
        spot: 'spot price to bid' = None,
        key: 'key pair name' = shell.conf.get_or_prompt_pref('key', aws.ec2.__file__, message='key pair name'),
        ami: 'ami id' = shell.conf.get_or_prompt_pref('ami', aws.ec2.__file__, message='ami id'),
        sg: 'security group name' = shell.conf.get_or_prompt_pref('sg', aws.ec2.__file__, message='security group name'),
        type: 'instance type' = shell.conf.get_or_prompt_pref('type', aws.ec2.__file__, message='instance type'),
        vpc: 'vpc name' = shell.conf.get_or_prompt_pref('vpc', aws.ec2.__file__, message='vpc name'),
        zone: 'ec2 availability zone' = None,
        role: 'ec2 iam role' = None,
        gigs: 'gb capacity of primary disk' = 8):
    """Launch one ec2 instance per arg and run cmd on each in the background.

    Launch parameters are recorded as params.json under a unique launch id in
    s3, instances are started in chunks of chunk_size (optionally with an iam
    role), and each instance is tagged with its label, chunk number, and the
    launch id. Returns the 'launch=<uuid>' string identifying this launch.
    Exits with status 1 if any per-instance command fails.
    """
    # these flags may legitimately stay None; every other flag is required
    optional = ['no_rm', 'zone', 'spot', 'tag', 'pre_cmd', 'label']
    for k, v in locals().items():
        assert v is not None or k in optional, 'required flag missing: --' + k.replace('_', '-')
    tags, args, labels = tuple(tag or ()), tuple(arg or ()), tuple(label or ())
    args = [str(a) for a in args]
    if labels:
        assert len(args) == len(labels), 'there must be an equal number of args and labels, %s != %s' % (len(args), len(labels))
    else:
        # labels default to the args themselves when not provided
        labels = args
    labels = [_tagify(x) for x in labels]
    for tag in tags:
        assert '=' in tag, 'tags should be "<key>=<value>", not: %s' % tag
    for label, arg in zip(labels, args):
        if label == arg:
            logging.info('going to launch arg: %s', arg)
        else:
            logging.info('going to launch label: %s, arg: %s', label, arg)
    # pre_cmd and cmd may be given as paths to script files instead of inline text
    if pre_cmd and os.path.exists(pre_cmd):
        logging.info('reading pre_cmd from file: %s', os.path.abspath(pre_cmd))
        with open(pre_cmd) as f:
            pre_cmd = f.read()
    if os.path.exists(cmd):
        logging.info('reading cmd from file: %s', os.path.abspath(cmd))
        with open(cmd) as f:
            cmd = f.read()
    # probe s3 for an unused launch id; `aws s3 ls` failing means the path is
    # free, so break. the for/else fires only if all 10 candidates collided.
    for _ in range(10):
        launch = str(uuid.uuid4())
        path = 's3://%(bucket)s/launch_logs/launch=%(launch)s' % locals()
        try:
            shell.run('aws s3 ls', path)
        except:
            break
    else:
        assert False, 'failed to generate a unique launch id. clean up: s3://%(bucket)s/launch_logs/' % locals()
    logging.info('launch=%s', launch)
    # snapshot of all launch parameters, persisted to s3 for reproducibility
    data = json.dumps({'name': name, 'args': args, 'labels': labels, 'pre_cmd': pre_cmd, 'cmd': cmd, 'tags': tags, 'no_rm': no_rm, 'bucket': bucket, 'spot': spot, 'key': key, 'ami': ami, 'sg': sg, 'type': type, 'vpc': vpc, 'gigs': gigs})
    if 'LAUNCH_LOCAL' in os.environ:
        # debug escape hatch: run every arg's commands locally instead of on ec2
        for arg in args:
            with shell.tempdir(), shell.set_stream():
                shell.run(pre_cmd.format(arg=arg))
                shell.run(cmd.format(arg=arg))
    else:
        shell.run('aws s3 cp - s3://%(bucket)s/launch_logs/launch=%(launch)s/params.json' % locals(), stdin=data)
        tags += ('launch=%s' % launch, )
        for i, (args_chunk, labels_chunk) in enumerate(zip(chunk(args, chunk_size), chunk(labels, chunk_size))):
            logging.info('launching chunk %s of %s, chunk size: %s', i + 1, len(args) // chunk_size + 1, chunk_size)
            instance_ids = aws.ec2.new(name, role=role, spot=spot, key=key, ami=ami, sg=sg, type=type, vpc=vpc, zone=zone, gigs=gigs, num=len(args_chunk))
            errors = []
            def run_cmd(instance_id, arg, label):
                # returns a thunk for pool.thread.wait; failures are collected
                # into `errors` rather than raised so one bad instance doesn't
                # abort the rest of the chunk.
                def fn():
                    try:
                        if pre_cmd:
                            # pre_cmd is retried on failure; cmd below is not
                            aws.ec2._retry(aws.ec2.ssh)(instance_id, yes=True, cmd=pre_cmd.format(arg=arg), prefixed=True)
                        aws.ec2.ssh(instance_id, no_tty=True, yes=True, cmd=_cmd(arg, cmd, no_rm, bucket), prefixed=True)
                        instance = aws.ec2._ls([instance_id])[0]
                        # NOTE: fn closes over the loop variable `i`; this is safe
                        # only because pool.thread.wait below blocks until the
                        # whole chunk finishes before `i` advances.
                        aws.ec2._retry(instance.create_tags)(Tags=[{'Key': k, 'Value': v} for tag in tags + ('label=%s' % label, 'chunk=%s' % i) for [k, v] in [tag.split('=', 1)]])
                        logging.info('tagged: %s', aws.ec2._pretty(instance))
                        logging.info('ran cmd against %s for label %s', instance_id, label)
                    except:
                        errors.append(traceback.format_exc())
                return fn
            pool.thread.wait(*map(run_cmd, instance_ids, args_chunk, labels_chunk), max_threads=10)
            if errors:
                logging.info(util.colors.red('errors:'))
                for e in errors:
                    logging.info(e)
                sys.exit(1)
    return 'launch=%s' % launch