def on_shell_open(self, data):
    """Open an interactive SSH shell for the machine described in *data*.

    Tears down any previous session, records the session parameters in
    ``self.ssh_info``, autoconfigures SSH credentials and finally invokes
    an xterm PTY sized to the client's reported columns/rows.  On any
    autoconfigure failure the error is pushed to the client and the
    connection is dropped.
    """
    # A fresh shell request supersedes any session already in progress.
    if self.ssh_info:
        self.disconnect()
    self.ssh_info = dict(
        backend_id=data['backend_id'],
        machine_id=data['machine_id'],
        host=data['host'],
        columns=data['cols'],
        rows=data['rows'],
    )
    log.info("opened shell")
    self.shell = Shell(data['host'])
    try:
        key_id, ssh_user = self.shell.autoconfigure(
            self.user, data['backend_id'], data['machine_id'])
    except Exception as exc:
        # Hide key details behind the standard OpenSSH wording for
        # authorization failures; surface anything else verbatim.
        error_msg = ('Permission denied (publickey).'
                     if isinstance(exc, MachineUnauthorizedError)
                     else str(exc))
        self.ssh_info['error'] = error_msg
        self.emit_shell_data(error_msg)
        self.disconnect()
        return
    self.ssh_info.update(key_id=key_id, ssh_user=ssh_user)
    self.channel = self.shell.ssh.invoke_shell(
        'xterm', data['cols'], data['rows'])
    self.spawn(self.get_ssh_data)
def on_shell_open(self, data):
    """Open an SSH shell to the machine described in *data*.

    Minimal variant: autoconfigures credentials, requests an xterm PTY
    (default size) and spawns the reader greenlet.  No error handling is
    performed here — autoconfigure failures propagate to the caller.
    """
    log.info("opened shell")
    target_host = data['host']
    self.shell = Shell(target_host)
    # Return values are unpacked but unused; the call is made for its
    # side effect of setting up the SSH connection/association.
    key_id, ssh_user = self.shell.autoconfigure(
        self.user, data['backend_id'], data['machine_id'])
    self.channel = self.shell.ssh.invoke_shell('xterm')
    self.spawn(self.get_ssh_data)
def shell_stream(request):
    """Execute command via SSH and stream output

    Streams output using the hidden iframe technique.
    """

    def parse(lines):
        """Generator function that converts stdout_lines to html with
        js which it streams in a hidden iframe.
        """
        # send some blank data to fill the initial buffer and get (webkit)
        # browsers to display right away what's sent
        #yield 1024*'\0'
        # start the html response
        yield "<html><body>\n"
        js = "<script type='text/javascript'>parent.appendShell('%s', '%s');</script>\n"
        for line in lines:
            # get commands output, line by line
            # NOTE(review): only single quotes / CR / LF are escaped here —
            # command output is otherwise injected into the page unescaped,
            # which is an XSS risk if output can contain markup.
            clear_line = line.replace('\'', '\\\'')
            clear_line = clear_line.replace('\n', '<br/>')
            clear_line = clear_line.replace('\r', '')
            ret = js % (clear_line, cmd_id)
            yield ret
        js = "<script type='text/javascript'>"
        js += "parent.completeShell(%s, '%s');</script>\n"
        yield js % (1, cmd_id)  # FIXME
        yield "</body></html>\n"

    log.info("got shell_stream request")
    backend_id = request.matchdict['backend']
    machine_id = request.matchdict['machine']
    cmd = request.params.get('command')
    # Default a missing 'command_id' to '' instead of None: previously a
    # request without it crashed with AttributeError on .encode() before
    # the try block below could turn the error into a 500 response (and
    # parse() would then NameError on cmd_id in the error path).
    cmd_id = (request.params.get('command_id') or '').encode('utf-8', 'ignore')
    host = request.params.get('host')
    try:
        if not cmd:
            raise RequiredParameterMissingError("command")
        if not host:
            raise RequiredParameterMissingError("host")
        user = user_from_request(request)
        shell = Shell(host)
        shell.autoconfigure(user, backend_id, machine_id)
        # stdout_lines is a generator that spits out lines of combined
        # stdout and stderr output. cmd is executed via the shell on the
        # background and the stdout_lines generator is immediately
        # available. stdout_lines will block if no line is in the buffer
        # and will stop iterating once the command is completed and the
        # pipe is closed.
        stdout_lines = shell.command_stream(cmd)
    except Exception as e:
        message = ["Failed to execute command\n", "Error: %s \n" % e]
        return Response(status=500, app_iter=parse(message))
    return Response(status=200, app_iter=parse(stdout_lines))
def ssh_command(email, backend_id, machine_id, host, command,
                key_id=None, username=None, password=None, port=22):
    """Run *command* over SSH on *host* and notify the user on failure.

    Credentials are resolved via Shell.autoconfigure; the connection is
    always closed before the exit status is inspected.  A non-zero exit
    status triggers a user notification carrying the command output.
    """
    user = user_from_email(email)
    shell = Shell(host)
    key_id, ssh_user = shell.autoconfigure(
        user, backend_id, machine_id, key_id, username, password, port)
    exit_status, output = shell.command(command)
    shell.disconnect()
    if not exit_status:
        return
    # Imported lazily to avoid a circular import at module load time.
    from mist.io.methods import notify_user
    notify_user(user,
                "Async command failed for machine %s (%s)" % (machine_id, host),
                output)
def post_deploy_steps(self, email, backend_id, machine_id, monitoring, command,
                      key_id=None, username=None, password=None, port=22):
    """Celery task (bind=True): run post-provision steps on a new machine.

    Locates the machine in the given backend, connects over SSH (which
    also creates the key/machine association), optionally executes a
    deployment ``command`` and optionally enables monitoring.  Uses
    ``self.retry`` for transient failures; after retries are exhausted
    the user and admins are notified and a failure event is logged.
    """
    from mist.io.methods import ssh_command, connect_provider, enable_monitoring
    from mist.io.methods import notify_user, notify_admin
    # In multi-user (core) deployments, prefer the core implementations;
    # otherwise fall back to io's enable_monitoring and a no-op log_event.
    if multi_user:
        from mist.core.methods import enable_monitoring
        from mist.core.helpers import log_event
    else:
        from mist.io.methods import enable_monitoring
        log_event = lambda *args, **kwargs: None
    user = user_from_email(email)
    try:
        # find the node we're looking for and get its hostname
        conn = connect_provider(user.backends[backend_id])
        nodes = conn.list_nodes()
        node = None
        for n in nodes:
            if n.id == machine_id:
                node = n
                break
        if node and len(node.public_ips):
            # filter out IPv6 addresses
            # (Python 2: filter() returns a list, so ips[0] is valid)
            ips = filter(lambda ip: ':' not in ip, node.public_ips)
            host = ips[0]
        else:
            # No node / no public IP yet: retry later (machine may still
            # be booting).  self.retry raises, so this leaves the task.
            raise self.retry(exc=Exception(), countdown=120, max_retries=5)
        try:
            from mist.io.shell import Shell
            shell = Shell(host)
            # connect with ssh even if no command, to create association
            # to be able to enable monitoring
            key_id, ssh_user = shell.autoconfigure(
                user, backend_id, node.id, key_id, username, password, port)
            backend = user.backends[backend_id]
            # msg is built but only used implicitly by notifications below
            # — NOTE(review): appears unused in this visible code path.
            msg = "Backend:\n Name: %s\n Id: %s\n" % (backend.title, backend_id)
            msg += "Machine:\n Name: %s\n Id: %s\n" % (node.name, node.id)
            if command:
                log_dict = {
                    'email': email,
                    'event_type': 'job',
                    'backend_id': backend_id,
                    'machine_id': machine_id,
                    'job_id': uuid.uuid4().hex,
                    'command': command,
                    'host': host,
                    'key_id': key_id,
                    'ssh_user': ssh_user,
                }
                log_event(action='deployment_script_started', **log_dict)
                start_time = time()
                retval, output = shell.command(command)
                execution_time = time() - start_time
                output = output.decode('utf-8', 'ignore')
                title = "Deployment script %s" % ('failed' if retval
                                                 else 'succeeded')
                notify_user(user, title,
                            backend_id=backend_id,
                            machine_id=machine_id,
                            machine_name=node.name,
                            command=command,
                            output=output,
                            duration=execution_time,
                            retval=retval,
                            error=retval > 0)
                log_event(action='deployment_script_finished',
                          error=retval > 0,
                          return_value=retval,
                          stdout=output,
                          **log_dict)
            shell.disconnect()
            if monitoring:
                # Monitoring failures are reported but do not fail the task.
                try:
                    enable_monitoring(
                        user, backend_id, node.id,
                        name=node.name,
                        dns_name=node.extra.get('dns_name', ''),
                        public_ips=ips, no_ssh=False, dry=False,
                    )
                except Exception as e:
                    print repr(e)
                    notify_user(
                        user,
                        "Enable monitoring failed for machine %s (%s)"
                        % (node.name, node.id), repr(e))
                    notify_admin(
                        'Enable monitoring on creation failed for user %s machine %s: %r'
                        % (email, node.name, e))
        except (ServiceUnavailableError, SSHException) as exc:
            # Transient SSH/service errors: retry the whole task.
            raise self.retry(exc=exc, countdown=60, max_retries=5)
    except Exception as exc:
        # Celery's Retry exception stringifies starting with 'Retry';
        # re-raise it so the retry machinery keeps working.
        if str(exc).startswith('Retry'):
            raise
        # Retries exhausted (or unexpected error): notify and log failure.
        notify_user(
            user,
            "Deployment script failed for machine %s after 5 retries"
            % node.id)
        notify_admin(
            "Deployment script failed for machine %s in backend %s by user %s after 5 retries"
            % (node.id, backend_id, email), repr(exc))
        log_event(
            email=email,
            event_type='job',
            action='deployment_script_failed',
            backend_id=backend_id,
            machine_id=machine_id,
            enable_monitoring=bool(monitoring),
            command=command,
            error="Couldn't connect to run post deploy steps (5 attempts).",
        )
def run_deploy_script(self, email, backend_id, machine_id, command,
                      key_id=None, username=None, password=None, port=22):
    """Celery task (bind=True): execute a deployment script on a machine.

    Locates the machine by id in the given backend, connects over SSH,
    runs ``command`` and notifies the user of success or failure with the
    command's exit status, duration and output.  Transient errors are
    retried via ``self.retry``; final failure notifies user and admins.
    """
    from mist.io.methods import ssh_command, connect_provider
    from mist.io.methods import notify_user, notify_admin
    user = user_from_email(email)
    try:
        # find the node we're looking for and get its hostname
        conn = connect_provider(user.backends[backend_id])
        nodes = conn.list_nodes()
        node = None
        for n in nodes:
            if n.id == machine_id:
                node = n
                break
        if node and len(node.public_ips):
            # filter out IPv6 addresses
            # (Python 2: filter() returns a list, so ips[0] is valid)
            ips = filter(lambda ip: ':' not in ip, node.public_ips)
            host = ips[0]
        else:
            # Machine not found or no public IP yet: retry later.
            raise self.retry(exc=Exception(), countdown=60, max_retries=5)
        try:
            from mist.io.shell import Shell
            shell = Shell(host)
            key_id, ssh_user = shell.autoconfigure(
                user, backend_id, node.id, key_id, username, password, port)
            start_time = time()
            retval, output = shell.command(command)
            execution_time = time() - start_time
            shell.disconnect()
            msg = """
Command: %s
Return value: %s
Duration: %s seconds
Output: %s""" % (command, retval, execution_time, output)
            if retval:
                notify_user(
                    user,
                    "Deployment script failed for machine %s (%s)"
                    % (node.name, node.id), msg)
                amqp_log(
                    "Deployment script failed for user %s machine %s (%s): %s"
                    % (user, node.name, node.id, msg))
            else:
                notify_user(
                    user,
                    "Deployment script succeeded for machine %s (%s)"
                    % (node.name, node.id), msg)
                amqp_log(
                    "Deployment script succeeded for user %s machine %s (%s): %s"
                    % (user, node.name, node.id, msg))
        except ServiceUnavailableError as exc:
            # Service temporarily unavailable: retry the whole task.
            raise self.retry(exc=exc, countdown=60, max_retries=5)
    except Exception as exc:
        # Celery's Retry exception stringifies starting with 'Retry'.
        # NOTE(review): this variant returns instead of re-raising, unlike
        # the sibling post_deploy_steps task — confirm this is intended.
        if str(exc).startswith('Retry'):
            return
        # Retries exhausted: log and notify user and admins.
        amqp_log(
            "Deployment script failed for machine %s in backend %s by user %s after 5 retries: %s"
            % (node.id, backend_id, email, repr(exc)))
        notify_user(
            user,
            "Deployment script failed for machine %s after 5 retries"
            % node.id)
        notify_admin(
            "Deployment script failed for machine %s in backend %s by user %s after 5 retries"
            % (node.id, backend_id, email),
            repr(exc))
def post_deploy_steps(self, email, cloud_id, machine_id, monitoring, command, key_id=None, username=None, password=None, port=22, script_id='', script_params='', job_id=None, hostname='', plugins=None, post_script_id='', post_script_params=''): from mist.io.methods import connect_provider, probe_ssh_only from mist.io.methods import notify_user, notify_admin from mist.io.methods import create_dns_a_record if multi_user: from mist.core.methods import enable_monitoring from mist.core.tasks import run_script from mist.core.helpers import log_event else: from mist.io.methods import enable_monitoring log_event = lambda *args, **kwargs: None job_id = job_id or uuid.uuid4().hex user = user_from_email(email) tmp_log = lambda msg, *args: log.error('Post deploy: %s' % msg, *args) tmp_log('Entering post deploy steps for %s %s %s', user.email, cloud_id, machine_id) try: # find the node we're looking for and get its hostname node = None try: conn = connect_provider(user.clouds[cloud_id]) nodes = conn.list_nodes() # TODO: use cache for n in nodes: if n.id == machine_id: node = n break tmp_log('run list_machines') except: raise self.retry(exc=Exception(), countdown=10, max_retries=10) if node and len(node.public_ips): # filter out IPv6 addresses ips = filter(lambda ip: ':' not in ip, node.public_ips) host = ips[0] else: tmp_log('ip not found, retrying') raise self.retry(exc=Exception(), countdown=60, max_retries=20) try: from mist.io.shell import Shell shell = Shell(host) # connect with ssh even if no command, to create association # to be able to enable monitoring tmp_log('attempting to connect to shell') key_id, ssh_user = shell.autoconfigure(user, cloud_id, node.id, key_id, username, password, port) tmp_log('connected to shell') result = probe_ssh_only(user, cloud_id, machine_id, host=None, key_id=key_id, ssh_user=ssh_user, shell=shell) log_dict = { 'email': email, 'event_type': 'job', 'cloud_id': cloud_id, 'machine_id': machine_id, 'job_id': job_id, 'host': host, 'key_id': 
key_id, 'ssh_user': ssh_user, } log_event(action='probe', result=result, **log_dict) cloud = user.clouds[cloud_id] msg = "Cloud:\n Name: %s\n Id: %s\n" % (cloud.title, cloud_id) msg += "Machine:\n Name: %s\n Id: %s\n" % (node.name, node.id) if hostname: try: record = create_dns_a_record(user, hostname, host) hostname = '.'.join((record.name, record.zone.domain)) log_event(action='create_dns_a_record', hostname=hostname, **log_dict) except Exception as exc: log_event(action='create_dns_a_record', error=str(exc), **log_dict) error = False if script_id and multi_user: tmp_log('will run script_id %s', script_id) ret = run_script.run(user.email, script_id, cloud_id, machine_id, params=script_params, host=host, job_id=job_id) error = ret['error'] tmp_log('executed script_id %s', script_id) elif command: tmp_log('will run command %s', command) log_event(action='deployment_script_started', command=command, **log_dict) start_time = time() retval, output = shell.command(command) tmp_log('executed command %s', command) execution_time = time() - start_time output = output.decode('utf-8', 'ignore') title = "Deployment script %s" % ('failed' if retval else 'succeeded') error = retval > 0 notify_user(user, title, cloud_id=cloud_id, machine_id=machine_id, machine_name=node.name, command=command, output=output, duration=execution_time, retval=retval, error=retval > 0) log_event(action='deployment_script_finished', error=retval > 0, return_value=retval, command=command, stdout=output, **log_dict) shell.disconnect() if monitoring: try: enable_monitoring( user, cloud_id, node.id, name=node.name, dns_name=node.extra.get('dns_name', ''), public_ips=ips, no_ssh=False, dry=False, job_id=job_id, plugins=plugins, deploy_async=False, ) except Exception as e: print repr(e) error = True notify_user( user, "Enable monitoring failed for machine %s" % machine_id, repr(e)) notify_admin( 'Enable monitoring on creation failed for user %s machine %s: %r' % (email, machine_id, e)) 
log_event(action='enable_monitoring_failed', error=repr(e), **log_dict) if post_script_id and multi_user: tmp_log('will run post_script_id %s', post_script_id) ret = run_script.run( user.email, post_script_id, cloud_id, machine_id, params=post_script_params, host=host, job_id=job_id, action_prefix='post_', ) error = ret['error'] tmp_log('executed post_script_id %s', script_id) log_event(action='post_deploy_finished', error=error, **log_dict) except (ServiceUnavailableError, SSHException) as exc: tmp_log(repr(exc)) raise self.retry(exc=exc, countdown=60, max_retries=15) except Exception as exc: tmp_log(repr(exc)) if str(exc).startswith('Retry'): raise notify_user(user, "Deployment script failed for machine %s" % machine_id) notify_admin( "Deployment script failed for machine %s in cloud %s by user %s" % (machine_id, cloud_id, email), repr(exc)) log_event(email=email, event_type='job', action='post_deploy_finished', cloud_id=cloud_id, machine_id=machine_id, enable_monitoring=bool(monitoring), command=command, error="Couldn't connect to run post deploy steps.", job_id=job_id)