def check_if_machine_accessible(self, machine):
    """Attempt to port knock and ping the machine.

    Tries a TCP connect (through the owner's NAT mapping) to the
    machine's ssh/rdp ports plus a list of common ports, then falls back
    to an ICMP ping. Returns True on the first success, False otherwise.
    """
    assert machine.cloud.id == self.cloud.id
    hostname = machine.hostname or (
        machine.private_ips[0] if machine.private_ips else '')
    if not hostname:
        return False
    # Common ports first; prepend machine-specific ssh/rdp ports so they
    # are tried before the defaults.
    ports_list = [22, 80, 443, 3389]
    for port in (machine.ssh_port, machine.rdp_port):
        if port and port not in ports_list:
            ports_list.insert(0, port)
    socket_timeout = 3  # timeout (seconds) for each connection attempt
    for port in ports_list:
        log.info("Attempting to connect to %s:%d", hostname, port)
        try:
            s = socket.create_connection(
                dnat(self.cloud.owner, hostname, port),
                socket_timeout)
            s.shutdown(2)
            s.close()  # release the fd; shutdown alone leaves it open
        except Exception:
            # Best effort: a refused/timed-out port just means we try
            # the next one. Was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            log.info("Failed to connect to %s:%d", hostname, port)
            continue
        log.info("Connected to %s:%d", hostname, port)
        return True
    try:
        log.info("Pinging %s", hostname)
        from mist.api.methods import ping
        ping_res = ping(owner=self.cloud.owner, host=hostname, pkts=1)
        if int(ping_res.get('packets_rx', 0)) > 0:
            log.info("Successfully pinged %s", hostname)
            return True
    except Exception:
        # Ping failure is non-fatal; fall through and report unreachable.
        log.info("Failed to ping %s", hostname)
    return False
def _connect(self):
    """Return a Docker driver, using TLS auth when key material exists,
    otherwise falling back to username/password authentication."""
    host, port = dnat(self.cloud.owner, self.cloud.host, self.cloud.port)

    if self.cloud.key_file and self.cloud.cert_file:
        # TLS authentication: the driver expects filesystem paths, so
        # dump each PEM blob into its own temporary file first.
        def _dump_to_tempfile(contents):
            tmp = tempfile.NamedTemporaryFile(delete=False)
            tmp.write(contents)
            tmp.close()
            return tmp.name

        key_path = _dump_to_tempfile(self.cloud.key_file)
        cert_path = _dump_to_tempfile(self.cloud.cert_file)
        ca_path = None
        if self.cloud.ca_cert_file:
            ca_path = _dump_to_tempfile(self.cloud.ca_cert_file)
        return get_driver(Provider.DOCKER)(host=host,
                                           port=port,
                                           key_file=key_path,
                                           cert_file=cert_path,
                                           ca_cert=ca_path,
                                           verify_match_hostname=False)

    # Username/Password authentication.
    return get_driver(Provider.DOCKER)(self.cloud.username,
                                       self.cloud.password,
                                       host, port)
def _connect(self):
    """Connect with basic auth against the NAT-translated host."""
    endpoint = dnat(self.cloud.owner, self.cloud.host)
    driver_cls = get_driver(self.provider)
    return driver_cls(self.cloud.username,
                      self.cloud.password,
                      host=endpoint,
                      port=int(self.cloud.port),
                      verify_match_hostname=False)
def get_docker_endpoint(self, owner, cloud_id, job_id=None):
    """Resolve the Docker host/port to use, setting ``self.host``.

    With a ``job_id``: verify the story belongs to ``owner``, point at the
    configured Docker daemon and return ``(port, container_id)`` for the
    job's first logged container.
    Without: look up the cloud and return ``(port, cloud)`` after NAT
    translation.
    """
    if job_id:
        event = get_story(owner.id, job_id)
        # Guard against serving another owner's job story.
        assert owner.id == event['owner_id'], 'Owner ID mismatch!'
        self.host, docker_port = config.DOCKER_IP, config.DOCKER_PORT
        return docker_port, event['logs'][0]['container_id']
    cloud = Cloud.objects.get(owner=owner, id=cloud_id, deleted=None)
    # NOTE(review): ``self.host`` is read here but not assigned earlier in
    # this branch — presumably set by the caller before invoking this
    # method; confirm, since an unset attribute would raise AttributeError.
    self.host, docker_port = dnat(owner, self.host, cloud.port)
    return docker_port, cloud
def _connect(self):
    """Build an OpenStack driver using keystone v2 password auth."""
    auth_url = dnat(self.cloud.owner, self.cloud.url)
    openstack_cls = get_driver(Provider.OPENSTACK)
    return openstack_cls(
        self.cloud.username,
        self.cloud.password,
        ex_force_auth_version='2.0_password',
        ex_force_auth_url=auth_url,
        ex_tenant_name=self.cloud.tenant,
        ex_force_service_region=self.cloud.region,
        ex_force_base_url=self.cloud.compute_endpoint,
    )
def _connect(self):
    """Three supported ways to connect: local system, qemu+tcp, qemu+ssh"""
    import libcloud.compute.drivers.libvirt_driver
    libvirt_driver = libcloud.compute.drivers.libvirt_driver
    libvirt_driver.ALLOW_LIBVIRT_LOCALHOST = config.ALLOW_LIBVIRT_LOCALHOST

    driver_cls = get_driver(Provider.LIBVIRT)
    if self.cloud.key:
        # qemu+ssh: tunnel over SSH with the stored private key.
        host, port = dnat(self.cloud.owner,
                          self.cloud.host, self.cloud.port)
        return driver_cls(host,
                          hypervisor=self.cloud.host,
                          user=self.cloud.username,
                          ssh_key=self.cloud.key.private,
                          ssh_port=int(port))
    # qemu+tcp: no key configured. NOTE(review): the NAT lookup uses a
    # hard-coded 5000 instead of self.cloud.port — confirm intended.
    host, port = dnat(self.cloud.owner, self.cloud.host, 5000)
    return driver_cls(host,
                      hypervisor=self.cloud.host,
                      user=self.cloud.username,
                      tcp_port=int(port))
def load(self, machines=None):
    """Build the ansible inventory for the owner's machines.

    ``machines`` is an optional list of ``(cloud_id, machine_id)`` tuples;
    when omitted (or empty), every machine across the owner's non-deleted
    clouds is included. Populates ``self.hosts`` with ansible connection
    variables per host and ``self.keys`` with private key material keyed
    by key name.
    """
    self.hosts = {}
    self.keys = {}
    if not machines:
        clouds = Cloud.objects(owner=self.owner, deleted=None)
        machines = [(machine.cloud.id, machine.machine_id)
                    for machine in Machine.objects(cloud__in=clouds)]
    for bid, mid in machines:
        try:
            name, ip_addr = self.find_machine_details(bid, mid)
            key_id, ssh_user, port = self.find_ssh_settings(bid, mid)
        except Exception as exc:
            # Best effort: skip machines we can't resolve, but report via
            # the module logger (was a bare `print exc` to stdout).
            log.warning("Failed to load machine %s:%s: %r", bid, mid, exc)
            continue
        ip_addr, port = dnat(self.owner, ip_addr, port)
        if key_id not in self.keys:
            keypair = SSHKey.objects.get(owner=self.owner, name=key_id,
                                         deleted=None)
            self.keys[key_id] = keypair.private
            if isinstance(keypair, SignedSSHKey):
                # if signed ssh key, provide the key appending a -cert.pub
                # on the name since this is how ssh will include it as
                # an identify file
                self.keys['%s-cert.pub' % key_id] = keypair.certificate
                # pub key also needed for openssh 7.2
                self.keys['%s.pub' % key_id] = keypair.public
        if name in self.hosts:
            # De-duplicate inventory aliases with a numeric suffix.
            num = 2
            while ('%s-%d' % (name, num)) in self.hosts:
                num += 1
            name = '%s-%d' % (name, num)
        self.hosts[name] = {
            'ansible_ssh_host': ip_addr,
            'ansible_ssh_port': port,
            'ansible_ssh_user': ssh_user,
            'ansible_ssh_private_key_file': 'id_rsa/%s' % key_id,
        }
def machine_rdp(request):
    """
    Rdp file for windows machines
    Generate and return an rdp file for windows machines
    READ permission required on cloud.
    READ permission required on machine.
    ---
    cloud:
      in: path
      required: true
      type: string
    machine:
      in: path
      required: true
      type: string
    rdp_port:
      default: 3389
      in: query
      required: true
      type: integer
    host:
      in: query
      required: true
      type: string
    """
    cloud_id = request.matchdict.get('cloud')
    auth_context = auth_context_from_request(request)
    if cloud_id:
        # this is deprecated, keep it for backwards compatibility
        machine_id = request.matchdict['machine']
        auth_context.check_perm("cloud", "read", cloud_id)
        try:
            machine = Machine.objects.get(cloud=cloud_id,
                                          machine_id=machine_id,
                                          state__ne='terminated')
            # used by logging_view_decorator
            request.environ['machine_uuid'] = machine.id
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_id)
    else:
        machine_uuid = request.matchdict['machine_uuid']
        try:
            machine = Machine.objects.get(id=machine_uuid,
                                          state__ne='terminated')
            # used by logging_view_decorator
            request.environ['machine_id'] = machine.machine_id
            request.environ['cloud_id'] = machine.cloud.id
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_uuid)
        cloud_id = machine.cloud.id
    auth_context.check_perm("cloud", "read", cloud_id)
    auth_context.check_perm("machine", "read", machine.id)
    rdp_port = request.params.get('rdp_port', 3389)
    host = request.params.get('host')
    if not host:
        raise BadRequestError('No hostname specified')
    # Validate the requested port, falling back to the RDP default when
    # it is non-numeric OR outside the valid TCP port range. (Previously
    # the range comparison was evaluated and its result discarded, so
    # out-of-range ports were passed through unchecked.)
    try:
        if not 1 < int(rdp_port) < 65535:
            rdp_port = 3389
    except (ValueError, TypeError):
        rdp_port = 3389
    host, rdp_port = dnat(auth_context.owner, host, rdp_port)
    rdp_content = 'full address:s:%s:%s\nprompt for credentials:i:1' % \
        (host, rdp_port)
    return Response(content_type='application/octet-stream',
                    content_disposition='attachment; filename="%s.rdp"' % host,
                    charset='utf8',
                    pragma='no-cache',
                    body=rdp_content)
def autoconfigure(self, owner, cloud_id, machine_id, key_id=None,
                  username=None, password=None, port=22):
    """Autoconfigure SSH client.

    This will do its best effort to find a suitable key and username
    and will try to connect. If it fails it raises
    MachineUnauthorizedError, otherwise it initializes self and returns
    a (key_id, ssh_user) tuple. If connection succeeds, it updates the
    association information in the key with the current timestamp and
    the username used to connect.
    """
    log.info("autoconfiguring Shell for machine %s:%s",
             cloud_id, machine_id)
    cloud = Cloud.objects.get(owner=owner, id=cloud_id, deleted=None)
    try:
        machine = Machine.objects.get(cloud=cloud, machine_id=machine_id)
    except me.DoesNotExist:
        # No record yet: work with an unsaved Machine document.
        machine = Machine(cloud=cloud, machine_id=machine_id)
    # Candidate keys: the explicit one, or every key associated so far.
    if key_id:
        keys = [Key.objects.get(owner=owner, id=key_id, deleted=None)]
    else:
        keys = [key_assoc.keypair
                for key_assoc in machine.key_associations
                if isinstance(key_assoc.keypair, Key)]
    # Candidate usernames: the explicit one, previously-used ones, or a
    # list of common cloud-image defaults as a last resort.
    if username:
        users = [username]
    else:
        users = list(set([key_assoc.ssh_user
                          for key_assoc in machine.key_associations
                          if key_assoc.ssh_user]))
    if not users:
        for name in ['root', 'ubuntu', 'ec2-user', 'user', 'azureuser',
                     'core', 'centos', 'cloud-user', 'fedora']:
            if not name in users:
                users.append(name)
    # Candidate ports: the explicit one, or every previously-associated
    # port, always including 22.
    if port != 22:
        ports = [port]
    else:
        ports = list(set([key_assoc.port
                          for key_assoc in machine.key_associations]))
    if 22 not in ports:
        ports.append(22)
    # store the original destination IP to prevent rewriting it when
    # NATing
    ssh_host = self.host
    for key in keys:
        for ssh_user in users:
            for port in ports:
                try:
                    # store the original ssh port in case of NAT
                    # by the OpenVPN server
                    ssh_port = port
                    self.host, port = dnat(owner, ssh_host, port)
                    log.info("ssh -i %s %s@%s:%s",
                             key.name, ssh_user, self.host, port)
                    cert_file = ''
                    if isinstance(key, SignedSSHKey):
                        cert_file = key.certificate
                    self.connect(username=ssh_user,
                                 key=key,
                                 password=password,
                                 cert_file=cert_file,
                                 port=port)
                except MachineUnauthorizedError:
                    # Wrong key/user/port combo: try the next candidate.
                    continue
                # Connected. Some images reject the login with a banner
                # telling us which user to use instead; detect and retry.
                retval, resp = self.command('uptime')
                new_ssh_user = None
                if 'Please login as the user ' in resp:
                    new_ssh_user = resp.split()[5].strip('"')
                elif 'Please login as the' in resp:
                    # for EC2 Amazon Linux machines, usually with
                    # ec2-user
                    new_ssh_user = resp.split()[4].strip('"')
                if new_ssh_user:
                    log.info("retrying as %s", new_ssh_user)
                    try:
                        self.disconnect()
                        cert_file = ''
                        if isinstance(key, SignedSSHKey):
                            cert_file = key.certificate
                        self.connect(username=new_ssh_user,
                                     key=key,
                                     password=password,
                                     cert_file=cert_file,
                                     port=port)
                        ssh_user = new_ssh_user
                    except MachineUnauthorizedError:
                        continue
                # we managed to connect successfully, return
                # but first update key
                updated = False
                for key_assoc in machine.key_associations:
                    if key_assoc.keypair == key:
                        key_assoc.ssh_user = ssh_user
                        updated = True
                        trigger_session_update_flag = True
                        break
                if not updated:
                    trigger_session_update_flag = True
                    # in case of a private host do NOT update the key
                    # associations with the port allocated by the
                    # OpenVPN server, instead use the original ssh_port
                    key_assoc = KeyAssociation(keypair=key,
                                               ssh_user=ssh_user,
                                               port=ssh_port,
                                               sudo=self.check_sudo())
                    machine.key_associations.append(key_assoc)
                machine.save()
                if trigger_session_update_flag:
                    trigger_session_update(owner.id, ['keys'])
                return key.name, ssh_user
    raise MachineUnauthorizedError("%s:%s" % (cloud_id, machine_id))
def _connect(self):
    """Return a vSphere driver pointed at the NAT-translated host."""
    endpoint = dnat(self.cloud.owner, self.cloud.host)
    vsphere_cls = get_driver(Provider.VSPHERE)
    return vsphere_cls(host=endpoint,
                       username=self.cloud.username,
                       password=self.cloud.password)