def interactive_shell(self, owner, machine_id, cloud_id, **kwargs):
    """Open an interactive shell session on an LXD container.

    Resolves the LXD endpoint for the given cloud, asks the LXD API to
    spawn ``/bin/sh`` inside the container over a websocket, stores the
    returned control/session credentials on this shell object, and then
    builds the websocket URI used for the connection.

    :param owner: the owner (organization) the cloud belongs to
    :param machine_id: mist id of the machine (container) to attach to
    :param cloud_id: id of the LXD cloud hosting the container
    :param kwargs: must include ``cols`` and ``rows`` (terminal geometry);
        everything is also forwarded to ``build_uri``
    """
    lxd_port, cloud = \
        self.get_lxd_endpoint(owner, cloud_id=cloud_id, job_id=None)

    log.info("Autoconfiguring LXDShell for machine %s:%s",
             cloud.id, machine_id)

    # TLS is enabled only when both a key and a certificate are configured
    # on the cloud.
    ssl_enabled = cloud.key_file and cloud.cert_file

    # local import — presumably to avoid a circular import; confirm
    from mist.api.methods import connect_provider
    conn = connect_provider(cloud)

    # Request an interactive, websocket-backed exec session sized to the
    # client's terminal.
    config = {"wait-for-websocket": True, "interactive": True}
    environment = {"TERM": "xterm"}
    config["environment"] = environment
    config["width"] = kwargs["cols"]
    config["height"] = kwargs["rows"]

    # I need here the name not mist id: LXD addresses containers by name,
    # so map the mist machine id to the container name.
    machine = Machine.objects.get(id=machine_id, cloud=cloud_id)
    cont_id = machine.name

    response = conn.ex_execute_cmd_on_container(cont_id=cont_id,
                                                command=["/bin/sh"],
                                                **config)

    # Keep the session credentials returned by LXD for the ws handshake.
    uuid = response.uuid
    secret_0 = response.secret_0
    self.set_ws_data(control=response.control, uuid=uuid, secret=secret_0)

    # build the uri to use for the connection
    self.build_uri(lxd_port=lxd_port, cloud=cloud,
                   ssl_enabled=ssl_enabled, **kwargs)
def associate_ip(owner, cloud_id, network_id, ip, machine_id=None,
                 assign=True):
    """Associate (or just reserve) an IP address with a machine.

    Only NephoScale clouds support this operation; for any other
    provider the call is a no-op and ``False`` is returned.

    :param owner: the owner the cloud belongs to
    :param cloud_id: id of the cloud to operate on
    :param network_id: unused here; kept for interface compatibility
    :param ip: the IP address to associate
    :param machine_id: server to bind the IP to (provider-side id)
    :param assign: whether the IP should actually be assigned
    :return: the driver's response, or ``False`` if unsupported
    """
    target_cloud = Cloud.objects.get(owner=owner, id=cloud_id, deleted=None)
    driver = connect_provider(target_cloud)
    # Delegate to the driver only on the single supported provider.
    if driver.type == Provider.NEPHOSCALE:
        return driver.ex_associate_ip(ip, server=machine_id, assign=assign)
    return False
def create_machine(owner, cloud_id, key_id, machine_name, location_id,
                   image_id, size_id, image_extra, disk, image_name,
                   size_name, location_name, ips, monitoring, networks=None,
                   docker_env=None, docker_command=None, ssh_port=22,
                   script='', script_id='', script_params='', job_id=None,
                   job=None, docker_port_bindings=None,
                   docker_exposed_ports=None, azure_port_bindings='',
                   hostname='', plugins=None, disk_size=None, disk_path=None,
                   post_script_id='', post_script_params='', cloud_init='',
                   associate_floating_ip=False,
                   associate_floating_ip_subnet=None, project_id=None,
                   schedule=None, command=None, tags=None, bare_metal=False,
                   hourly=True, softlayer_backend_vlan_id=None, size_ram=256,
                   size_cpu=1, size_disk_primary=5, size_disk_swap=1,
                   boot=True, build=True, cpu_priority=1, cpu_sockets=1,
                   cpu_threads=1, port_speed=0, hypervisor_group_id=None):
    """Creates a new virtual machine on the specified cloud.

    If the cloud is Rackspace it attempts to deploy the node with an ssh key
    provided in config. the method used is the only one working in the old
    Rackspace cloud. create_node(), from libcloud.compute.base, with 'auth'
    kwarg doesn't do the trick. Didn't test if you can upload some ssh
    related files using the 'ex_files' kwarg from openstack 1.0 driver.

    In Linode creation is a bit different. There you can pass the key file
    directly during creation. The Linode API also requires to set a disk
    size and doesn't get it from size.id. So, send size.disk from the
    client and use it in all cases just to avoid provider checking.

    Finally, Linode API does not support association between a machine and
    the image it came from. We could set this, at least for machines created
    through mist.api in ex_comment, lroot or lconfig. lroot seems more
    appropriate. However, liblcoud doesn't support linode.config.list at
    the moment, so no way to get them. Also, it will create inconsistencies
    for machines created through mist.api and those from the Linode
    interface.

    :return: dict with the new node's id, name, extra, public/private ips
        and the job_id, so callers can track the deployment.
    """
    # script: a command that is given once
    # script_id: id of a script that exists - for mist.core
    # script_params: extra params, for script_id
    # post_script_id: id of a script that exists - for mist.core. If script_id
    # or monitoring are supplied, this will run after both finish
    # post_script_params: extra params, for post_script_id

    # Normalize mutable defaults (the signature uses None sentinels to avoid
    # the shared-mutable-default pitfall; behavior is unchanged for callers).
    networks = networks or []
    docker_env = docker_env or []
    docker_port_bindings = docker_port_bindings or {}
    docker_exposed_ports = docker_exposed_ports or {}
    schedule = schedule or {}

    log.info('Creating machine %s on cloud %s' % (machine_name, cloud_id))
    cloud = Cloud.objects.get(owner=owner, id=cloud_id, deleted=None)
    conn = connect_provider(cloud)

    machine_name = machine_name_validator(conn.type, machine_name)
    key = None
    if key_id:
        key = Key.objects.get(owner=owner, id=key_id, deleted=None)

    # if key_id not provided, search for default key
    if conn.type not in [Provider.LIBVIRT, Provider.DOCKER, Provider.ONAPP]:
        if not key_id:
            key = Key.objects.get(owner=owner, default=True, deleted=None)
            key_id = key.name
    if key:
        private_key = key.private
        public_key = key.public.replace('\n', '')
    else:
        # Both must be initialized: the EC2/Linode branches below test
        # `private_key`, which previously raised NameError when no key
        # was resolved.
        private_key = None
        public_key = None

    # Build lightweight libcloud objects out of the raw ids/names; the
    # drivers only need the ids for most providers.
    size = NodeSize(size_id, name=size_name, ram='', disk=disk,
                    bandwidth='', price='', driver=conn)
    image = NodeImage(image_id, name=image_name, extra=image_extra,
                      driver=conn)
    location = NodeLocation(location_id, name=location_name, country='',
                            driver=conn)

    if conn.type == Provider.DOCKER:
        if public_key:
            node = _create_machine_docker(
                conn, machine_name, image_id, '',
                public_key=public_key,
                docker_env=docker_env,
                docker_command=docker_command,
                docker_port_bindings=docker_port_bindings,
                docker_exposed_ports=docker_exposed_ports
            )
            node_info = conn.inspect_node(node)
            # Best effort: discover the host port mapped to the container's
            # sshd; fall back to the default if the mapping is missing.
            try:
                ssh_port = int(
                    node_info.extra[
                        'network_settings']['Ports']['22/tcp'][0]['HostPort'])
            except Exception:
                pass
        else:
            node = _create_machine_docker(
                conn, machine_name, image_id, script,
                docker_env=docker_env,
                docker_command=docker_command,
                docker_port_bindings=docker_port_bindings,
                docker_exposed_ports=docker_exposed_ports
            )
    elif conn.type in [Provider.RACKSPACE_FIRST_GEN, Provider.RACKSPACE]:
        node = _create_machine_rackspace(conn, public_key, machine_name,
                                         image, size, location,
                                         user_data=cloud_init)
    elif conn.type in [Provider.OPENSTACK]:
        node = _create_machine_openstack(conn, private_key, public_key,
                                         machine_name, image, size, location,
                                         networks, cloud_init)
    elif conn.type in config.EC2_PROVIDERS and private_key:
        # EC2 needs the real NodeLocation object (availability zone), not
        # the stub built above.
        locations = conn.list_locations()
        for loc in locations:
            if loc.id == location_id:
                location = loc
                break
        node = _create_machine_ec2(conn, key_id, private_key, public_key,
                                   machine_name, image, size, location,
                                   cloud_init)
    elif conn.type == Provider.NEPHOSCALE:
        node = _create_machine_nephoscale(conn, key_id, private_key,
                                          public_key, machine_name, image,
                                          size, location, ips)
    elif conn.type == Provider.GCE:
        # GCE requires the full NodeSize object for the chosen zone.
        sizes = conn.list_sizes(location=location_name)
        for size in sizes:
            if size.id == size_id:
                size = size
                break
        node = _create_machine_gce(conn, key_id, private_key, public_key,
                                   machine_name, image, size, location,
                                   cloud_init)
    elif conn.type == Provider.SOFTLAYER:
        node = _create_machine_softlayer(
            conn, key_id, private_key, public_key, machine_name, image, size,
            location, bare_metal, cloud_init, hourly,
            softlayer_backend_vlan_id
        )
    elif conn.type == Provider.ONAPP:
        node = _create_machine_onapp(
            conn, public_key, machine_name, image, size_ram, size_cpu,
            size_disk_primary, size_disk_swap, boot, build, cpu_priority,
            cpu_sockets, cpu_threads, port_speed, location, networks,
            hypervisor_group_id
        )
    elif conn.type == Provider.DIGITAL_OCEAN:
        node = _create_machine_digital_ocean(
            conn, key_id, private_key, public_key, machine_name, image,
            size, location, cloud_init)
    elif conn.type == Provider.AZURE:
        node = _create_machine_azure(
            conn, key_id, private_key, public_key, machine_name, image, size,
            location, cloud_init=cloud_init, cloud_service_name=None,
            azure_port_bindings=azure_port_bindings
        )
    elif conn.type in [Provider.VCLOUD, Provider.INDONESIAN_VCLOUD]:
        node = _create_machine_vcloud(conn, machine_name, image, size,
                                      public_key, networks)
    elif conn.type == Provider.LINODE and private_key:
        # FIXME: The orchestration UI does not provide all the necessary
        # parameters, thus we need to fetch the proper size and image
        # objects. This should be properly fixed when migrated to the
        # controllers.
        if not disk:
            for size in conn.list_sizes():
                if int(size.id) == int(size_id):
                    size = size
                    break
        if not image_extra:  # Missing: {'64bit': 1, 'pvops': 1}
            for image in conn.list_images():
                if int(image.id) == int(image_id):
                    image = image
                    break
        node = _create_machine_linode(conn, key_id, private_key, public_key,
                                      machine_name, image, size, location)
    elif conn.type == Provider.HOSTVIRTUAL:
        node = _create_machine_hostvirtual(conn, public_key, machine_name,
                                           image, size, location)
    elif conn.type == Provider.VULTR:
        node = _create_machine_vultr(conn, public_key, machine_name, image,
                                     size, location, cloud_init)
    elif conn.type == Provider.LIBVIRT:
        try:
            # size_id should have a format cpu:ram, eg 1:2048
            cpu = size_id.split(':')[0]
            ram = size_id.split(':')[1]
        except Exception:
            # Malformed/absent size spec: fall back to sane defaults.
            ram = 512
            cpu = 1
        node = _create_machine_libvirt(conn, machine_name,
                                       disk_size=disk_size,
                                       ram=ram, cpu=cpu,
                                       image=image_id,
                                       disk_path=disk_path,
                                       networks=networks,
                                       public_key=public_key,
                                       cloud_init=cloud_init)
    elif conn.type == Provider.PACKET:
        node = _create_machine_packet(conn, public_key, machine_name, image,
                                      size, location, cloud_init, project_id)
    else:
        raise BadRequestError("Provider unknown.")

    if key is not None:
        # we did this change because there was race condition with
        # list_machines
        try:
            machine = Machine(cloud=cloud, machine_id=node.id).save()
        except me.NotUniqueError:
            machine = Machine.objects.get(cloud=cloud, machine_id=node.id)

        username = node.extra.get('username', '')
        machine.ctl.associate_key(key, username=username, port=ssh_port,
                                  no_connect=True)

    # Call post_deploy_steps for every provider
    if conn.type == Provider.AZURE:
        # for Azure, connect with the generated password, deploy the ssh key
        # when this is ok, it calls post_deploy for script/monitoring
        mist.api.tasks.azure_post_create_steps.delay(
            owner.id, cloud_id, node.id, monitoring, key_id,
            node.extra.get('username'), node.extra.get('password'),
            public_key, script=script,
            script_id=script_id, script_params=script_params, job_id=job_id,
            hostname=hostname, plugins=plugins,
            post_script_id=post_script_id,
            post_script_params=post_script_params, schedule=schedule, job=job,
        )
    elif conn.type == Provider.OPENSTACK:
        if associate_floating_ip:
            networks = list_networks(owner, cloud_id)
            mist.api.tasks.openstack_post_create_steps.delay(
                owner.id, cloud_id, node.id, monitoring, key_id,
                node.extra.get('username'), node.extra.get('password'),
                public_key, script=script, script_id=script_id,
                script_params=script_params, job_id=job_id, job=job,
                hostname=hostname, plugins=plugins,
                post_script_params=post_script_params,
                networks=networks, schedule=schedule,
            )
    elif conn.type == Provider.RACKSPACE_FIRST_GEN:
        # for Rackspace First Gen, cannot specify ssh keys. When node is
        # created we have the generated password, so deploy the ssh key
        # when this is ok and call post_deploy for script/monitoring
        mist.api.tasks.rackspace_first_gen_post_create_steps.delay(
            owner.id, cloud_id, node.id, monitoring, key_id,
            node.extra.get('password'), public_key, script=script,
            script_id=script_id, script_params=script_params,
            job_id=job_id, job=job, hostname=hostname, plugins=plugins,
            post_script_id=post_script_id,
            post_script_params=post_script_params, schedule=schedule
        )
    elif key_id:
        mist.api.tasks.post_deploy_steps.delay(
            owner.id, cloud_id, node.id, monitoring, script=script,
            key_id=key_id, script_id=script_id, script_params=script_params,
            job_id=job_id, job=job, hostname=hostname, plugins=plugins,
            post_script_id=post_script_id,
            post_script_params=post_script_params, schedule=schedule,
        )

    if tags:
        resolve_id_and_set_tags(owner, 'machine', node.id, tags,
                                cloud_id=cloud_id)

    ret = {'id': node.id,
           'name': node.name,
           'extra': node.extra,
           'public_ips': node.public_ips,
           'private_ips': node.private_ips,
           'job_id': job_id,
           }
    return ret