Example #1
0
    def create_node(self, defn):

        if not self.vm_id:
            self.log("creating {0}...".format(self.full_name))
            boot_disk = next((v for k,v in defn.block_device_mapping.iteritems() if v.get('bootDisk', False)), None)
            if not boot_disk:
                raise Exception("no boot disk found for {0}".format(self.full_name))
            try:
                service_accounts = []
                account = { 'email': defn.email }
                if defn.scopes != []:
                    account['scopes'] = defn.scopes
                service_accounts.append(account)
                # keep gcloud-like behavior: if nothing was specified
                # (i.e. the service account is 'default'), request the default scopes as well
                if defn.email == 'default' and defn.scopes == []: service_accounts=None

                node = self.connect().create_node(self.machine_name, defn.instance_type, "",
                                 ex_preemptible = (defn.preemptible if defn.preemptible else None),
                                 location = self.connect().ex_get_zone(defn.region),
                                 ex_boot_disk = self.connect().ex_get_volume(boot_disk['disk_name'] or boot_disk['disk'], boot_disk.get('region', None)),
                                 ex_metadata = self.full_metadata(defn.metadata), ex_tags = defn.tags, ex_service_accounts = service_accounts,
                                 external_ip = (self.connect().ex_get_address(defn.ipAddress) if defn.ipAddress else 'ephemeral'),
                                 ex_network = (defn.network if defn.network else 'default') )
            except libcloud.common.google.ResourceExistsError:
                raise Exception("tried creating an instance that already exists; "
                                "please run 'deploy --check' to fix this")
            self.vm_id = self.machine_name
            self.state = self.STARTING
            self.ssh_pinged = False
            self.copy_properties(defn)
            self.public_ipv4 = node.public_ips[0]
            self.log("got public IP: {0}".format(self.public_ipv4))
            known_hosts.add(self.public_ipv4, self.public_host_key)
            self.private_ipv4 = node.private_ips[0]
            for k,v in self.block_device_mapping.iteritems():
                v['needsAttach'] = True
                self.update_block_device_mapping(k, v)
            # set the scheduling config here instead of triggering an update with None values:
            # we might be called with defn = self, so modifying self would corrupt defn
            self.connect().ex_set_node_scheduling(node,
                                                  automatic_restart = defn.automatic_restart,
                                                  on_host_maintenance = defn.on_host_maintenance)
            self.automatic_restart = defn.automatic_restart
            self.on_host_maintenance = defn.on_host_maintenance

        # Update service account
        if self.email != defn.email or self.scopes != defn.scopes:
            self.log('updating the service account')
            node = self.node()
            request = '/zones/%s/instances/%s/setServiceAccount' % (node.extra['zone'].name, node.name)
            service_account = {}
            service_account["email"] = defn.email
            if defn.scopes != []: service_account["scopes"] = defn.scopes
            self.connect().connection.async_request(request, method='POST', data=service_account)
            self.email = defn.email
            self.scopes = defn.scopes

        # Attach missing volumes
        for k, v in self.block_device_mapping.items():
            defn_v = defn.block_device_mapping.get(k, None)
            if v.get('needsAttach', False) and defn_v:
                disk_name = v['disk_name'] or v['disk']
                disk_region = v.get('region', None)
                v['readOnly'] = defn_v['readOnly']
                v['bootDisk'] = defn_v['bootDisk']
                v['deleteOnTermination'] = defn_v['deleteOnTermination']
                v['passphrase'] = defn_v['passphrase']
                self.log("attaching GCE disk '{0}'...".format(disk_name))
                if not v.get('bootDisk', False):
                    self.connect().attach_volume(self.node(), self.connect().ex_get_volume(disk_name, disk_region),
                                   device = disk_name,
                                   ex_mode = ('READ_ONLY' if v['readOnly'] else 'READ_WRITE'))
                del v['needsAttach']
                self.update_block_device_mapping(k, v)

            # generate a LUKS key if the model didn't specify one
            if v.get('encrypt', False) and v.get('passphrase', "") == "" and v.get('generatedKey', "") == "":
                v['generatedKey'] = generate_random_string(length=256)
                self.update_block_device_mapping(k, v)

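        # Push the updated metadata, including the instance's current metadata fingerprint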
        if self.metadata != defn.metadata:
            self.log('setting new metadata values')
            node = self.node()
            meta = self.gen_metadata(self.full_metadata(defn.metadata))
            request = '/zones/%s/instances/%s/setMetadata' % (node.extra['zone'].name,
                                                        node.name)
            metadata_data = {}
            metadata_data['items'] = meta['items']
            metadata_data['kind'] = meta['kind']
            metadata_data['fingerprint'] = node.extra['metadata']['fingerprint']

            self.connect().connection.async_request(request, method='POST',
                                          data=metadata_data)
            self.metadata = defn.metadata

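        # Replace the instance's network tags if they differ from the definition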
        if self.tags != defn.tags:
            self.log('updating tags')
            self.connect().ex_set_node_tags(self.node(), defn.tags)
            self.tags = defn.tags

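        # Release the current public IP if the requested address has changed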
        if self.public_ipv4 and self.ipAddress != defn.ipAddress:
            self.log("detaching old public IP address {0}".format(self.public_ipv4))
            self.connect().connection.async_request(
                "/zones/{0}/instances/{1}/deleteAccessConfig?accessConfig=External+NAT&networkInterface=nic0"
                .format(self.region, self.machine_name), method = 'POST')
            self.public_ipv4 = None
            self.ipAddress = None

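        # Attach the requested static IP (or an ephemeral one) and reset the SSH state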
        if self.public_ipv4 is None:
            self.log("attaching public IP address {0}".format(defn.ipAddress or "[Ephemeral]"))
            self.connect().connection.async_request(
                "/zones/{0}/instances/{1}/addAccessConfig?networkInterface=nic0"
                .format(self.region, self.machine_name), method = 'POST', data = {
                  'kind': 'compute#accessConfig',
                  'type': 'ONE_TO_ONE_NAT',
                  'name': 'External NAT',
                  'natIP': self.connect().ex_get_address(defn.ipAddress).address if defn.ipAddress else None
                })
            self.ipAddress = defn.ipAddress
            self.public_ipv4 = self.node().public_ips[0]
            self.log("got public IP: {0}".format(self.public_ipv4))
            known_hosts.add(self.public_ipv4, self.public_host_key)
            self.ssh.reset()
            self.ssh_pinged = False

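        # Sync the scheduling options (automatic restart / on-host maintenance)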
        if self.automatic_restart != defn.automatic_restart or self.on_host_maintenance != defn.on_host_maintenance:
            self.log("setting scheduling configuration")
            self.connect().ex_set_node_scheduling(self.node(),
                                                  automatic_restart = defn.automatic_restart,
                                                  on_host_maintenance = defn.on_host_maintenance)
            self.automatic_restart = defn.automatic_restart
            self.on_host_maintenance = defn.on_host_maintenance
Example #2
0
    def create_node(self, defn):

        if not self.vm_id:
            self.log("creating {0}...".format(self.full_name))
            boot_disk = next(
                (v for k, v in defn.block_device_mapping.iteritems()
                 if v.get('bootDisk', False)), None)
            if not boot_disk:
                raise Exception("no boot disk found for {0}".format(
                    self.full_name))
            try:
                service_accounts = []
                account = {'email': defn.email}
                if defn.scopes != []:
                    account['scopes'] = defn.scopes
                service_accounts.append(account)
                # keep gcloud-like behavior: if nothing was specified
                # (i.e. the service account is 'default'), request the default scopes as well
                if defn.email == 'default' and defn.scopes == []:
                    service_accounts = None

                node = self.connect().create_node(
                    self.machine_name,
                    defn.instance_type,
                    "",
                    location=self.connect().ex_get_zone(defn.region),
                    ex_boot_disk=self.connect().ex_get_volume(
                        boot_disk['disk_name'] or boot_disk['disk'],
                        boot_disk.get('region', None)),
                    ex_metadata=self.full_metadata(defn.metadata),
                    ex_tags=defn.tags,
                    ex_service_accounts=service_accounts,
                    external_ip=(self.connect().ex_get_address(defn.ipAddress)
                                 if defn.ipAddress else 'ephemeral'),
                    ex_network=(defn.network if defn.network else 'default'))
            except libcloud.common.google.ResourceExistsError:
                raise Exception(
                    "tried creating an instance that already exists; "
                    "please run 'deploy --check' to fix this")
            self.vm_id = self.machine_name
            self.state = self.STARTING
            self.ssh_pinged = False
            self.copy_properties(defn)
            self.public_ipv4 = node.public_ips[0]
            self.log("got public IP: {0}".format(self.public_ipv4))
            known_hosts.add(self.public_ipv4, self.public_host_key)
            self.private_ipv4 = node.private_ips[0]
            for k, v in self.block_device_mapping.iteritems():
                v['needsAttach'] = True
                self.update_block_device_mapping(k, v)
            # set the scheduling config here instead of triggering an update with None values:
            # we might be called with defn = self, so modifying self would corrupt defn
            self.connect().ex_set_node_scheduling(
                node,
                automatic_restart=defn.automatic_restart,
                on_host_maintenance=defn.on_host_maintenance)
            self.automatic_restart = defn.automatic_restart
            self.on_host_maintenance = defn.on_host_maintenance

        # Update service account
        if self.email != defn.email or self.scopes != defn.scopes:
            self.log('updating the service account')
            node = self.node()
            request = '/zones/%s/instances/%s/setServiceAccount' % (
                node.extra['zone'].name, node.name)
            service_account = {}
            service_account["email"] = defn.email
            if defn.scopes != []: service_account["scopes"] = defn.scopes
            self.connect().connection.async_request(request,
                                                    method='POST',
                                                    data=service_account)
            self.email = defn.email
            self.scopes = defn.scopes

        # Attach missing volumes
        for k, v in self.block_device_mapping.items():
            defn_v = defn.block_device_mapping.get(k, None)
            if v.get('needsAttach', False) and defn_v:
                disk_name = v['disk_name'] or v['disk']
                disk_region = v.get('region', None)
                v['readOnly'] = defn_v['readOnly']
                v['bootDisk'] = defn_v['bootDisk']
                v['deleteOnTermination'] = defn_v['deleteOnTermination']
                v['passphrase'] = defn_v['passphrase']
                self.log("attaching GCE disk '{0}'...".format(disk_name))
                if not v.get('bootDisk', False):
                    self.connect().attach_volume(
                        self.node(),
                        self.connect().ex_get_volume(disk_name, disk_region),
                        device=disk_name,
                        ex_mode=('READ_ONLY'
                                 if v['readOnly'] else 'READ_WRITE'))
                del v['needsAttach']
                self.update_block_device_mapping(k, v)

            # generate a LUKS key if the model didn't specify one
            if v.get('encrypt', False) and v.get('passphrase',
                                                 "") == "" and v.get(
                                                     'generatedKey', "") == "":
                v['generatedKey'] = generate_random_string(length=256)
                self.update_block_device_mapping(k, v)

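        # Push the updated metadata, including the instance's current metadata fingerprint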
        if self.metadata != defn.metadata:
            self.log('setting new metadata values')
            node = self.node()
            meta = self.gen_metadata(self.full_metadata(defn.metadata))
            request = '/zones/%s/instances/%s/setMetadata' % (
                node.extra['zone'].name, node.name)
            metadata_data = {}
            metadata_data['items'] = meta['items']
            metadata_data['kind'] = meta['kind']
            metadata_data['fingerprint'] = node.extra['metadata'][
                'fingerprint']

            self.connect().connection.async_request(request,
                                                    method='POST',
                                                    data=metadata_data)
            self.metadata = defn.metadata

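        # Replace the instance's network tags if they differ from the definition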
        if self.tags != defn.tags:
            self.log('updating tags')
            self.connect().ex_set_node_tags(self.node(), defn.tags)
            self.tags = defn.tags

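        # Release the current public IP if the requested address has changed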
        if self.public_ipv4 and self.ipAddress != defn.ipAddress:
            self.log("detaching old public IP address {0}".format(
                self.public_ipv4))
            self.connect().connection.async_request(
                "/zones/{0}/instances/{1}/deleteAccessConfig?accessConfig=External+NAT&networkInterface=nic0"
                .format(self.region, self.machine_name),
                method='POST')
            self.public_ipv4 = None
            self.ipAddress = None

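        # Attach the requested static IP (or an ephemeral one) and reset the SSH state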
        if self.public_ipv4 is None:
            self.log("attaching public IP address {0}".format(
                defn.ipAddress or "[Ephemeral]"))
            self.connect().connection.async_request(
                "/zones/{0}/instances/{1}/addAccessConfig?networkInterface=nic0"
                .format(self.region, self.machine_name),
                method='POST',
                data={
                    'kind':
                    'compute#accessConfig',
                    'type':
                    'ONE_TO_ONE_NAT',
                    'name':
                    'External NAT',
                    'natIP':
                    self.connect().ex_get_address(defn.ipAddress).address
                    if defn.ipAddress else None
                })
            self.ipAddress = defn.ipAddress
            self.public_ipv4 = self.node().public_ips[0]
            self.log("got public IP: {0}".format(self.public_ipv4))
            known_hosts.add(self.public_ipv4, self.public_host_key)
            self.ssh.reset()
            self.ssh_pinged = False

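        # Sync the scheduling options (automatic restart / on-host maintenance)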
        if self.automatic_restart != defn.automatic_restart or self.on_host_maintenance != defn.on_host_maintenance:
            self.log("setting scheduling configuration")
            self.connect().ex_set_node_scheduling(
                self.node(),
                automatic_restart=defn.automatic_restart,
                on_host_maintenance=defn.on_host_maintenance)
            self.automatic_restart = defn.automatic_restart
            self.on_host_maintenance = defn.on_host_maintenance
Example #3
0
    def create_node(self, defn):
        if not self.vm_id:
            self.log("creating {0}...".format(self.full_name))
            boot_disk = next((v for k, v in defn.block_device_mapping.iteritems() if v.get("bootDisk", False)), None)
            if not boot_disk:
                raise Exception("no boot disk found for {0}".format(self.full_name))
            try:
                node = self.connect().create_node(
                    self.machine_name,
                    defn.instance_type,
                    "none",
                    location=self.connect().ex_get_zone(defn.region),
                    ex_boot_disk=self.connect().ex_get_volume(
                        boot_disk["disk_name"] or boot_disk["disk"], boot_disk.get("region", None)
                    ),
                    ex_metadata=self.full_metadata(defn.metadata),
                    ex_tags=defn.tags,
                    external_ip=(self.connect().ex_get_address(defn.ipAddress) if defn.ipAddress else "ephemeral"),
                    ex_network=(defn.network if defn.network else "default"),
                )
            except libcloud.common.google.ResourceExistsError:
                raise Exception(
                    "tried creating an instance that already exists; " "please run 'deploy --check' to fix this"
                )
            self.vm_id = self.machine_name
            self.state = self.STARTING
            self.ssh_pinged = False
            self.copy_properties(defn)
            self.public_ipv4 = node.public_ips[0]
            self.log("got public IP: {0}".format(self.public_ipv4))
            known_hosts.add(self.public_ipv4, self.public_host_key)
            self.private_ipv4 = node.private_ips[0]
            for k, v in self.block_device_mapping.iteritems():
                v["needsAttach"] = True
                self.update_block_device_mapping(k, v)
            # set the scheduling config here instead of triggering an update with None values:
            # we might be called with defn = self, so modifying self would corrupt defn
            self.connect().ex_set_node_scheduling(
                node, automatic_restart=defn.automatic_restart, on_host_maintenance=defn.on_host_maintenance
            )
            self.automatic_restart = defn.automatic_restart
            self.on_host_maintenance = defn.on_host_maintenance

        # Attach missing volumes
        for k, v in self.block_device_mapping.items():
            defn_v = defn.block_device_mapping.get(k, None)
            if v.get("needsAttach", False) and defn_v:
                disk_name = v["disk_name"] or v["disk"]
                disk_region = v.get("region", None)
                v["readOnly"] = defn_v["readOnly"]
                v["bootDisk"] = defn_v["bootDisk"]
                v["deleteOnTermination"] = defn_v["deleteOnTermination"]
                v["passphrase"] = defn_v["passphrase"]
                self.log("attaching GCE disk '{0}'...".format(disk_name))
                if not v.get("bootDisk", False):
                    self.connect().attach_volume(
                        self.node(),
                        self.connect().ex_get_volume(disk_name, disk_region),
                        device=disk_name,
                        ex_mode=("READ_ONLY" if v["readOnly"] else "READ_WRITE"),
                    )
                del v["needsAttach"]
                self.update_block_device_mapping(k, v)

            # generate a LUKS key if the model didn't specify one
            if v.get("encrypt", False) and v.get("passphrase", "") == "" and v.get("generatedKey", "") == "":
                v["generatedKey"] = generate_random_string(length=256)
                self.update_block_device_mapping(k, v)

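        # Push the updated metadata, including the instance's current metadata fingerprint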
        if self.metadata != defn.metadata:
            self.log("setting new metadata values")
            node = self.node()
            meta = self.gen_metadata(self.full_metadata(defn.metadata))
            request = "/zones/%s/instances/%s/setMetadata" % (node.extra["zone"].name, node.name)
            metadata_data = {}
            metadata_data["items"] = meta["items"]
            metadata_data["kind"] = meta["kind"]
            metadata_data["fingerprint"] = node.extra["metadata"]["fingerprint"]

            self.connect().connection.async_request(request, method="POST", data=metadata_data)
            self.metadata = defn.metadata

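        # Replace the instance's network tags if they differ from the definition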
        if self.tags != defn.tags:
            self.log("updating tags")
            self.connect().ex_set_node_tags(self.node(), defn.tags)
            self.tags = defn.tags

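        # Release the current public IP if the requested address has changed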
        if self.public_ipv4 and self.ipAddress != defn.ipAddress:
            self.log("detaching old public IP address {0}".format(self.public_ipv4))
            self.connect().connection.async_request(
                "/zones/{0}/instances/{1}/deleteAccessConfig?accessConfig=External+NAT&networkInterface=nic0".format(
                    self.region, self.machine_name
                ),
                method="POST",
            )
            self.public_ipv4 = None
            self.ipAddress = None

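        # Attach the requested static IP (or an ephemeral one) and reset the SSH state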
        if self.public_ipv4 is None:
            self.log("attaching public IP address {0}".format(defn.ipAddress or "[Ephemeral]"))
            self.connect().connection.async_request(
                "/zones/{0}/instances/{1}/addAccessConfig?networkInterface=nic0".format(self.region, self.machine_name),
                method="POST",
                data={
                    "kind": "compute#accessConfig",
                    "type": "ONE_TO_ONE_NAT",
                    "name": "External NAT",
                    "natIP": self.connect().ex_get_address(defn.ipAddress).address if defn.ipAddress else None,
                },
            )
            self.ipAddress = defn.ipAddress
            self.public_ipv4 = self.node().public_ips[0]
            self.log("got public IP: {0}".format(self.public_ipv4))
            known_hosts.add(self.public_ipv4, self.public_host_key)
            self.ssh.reset()
            self.ssh_pinged = False

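        # Sync the scheduling options (automatic restart / on-host maintenance)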
        if self.automatic_restart != defn.automatic_restart or self.on_host_maintenance != defn.on_host_maintenance:
            self.log("setting scheduling configuration")
            self.connect().ex_set_node_scheduling(
                self.node(), automatic_restart=defn.automatic_restart, on_host_maintenance=defn.on_host_maintenance
            )
            self.automatic_restart = defn.automatic_restart
            self.on_host_maintenance = defn.on_host_maintenance