def _check(self, res):
    # Check the real-world state of this Hetzner machine and record it in
    # ‘res’ (exists / is_up / is_reachable flags).
    if not self.vm_id:
        res.exists = False
        return
    if self.state in (self.STOPPED, self.STOPPING):
        # Machine is supposed to be down; a TCP probe of the SSH port tells
        # us whether it actually is.
        res.is_up = ping_tcp_port(self.main_ipv4, 22)
        if not res.is_up:
            self.state = self.STOPPED
            res.is_reachable = False
        return
    res.exists = True
    avg = self.get_load_avg()
    if avg is None:
        # Could not read the load average over SSH: the machine is not
        # reachable in its normal state.
        if self.state in (self.UP, self.RESCUE):
            self.state = self.UNREACHABLE
        res.is_reachable = False
        res.is_up = False
    elif self.run_command("test -f /etc/NIXOS", check=False) != 0:
        # SSH works but the system is not NixOS, so we are talking to the
        # rescue system.
        self.state = self.RESCUE
        self.ssh_pinged = True
        self._ssh_pinged_this_time = True
        res.is_reachable = True
        res.is_up = False
    else:
        res.is_up = True
        MachineState._check(self, res)
def _check(self, res):
    """Check the real-world state of this GCE machine and update ‘res’.

    Fixes over the original: removed a stray trailing semicolon, dropped
    an unused local binding of the looked-up disk, and replaced the
    Python-2-only ``iteritems()`` with ``items()``.
    """
    try:
        node = self.node()
        res.exists = True
        res.is_up = node.state == NodeState.RUNNING or node.state == NodeState.REBOOTING
        if node.state == NodeState.REBOOTING or node.state == NodeState.PENDING:
            self.state = self.STARTING
        if node.state == NodeState.STOPPED or node.state == NodeState.TERMINATED:
            self.state = self.STOPPED
        if node.state == NodeState.UNKNOWN:
            self.state = self.UNKNOWN
        if node.state == NodeState.RUNNING:
            # check that all disks are attached
            res.disks_ok = True
            for k, v in self.block_device_mapping.items():
                disk_name = v['disk_name'] or v['disk']
                if all(d.get("deviceName", None) != disk_name for d in node.extra['disks']):
                    res.disks_ok = False
                    res.messages.append("disk {0} is detached".format(disk_name))
                    # Distinguish “detached but still exists” from
                    # “destroyed”: the lookup raises when the disk is gone.
                    try:
                        self.connect().ex_get_volume(disk_name, v.get('region', None))
                    except libcloud.common.google.ResourceNotFoundError:
                        res.messages.append("disk {0} is destroyed".format(disk_name))
            self.handle_changed_property('public_ipv4',
                                         node.public_ips[0] if node.public_ips else None,
                                         property_name='IP address')
            if self.public_ipv4:
                known_hosts.add(self.public_ipv4, self.public_host_key)
            MachineState._check(self, res)
    except libcloud.common.google.ResourceNotFoundError:
        # The instance itself no longer exists.
        res.exists = False
        res.is_up = False
        self.state = self.MISSING
def _check(self, res):
    # Check the real-world state of this VirtualBox VM and record it in ‘res’.
    if not self.vm_id:
        res.exists = False
        return
    # can_fail=True: returns None instead of raising when the VM is gone.
    state = self._get_vm_state(can_fail=True)
    if state is None:
        # The VM has disappeared behind our back; forget everything we know
        # about it in a single state-database transaction.
        with self.depl._db:
            self.vm_id = None
            self.private_ipv4 = None
            self.sata_controller_created = False
            self.public_host_key = None
            self.private_host_key = None
            self.shared_folders = {}
            self.disks = {}
            self.state = self.MISSING
        return
    res.exists = True
    #self.log("VM state is ‘{0}’".format(state))
    if state == "poweroff" or state == "aborted":
        res.is_up = False
        self.state = self.STOPPED
    elif state == "running":
        res.is_up = True
        self._update_ip()
        MachineState._check(self, res)
    else:
        # Any other VBoxManage-reported state (paused, saving, …).
        self.state = self.UNKNOWN
def reboot(self, hard=False):
    """Reboot this machine; a hard reboot goes through the GCE API."""
    if not hard:
        MachineState.reboot(self, hard=hard)
        return
    self.log("sending hard reset to GCE machine...")
    self.node().reboot()
    self.state = self.STARTING
def _check(self, res):
    """Check the real-world state of this EC2 instance and update ‘res’.

    Also surfaces any scheduled maintenance events as check messages.
    (Fix: removed the unused local ``old_state``.)
    """
    if not self.vm_id:
        res.exists = False
        return
    self.connect()
    instance = self._get_instance_by_id(self.vm_id, allow_missing=True)
    #self.log("instance state is ‘{0}’".format(instance.state if instance else "gone"))
    if instance is None or instance.state in {"shutting-down", "terminated"}:
        self.state = self.MISSING
        return
    res.exists = True
    if instance.state == "pending":
        res.is_up = False
        self.state = self.STARTING
    elif instance.state == "running":
        res.is_up = True
        res.disks_ok = True
        for k, v in self.block_device_mapping.items():
            # A mapping that records a volume ID but is not attached to the
            # instance indicates a detached (or destroyed) volume.
            if k not in instance.block_device_mapping.keys() and v.get('volumeId', None):
                res.disks_ok = False
                res.messages.append("volume ‘{0}’ not attached to ‘{1}’".format(v['volumeId'], _sd_to_xvd(k)))
                volume = self._get_volume_by_id(v['volumeId'], allow_missing=True)
                if not volume:
                    res.messages.append("volume ‘{0}’ no longer exists".format(v['volumeId']))
            if k in instance.block_device_mapping.keys() and instance.block_device_mapping[k].status != 'attached':
                res.disks_ok = False
                res.messages.append("volume ‘{0}’ on device ‘{1}’ has unexpected state: ‘{2}’".format(v['volumeId'], _sd_to_xvd(k), instance.block_device_mapping[k].status))

        if self.private_ipv4 != instance.private_ip_address or self.public_ipv4 != instance.ip_address:
            self.warn("IP address has changed, you may need to run ‘nixops deploy’")
            self.private_ipv4 = instance.private_ip_address
            self.public_ipv4 = instance.ip_address

        MachineState._check(self, res)
    elif instance.state == "stopping":
        res.is_up = False
        self.state = self.STOPPING
    elif instance.state == "stopped":
        res.is_up = False
        self.state = self.STOPPED

    # check for scheduled events
    instance_status = self._conn.get_all_instance_status(instance_ids=[instance.id])
    for ist in instance_status:
        if ist.events:
            for e in ist.events:
                res.messages.append("Event ‘{0}’:".format(e.code))
                res.messages.append(" * {0}".format(e.description))
                res.messages.append(" * {0} - {1}".format(e.not_before, e.not_after))
def __init__(self, depl, name, id):
    """Set up libvirtd machine state and open the qemu system connection."""
    MachineState.__init__(self, depl, name, id)
    self._dom = None  # domain handle, resolved later
    self.conn = libvirt.open('qemu:///system')
    if self.conn is None:
        # NOTE(review): exiting from a constructor is drastic — presumably
        # acceptable because nixops cannot proceed without a hypervisor.
        self.log('Failed to open connection to the hypervisor')
        sys.exit(1)
def _check(self, res):
    """Check liveness by probing the target host's SSH port."""
    if not self.vm_id:
        res.exists = False
        return
    res.exists = True
    reachable = nixops.util.ping_tcp_port(self.target_host, self.ssh_port)
    res.is_up = reachable
    if reachable:
        MachineState._check(self, res)
def reboot(self, hard: bool = False) -> None:
    """Reboot the droplet; ‘hard’ power-cycles it through the API."""
    if not hard:
        MachineState.reboot(self, hard=hard)
        return
    self.log("sending hard reset to droplet...")
    self._get_droplet().reboot()
    self.state = self.STARTING
    self.wait_for_ssh()
def reboot(self, hard=False):
    """Reboot the droplet.

    A hard reboot issues a power-cycle through the DigitalOcean API and
    then blocks until SSH is reachable again; otherwise defer to the
    generic (soft, over-SSH) reboot.
    """
    if hard:
        self.log("sending hard reset to droplet...")
        droplet = digitalocean.Droplet(id=self.droplet_id, token=self.get_auth_token())
        droplet.reboot()
        # Fix: mark the machine as starting *before* the blocking SSH wait,
        # so its recorded status is accurate while we wait (and consistent
        # with the other backends' hard-reboot paths).
        self.state = self.STARTING
        self.wait_for_ssh()
    else:
        MachineState.reboot(self, hard=hard)
def reboot(self, hard=False):
    """Reboot this machine; a hard reset goes through the Hetzner robot."""
    if not hard:
        MachineState.reboot(self, hard=hard)
        return
    self.log_start("sending hard reset to robot...")
    server = self._get_server_by_ip(self.main_ipv4)
    server.reboot('hard')
    self.log_end("done.")
    self.state = self.STARTING
    # Any cached SSH connection is now stale.
    self.ssh.reset()
def reboot(self, hard=False):
    """Hard-reset via the Hetzner robot API, or fall back to a soft reboot."""
    if hard:
        self.log_start("sending hard reset to robot... ")
        self._get_server_by_ip(self.main_ipv4).reboot('hard')
        self.log_end("done.")
        self.state = self.STARTING
        # Drop the cached SSH connection; it is stale after a reset.
        self.ssh.reset()
    else:
        MachineState.reboot(self, hard=hard)
def _check(self, res):
    """Check the real VM state and record it in ‘res’."""
    if not self.vm_id:
        res.exists = False
        return
    state = self._get_vm_state()
    res.exists = True
    #self.log("VM state is ‘{0}’".format(state))
    if state in ("poweroff", "aborted"):
        res.is_up = False
        self.state = self.STOPPED
    elif state == "running":
        res.is_up = True
        self._update_ip()
        MachineState._check(self, res)
    else:
        # Any other reported state (paused, saving, …).
        self.state = self.UNKNOWN
def get_keys(self):
    """Return keys to deploy, including generated LUKS passphrases."""
    keys = MachineState.get_keys(self)
    # Ugly: we have to add the generated keys because they're not
    # there in the first evaluation (though they are present in
    # the final nix-build).
    for v in self.block_device_mapping.values():
        needs_key = (v.get('encrypt', False)
                     and v.get('passphrase', "") == ""
                     and v.get('generatedKey', "") != "")
        if needs_key:
            keys["luks-" + (v['disk_name'] or v['disk'])] = v['generatedKey']
    return keys
def get_keys(self):
    """Return keys to deploy, adding generated LUKS keys as key specs."""
    # Ugly: we have to add the generated keys because they're not
    # there in the first evaluation (though they are present in
    # the final nix-build).
    keys = MachineState.get_keys(self)
    for k, v in self.block_device_mapping.items():
        if not v.get('encrypt', False):
            continue
        if v.get('passphrase', "") != "" or v.get('generatedKey', "") == "":
            continue
        keys["luks-" + (v['disk_name'] or v['disk'])] = {
            'text': v['generatedKey'],
            'group': 'root',
            'permissions': '0600',
            'user': '******'}
    return keys
def reboot(self, hard=False):
    """Reboot the server; ‘hard’ issues an API reset and polls the action."""
    if not hard:
        MachineState.reboot(self, hard=hard)
        return
    self.log("sending hard reset to server...")
    res = self._api('/v1/servers/%d/actions/reset' % self.server_id, method='POST')
    action = res['action']
    action_path = '/v1/servers/%d/actions/%d' % (self.server_id, action['id'])
    # Poll once a second until the reset action leaves the ‘running’ state.
    while action['status'] == 'running':
        time.sleep(1)
        action = self._api(action_path, method='GET')['action']
    if action['status'] != 'success':
        raise Exception('unexpected status: %s' % action['status'])
    self.wait_for_ssh()
    self.state = self.STARTING
def get_keys(self):
    """Return keys to deploy, adding generated LUKS keys per device."""
    # Ugly: we have to add the generated keys because they're not
    # there in the first evaluation (though they are present in
    # the final nix-build).
    keys = MachineState.get_keys(self)
    for k, v in self.block_device_mapping.items():
        encrypted = v.get('encrypt', False) and v.get('passphrase', "") == ""
        if encrypted and v.get('generatedKey', "") != "":
            device = _sd_to_xvd(k).replace('/dev/', '')
            keys["luks-" + device] = v['generatedKey']
    return keys
def get_ssh_flags(self):
    """Return the ssh flags needed to reach this container.

    When using a remote container host, we have to proxy the ssh
    connection to the container via the host.
    """
    flags = ["-i", self.get_ssh_private_key_file()]
    if self.host == "localhost":
        return flags + MachineState.get_ssh_flags(self)
    proxy = "ssh -x -a root@{0} {1} nc -c {2} {3}".format(
        self.get_host_ssh(),
        " ".join(self.get_host_ssh_flags()),
        self.private_ipv4,
        self.ssh_port)
    return flags + ["-o", "ProxyCommand=" + proxy]
def get_keys(self):
    """Return keys to deploy, including generated LUKS key specs.

    Ugly: we have to add the generated keys because they're not there in
    the first evaluation (though they are present in the final nix-build).
    """
    keys = MachineState.get_keys(self)
    for k, v in self.block_device_mapping.items():
        if v.get('encrypt', False) and v.get('passphrase', "") == "" and v.get('generatedKey', "") != "":
            key_name = "luks-" + (v['disk_name'] or v['disk'])
            keys[key_name] = {
                'text': v['generatedKey'],
                # Fix: the path separator was missing, yielding paths like
                # “/run/keysluks-...” instead of a file under destDir.
                'keyFile': '/run/keys/' + key_name,
                'destDir': '/run/keys',
                'group': 'root',
                'permissions': '0600',
                'user': '******'}
    return keys
def _check(self, res):
    """Query ‘nixos-container status’ on the host and record the result."""
    if not self.vm_id:
        res.exists = False
        return
    status = self.host_ssh.run_command(
        "nixos-container status {0}".format(self.vm_id),
        capture_stdout=True).rstrip()
    if status == "gone":
        # The container was deleted behind our back.
        res.exists = False
        self.state = self.MISSING
    elif status == "down":
        res.exists = True
        res.is_up = False
        self.state = self.STOPPED
    else:
        res.exists = True
        res.is_up = True
        MachineState._check(self, res)
def get_keys(self):
    """Return keys to push, adding generated LUKS key specs.

    Ugly: we have to add the generated keys because they're not there in
    the first evaluation (though they are present in the final nix-build).
    """
    keys = MachineState.get_keys(self)
    for k, v in self.block_device_mapping.items():
        wants_generated_key = (
            v.get("encrypt", False)
            and v.get("passphrase", "") == ""
            and v.get("generatedKey", "") != ""
        )
        if wants_generated_key:
            keys["luks-" + (v["disk_name"] or v["disk"])] = {
                "text": v["generatedKey"],
                "group": "root",
                "permissions": "0600",
                "user": "******",
            }
    return keys
def switch_to_configuration(self, method, sync, command=None):
    # Activate the new configuration.  When we are still in the rescue
    # system, the freshly installed NixOS lives under /mnt, so activation
    # must happen inside a chroot.
    if self.state == self.RESCUE:
        # We cannot use the mountpoint command here, because it's unable to
        # detect bind mounts on files, so we just go ahead and try to
        # unmount.
        umount = 'if umount "{0}" 2> /dev/null; then rm -f "{0}"; fi'
        cmd = '; '.join([umount.format(os.path.join("/mnt/etc", mnt))
                         for mnt in ("resolv.conf", "passwd", "group")])
        self.run_command(cmd)
        command = "chroot /mnt /nix/var/nix/profiles/system/bin/"
        command += "switch-to-configuration"
    res = MachineState.switch_to_configuration(self, method, sync, command)
    # 0 = success, 100 = success-but-reboot-required; anything else is an
    # error that we propagate unchanged.
    if res not in (0, 100):
        return res
    if self.state == self.RESCUE and self.just_installed:
        # First activation after a fresh install from the rescue system:
        # reboot into the real NixOS.
        self.reboot_sync()
        self.just_installed = False
    return res
def __init__(self, depl, name, id):
    """Initialize EC2 machine state; API connections are opened lazily."""
    MachineState.__init__(self, depl, name, id)
    # Both connections are created on demand by the connect helpers.
    self._conn = self._conn_route53 = None
def __init__(self, depl: Deployment, name: str, id):
    """Initialize machine state; the API client is created lazily."""
    MachineState.__init__(self, depl, name, id)
    self._client = None  # constructed on first use
def __init__(self, depl, name, id):
    """Initialize machine state; no disk is attached yet."""
    MachineState.__init__(self, depl, name, id)
    self._disk_attached = False  # flipped once the disk is attached
def address_to(self, m):
    """Return the address to use to reach ‘m’ from this machine."""
    if not isinstance(m, VirtualBoxState):
        return MachineState.address_to(self, m)
    # VirtualBox machines talk over their private network.
    return m.private_ipv4
def __init__(self, depl, name, id):
    """Delegate all state initialization to the generic MachineState."""
    MachineState.__init__(self, depl, name, id)
def address_to(self, m):
    """Prefer the private IP when ‘m’ is another libvirtd machine."""
    return m.private_ipv4 if isinstance(m, LibvirtdState) else MachineState.address_to(self, m)
def address_to(self, m):
    """Containers on the same host talk over their private IPs."""
    same_host = isinstance(m, ContainerState) and self.host == m.host
    return m.private_ipv4 if same_host else MachineState.address_to(self, m)
def address_to(self, resource):
    """Return the IP address to be used to access "resource" from this machine."""
    # GCE machines on the same network can use the private address.
    same_network = isinstance(resource, GCEState) and resource.network == self.network
    if same_network:
        return resource.private_ipv4
    return MachineState.address_to(self, resource)
def __init__(self, depl, name, id):
    """Initialize Hetzner machine state; the robot connection is lazy."""
    MachineState.__init__(self, depl, name, id)
    self._robot = None  # created on demand
def __init__(self, depl, name, id):
    """Initialize container state and set up the SSH helper for the host."""
    MachineState.__init__(self, depl, name, id)
    # SSH sessions to the *host* machine; host name and extra flags are
    # resolved lazily through the registered callbacks.
    self.host_ssh = nixops.ssh_util.SSH(self.logger)
    self.host_ssh.register_host_fun(self.get_host_ssh)
    self.host_ssh.register_flag_fun(self.get_host_ssh_flags)
def __init__(self, depl: Deployment, name: str, id: RecordId) -> None:
    """Initialize machine state, remembering the deployment-level name."""
    MachineState.__init__(self, depl, name, id)
    self.name: str = name
def copy_closure_to(self, path):
    """Copy the closure to the container host (no-op when it is local)."""
    # On localhost the host and the container share a Nix store, so there
    # is nothing to copy.
    if self.host != "localhost":
        MachineState.copy_closure_to(self, path)
def address_to(self, m):
    """Prefer the private address for EC2-to-EC2 traffic."""
    # FIXME: only if we're in the same region
    return m.private_ipv4 if isinstance(m, EC2State) else MachineState.address_to(self, m)
def __init__(self, depl, name, id):
    """Initialize machine state; the backend connection is opened lazily."""
    MachineState.__init__(self, depl, name, id)
    self._conn = None  # established on first use
def __init__(self, depl, name, id):
    """Initialize libvirtd machine state; all handles are resolved lazily."""
    MachineState.__init__(self, depl, name, id)
    # Connection, domain, storage-pool and volume handles, opened on demand.
    self._conn = self._dom = self._pool = self._vol = None
def _check(self, res):
    """This machine always “exists”; probe its SSH port for liveness."""
    res.exists = True  # can't really check
    reachable = nixops.util.ping_tcp_port(self.target_host, self.ssh_port)
    res.is_up = reachable
    if reachable:
        MachineState._check(self, res)
def __init__(self, depl: nixops.deployment.Deployment, name: str, id):
    """Initialize machine state, keeping the name; connection is lazy."""
    MachineState.__init__(self, depl, name, id)
    self.name = name
    self._conn = None  # opened on demand