def get_ssh(self, username="******"):
    """Open an SSH connection to this host's IP and wait until it is usable.

    :param str username: remote account to log in as
    :returns: a ready ``SSH`` instance
    """
    connection = SSH(self.provider.root.loop,
                     self.ip,
                     username=username,
                     keys=[self.provider.privkey])
    yield from connection.wait()
    return connection
def run(self):
    """Main service loop: consume gerrit events over SSH (or a fake stream).

    When ``fake-stream`` is configured, that file is replayed forever with a
    short delay between lines (testing aid — no gerrit server needed).
    Otherwise an SSH connection to gerrit's event stream is kept open,
    reconnecting after ``reconnect_delay`` seconds on failure.
    """
    fake_stream = self.cfg.get("fake-stream")
    if fake_stream:
        while True:
            with open(fake_stream) as fs:
                for line in fs:
                    self._handle_stdout(line)
                    yield from asyncio.sleep(3)
                self.log.info("Stream ended. Starting from beginning.")
        return  # not reached: the replay loop above never exits
    # Default gerrit SSH port; only applied when the config does not set one.
    if "port" not in self.cfg["ssh"]:
        self.cfg["ssh"]["port"] = 29418
    self.cfg["ssh"]["keys"] = self.root.config.get_ssh_keys(keytype="private")
    self.ssh = SSH(self.loop, **self.cfg["ssh"])
    self.root.task_end_handlers.append(self._handle_task_end)
    reconnect_delay = self.cfg.get("reconnect_delay", 5)
    while True:
        try:
            status = yield from self.ssh.run("gerrit stream-events",
                                             stdout=self._handle_stdout,
                                             stderr=self._handle_stderr)
            self.log.info("Gerrit stream exited with status %s" % status)
        except asyncio.CancelledError:
            self.log.info("Stopping gerrit")
            del self.ssh
            return
        except Exception:
            # Narrowed from a bare ``except:``, which would also drag
            # SystemExit/KeyboardInterrupt into the reconnect loop.
            self.log.exception("Error listening gerrit events")
        self.log.info("Reconnect in %s seconds" % reconnect_delay)
        yield from asyncio.sleep(reconnect_delay)
def get_ssh(self, user="******"):
    """Return a cached, ready SSH connection for *user*, creating it on demand.

    The VM's IP is resolved first; one live connection is kept per user in
    ``_ssh_cache`` so repeated calls reuse it.
    """
    yield from self.get_ip()
    cached = self._ssh_cache.get(user)
    if cached:
        return cached
    root = self.host.root
    connection = SSH(root.loop, self.ip, username=user,
                     keys=root.config.get_ssh_keys("private"))
    yield from connection.wait()
    self._ssh_cache[user] = connection
    return connection
async def start(self):
    """Bring the provider up: optional jumphost SSH, openstack client, caches.

    Resolves network/image/flavor names to ids once up front, then signals
    readiness through the ``_ready`` event.
    """
    ssh_cfg = self.config["ssh"]
    self.access_net = ssh_cfg["access_net"]
    self.ssh_keys = [ssh_cfg["private_key_path"]]
    self.jumphost = ssh_cfg.get("jumphost")
    if self.jumphost:
        self.jumphost = SSH(self.root.loop, keys=self.ssh_keys,
                            **self.jumphost)
        await self.jumphost.wait()
    secrets = self.root.config.secrets[self.name]
    self.client = openstack.Client(secrets["auth_url"],
                                   secrets["username"],
                                   secrets["tenant"],
                                   cafile=secrets["cafile"])
    await self.client.login(password=secrets["password"])
    networks = (await self.client.list_networks())["networks"]
    self.network_ids = {net["name"]: net["id"] for net in networks}
    images = (await self.client.list_images())["images"]
    self.image_ids = {img["name"]: img["id"] for img in images}
    flavors = (await self.client.list_flavors())["flavors"]
    self.flavor_ids = {fl["name"]: fl["id"] for fl in flavors}
    self._ready.set()
def get_ssh(self, loop, username=None):
    """Build an SSH object from the stored kwargs, optionally overriding user.

    ``ssh_kwargs`` is never mutated: an override is applied to a fresh copy.
    """
    if not username:
        return SSH(loop, **self.ssh_kwargs)
    overridden = dict(self.ssh_kwargs, username=username)
    return SSH(loop, **overridden)
def get_ssh(self, loop, username=None):
    """Create a fresh SSH object for this server.

    Falls back to the stored ``self.username`` when no *username* is given.
    """
    LOG.debug("Creating ssh for %s@%s", username, self.ip)
    user = username or self.username
    return SSH(loop,
               hostname=self.ip,
               username=user,
               keys=self.keys,
               password=self.password,
               jumphost=self.jumphost)
def __init__(self, ssh_conf, provider, root):
    """Set up a compute host reachable over SSH.

    :param dict ssh_conf: one "hosts" item from the provider config
    :param provider: provider that owns this host
    :param root: application root (supplies loop and config)
    """
    self.provider = provider
    self.root = root
    self.config = provider.config
    self.image_locks = {}
    self._job_vms = {}
    self._job_bridge_numbers = {}
    # Work on a copy: the original mutated the caller's config dict
    # (setdefault + key assignment), leaking changes back into it.
    ssh_conf = dict(ssh_conf)
    ssh_conf.setdefault("username", "root")
    ssh_conf["keys"] = root.config.get_ssh_keys(keytype="private")
    self.ssh = SSH(root.loop, **ssh_conf)
    self.la = 0.0
    self.free = 0
    storage_cf = self.config["storage"]
    self.storage = BACKENDS[storage_cf["backend"]](self.ssh, **storage_cf)
    self.bridge_lock = asyncio.Lock(loop=root.loop)
def __init__(self, root, cfg):
    """Set up the provider and one Host per configured host entry.

    :param root: application root (supplies loop and config)
    :param dict cfg: provider config
    """
    self.root = root
    self.cfg = cfg
    self.name = cfg["name"]
    keypair = root.config.data["ssh-key"]["default"]
    # Expand "~" in both key paths: the original expanded only the public
    # one, so a "~/..." private key path would be passed through verbatim.
    self.pubkey = os.path.expanduser(keypair["public"])
    self.privkey = os.path.expanduser(keypair["private"])
    self.gethost_lock = asyncio.Lock(loop=root.loop)
    self.job_host = {}
    self.hosts = []
    # Default to an empty list so a provider without "hosts" does not crash
    # with ``TypeError: 'NoneType' object is not iterable``.
    for host_cfg in cfg.get("hosts", []):
        self.hosts.append(Host(self, SSH(loop=self.root.loop, **host_cfg)))
class Service:
    """Gerrit event-stream service.

    Consumes the ``gerrit stream-events`` feed over SSH, dispatches events to
    handlers (task start, recheck, ignore) and publishes job results back to
    gerrit as review comments.
    """

    # Kept for backward compatibility with code that reads them off the
    # class; real per-instance state is initialized in __init__ below
    # (a mutable class-level ``tasks = {}`` would be shared by instances).
    data = ""
    tasks = {}

    def __init__(self, root, **kwargs):
        self.root = root
        self.log = root.log
        self.loop = root.loop
        self.cfg = kwargs
        # Per-instance state shadowing the class attributes above.
        self.data = ""
        self.tasks = {}
        # Dispatch table: gerrit event type -> handler.
        self.handler_map = {
            "comment-added": self._handle_comment_added,
            "patchset-created": self._start_task,
            "ref-updated": self._start_task,
            "ref-replicated": self._ignore_event,
        }

    def _ignore_event(self, raw_event):
        # Deliberate no-op for event types we recognize but do not act on.
        pass

    def _start_task(self, raw_event):
        # Wrap the raw gerrit event and hand a new task to the scheduler.
        event = Event(self.cfg, raw_event)
        self.root.start_task(task.Task(self.root, event))

    def _handle_comment_added(self, raw_event):
        # Start a new run when the comment matches the recheck regexp.
        r = self.cfg.get("recheck-regexp", "^rally-ci recheck$")
        m = re.search(r, raw_event["comment"], re.MULTILINE)
        if m:
            self.log.info("Recheck for %s" % _get_project_name(raw_event))
            self._start_task(raw_event)

    def _handle_event(self, event):
        """Decode one JSON event line and dispatch it by type."""
        event = json.loads(event)
        project = _get_project_name(event)
        self.log.debug("Event '%s' for project '%s'" % (event["type"],
                                                        project))
        if project:
            if not self.root.config.is_project_configured(project):
                return
            handler = self.handler_map.get(event["type"])
            if handler:
                handler(event)
            else:
                self.log.debug("Unknown event type %s" % event["type"])
        else:
            self.log.warning("No project name")

    def _handle_stderr(self, data):
        self.log.warning("Error message from gerrit: %s" % data)

    def _handle_stdout(self, data):
        """Buffer stream chunks and process every complete line."""
        self.data += data
        while "\n" in self.data:
            line, self.data = self.data.split("\n", 1)
            try:
                self._handle_event(line)
            except Exception:
                # Narrowed from a bare ``except:`` so cancellation and
                # interpreter-exit exceptions propagate.
                self.log.exception("Error handling data %s" % self.data)

    @asyncio.coroutine
    def run(self):
        """Main loop: replay a fake stream, or listen to gerrit over SSH."""
        fake_stream = self.cfg.get("fake-stream")
        if fake_stream:
            while True:
                with open(fake_stream) as fs:
                    for line in fs:
                        self._handle_stdout(line)
                        yield from asyncio.sleep(3)
                    self.log.info("Stream ended. Starting from beginning.")
            return  # not reached: the replay loop never exits
        # Default gerrit SSH port unless the config overrides it.
        if "port" not in self.cfg["ssh"]:
            self.cfg["ssh"]["port"] = 29418
        self.cfg["ssh"]["keys"] = self.root.config.get_ssh_keys(
            keytype="private")
        self.ssh = SSH(self.loop, **self.cfg["ssh"])
        self.root.task_end_handlers.append(self._handle_task_end)
        reconnect_delay = self.cfg.get("reconnect_delay", 5)
        while True:
            try:
                status = yield from self.ssh.run(
                    "gerrit stream-events",
                    stdout=self._handle_stdout,
                    stderr=self._handle_stderr)
                self.log.info("Gerrit stream exited with status %s" % status)
            except asyncio.CancelledError:
                self.log.info("Stopping gerrit")
                del self.ssh
                return
            except Exception:
                # Narrowed from a bare ``except:`` (see _handle_stdout).
                self.log.exception("Error listening gerrit events")
            self.log.info("Reconnect in %s seconds" % reconnect_delay)
            yield from asyncio.sleep(reconnect_delay)

    def _handle_task_end(self, task):
        self.root.start_coro(self.publish_results(task))

    @asyncio.coroutine
    def publish_results(self, task):
        """Post a review comment (and optionally a vote) for a finished task."""
        if self.cfg.get("silent"):
            return (yield from asyncio.sleep(0))
        revision = task.event.raw_event.get("patchSet", {}).get("revision")
        if not revision:
            return (yield from asyncio.sleep(0))
        self.log.debug("Publishing results for task %s" % self)
        comment_header = self.cfg.get("comment-header")
        if not comment_header:
            self.log.warning("No comment-header configured. Can't publish.")
            return
        cmd = ["gerrit", "review"]
        # A task fails when any *voting* job reported an error.
        fail = any(j.error for j in task.jobs if j.voting)
        if self.cfg.get("vote"):
            cmd.append("--verified=-1" if fail else "--verified=+1")
        succeeded = "failed" if fail else "succeeded"
        summary = comment_header.format(succeeded=succeeded)
        tpl = self.cfg["comment-job-template"]
        for job in task.jobs:
            success = job.status + ("" if job.voting else " (non-voting)")
            time = utils.human_time(job.finished_at - job.started_at)
            summary += tpl.format(success=success,
                                  name=job.config["name"],
                                  time=time,
                                  log_path=job.log_path)
            summary += "\n"
        summary += task.summary
        cmd += ["-m", summary, revision]
        yield from self.ssh.run(cmd)
class Host:
    """A compute host reached over SSH: builds images, clones disks, runs VMs.

    Tracks load/memory stats for scheduling and manages per-job network
    bridges and VM lifecycles.
    """

    def __init__(self, ssh_conf, provider, root):
        """
        :param dict ssh_conf: one "hosts" item from the provider config
        :param provider: provider that owns this host
        :param root: application root (supplies loop and config)
        """
        self.provider = provider
        self.root = root
        self.config = provider.config
        self.image_locks = {}
        self._job_vms = {}
        self._job_bridge_numbers = {}
        # Work on a copy so the caller's config dict is not mutated.
        ssh_conf = dict(ssh_conf)
        ssh_conf.setdefault("username", "root")
        ssh_conf["keys"] = root.config.get_ssh_keys(keytype="private")
        self.ssh = SSH(root.loop, **ssh_conf)
        self.la = 0.0
        self.free = 0
        storage_cf = self.config["storage"]
        self.storage = BACKENDS[storage_cf["backend"]](self.ssh, **storage_cf)
        self.bridge_lock = asyncio.Lock(loop=root.loop)

    def __str__(self):
        return "<Host %s (la: %s, free: %s)>" % (self.ssh.hostname,
                                                 self.la, self.free)

    @asyncio.coroutine
    def update_stats(self):
        """Refresh load average and free memory via ``uptime && free -m``."""
        cmd = "uptime && free -m"
        # Original unpacked ``err, data, err`` — the first target was
        # silently clobbered by the second.
        _, data, _ = yield from self.ssh.out(cmd)
        # Bug fix: the second argument of a *compiled* pattern's search()
        # is the start position, not a flags word — passing re.MULTILINE
        # (== 8) skipped the first 8 characters of the output.  If
        # multiline matching is intended it must go into re.compile().
        self.la = float(RE_LA.search(data).group(1))
        free = RE_MEM.search(data).groups()
        self.free = sum(map(int, free))

    @asyncio.coroutine
    def boot_image(self, name):
        """Boot a build VM backed by image *name* and return it."""
        conf = self.config["images"][name]
        vm = VM(self, name, conf)
        vm.disks.append(name)
        for f in (yield from self.storage.list_files(name)):
            vm.add_disk(f)
        vm.add_net(conf.get("build-net", "virbr0"))
        yield from vm.boot()
        return vm

    @asyncio.coroutine
    def _run_script(self, vm, script):
        """Run one configured script inside *vm* over SSH.

        :param dict script: script definition (user, interpreter, data)
        """
        ssh = yield from vm.get_ssh(script.get("user", "root"))
        cmd = script.get("interpreter", "/bin/bash -xe -s")
        yield from ssh.run(cmd, stdin=script["data"], stderr=LOG.debug)

    @asyncio.coroutine
    def build_image(self, name):
        """Build image *name*, recursively building its parent first.

        Serialized per image name via ``image_locks``; a no-op when the
        image already exists in storage.
        """
        LOG.info("Building image %s" % name)
        self.image_locks.setdefault(name, asyncio.Lock(loop=self.root.loop))
        with (yield from self.image_locks[name]):
            if (yield from self.storage.exist(name)):
                LOG.debug("Image %s exist" % name)
                return
            image_conf = self.config["images"][name]
            parent = image_conf.get("parent")
            if parent:
                yield from self.build_image(parent)
                yield from self.storage.clone(parent, name)
            else:
                url = image_conf.get("url")
                if url:
                    yield from self.storage.download(name, url)
                    yield from self.storage.snapshot(name)
                    return
                    # TODO: support build_script for downloaded images
            build_scripts = image_conf.get("build-scripts")
            if build_scripts:
                vm = yield from self.boot_image(name)
                try:
                    for script in build_scripts:
                        script = self.root.config.data["script"][script]
                        LOG.debug("Running build script %s" % script)
                        yield from self._run_script(vm, script)
                    yield from vm.shutdown(storage=False)
                except Exception:
                    # Narrowed from a bare ``except:``; still re-raised
                    # after destroying the half-built VM.
                    LOG.exception("Error building image")
                    yield from vm.destroy()
                    raise
            else:
                LOG.debug("No build script for image %s" % name)
                yield from asyncio.sleep(4)
            yield from self.storage.snapshot(name)

    @asyncio.coroutine
    def _get_vm(self, name, conf):
        """Clone a disk and boot a fresh VM from it.

        :param conf: config.provider.vms item
        """
        LOG.debug("Creating VM %s" % name)
        image = conf.get("image")
        if image:
            yield from self.build_image(image)
        else:
            image = name
        rnd_name = utils.get_rnd_name("rci_" + name)
        yield from self.storage.clone(image, rnd_name)
        vm = VM(self, name, conf)
        files = yield from self.storage.list_files(rnd_name)
        vm.disks.append(rnd_name)
        for f in files:
            vm.add_disk(f)
        # "net" items are "ifname" or "ifname mac" strings.
        for net in conf["net"]:
            net = net.split(" ")
            if len(net) == 1:
                vm.add_net(net[0])
            else:
                vm.add_net(net[0], mac=net[1])
        yield from vm.boot()
        return vm

    @asyncio.coroutine
    def get_vm_for_job(self, name, job):
        """Boot VM *name* for *job*, allocating per-job bridges as needed.

        :param str name: vm name
        :param Job job: owning job, used to track VMs/bridges for cleanup
        """
        conf = copy.deepcopy(self.config["vms"][name])
        if "net" not in conf:
            conf["net"] = ["virbr0"]
        for i, net in enumerate(conf["net"]):
            ifname = net.split(" ")
            # A trailing "%" means "allocate a numbered bridge per job".
            if ifname[0].endswith("%"):
                _br = self._job_bridge_numbers.get(job, {})
                brname = _br.get(ifname[0])
                LOG.debug("Got %s for %s (%s)" % (brname, job, self))
                if not brname:
                    brname = yield from self._get_bridge(ifname[0][:-1])
                    self._job_bridge_numbers.setdefault(job, {})
                    self._job_bridge_numbers[job][ifname[0]] = brname
                    LOG.debug("Created %s for %s (%s)" % (brname, job, self))
                new = conf["net"][i].replace(ifname[0], brname)
                conf["net"][i] = new
        vm = yield from self._get_vm(name, conf)
        if job not in self._job_vms:
            self._job_vms[job] = []
        self._job_vms[job].append(vm)
        return vm

    @asyncio.coroutine
    def cleanup(self, job):
        """Destroy all VMs of *job* and release its bridge allocations."""
        for vm in self._job_vms.pop(job, []):
            yield from vm.destroy()
        with (yield from self.bridge_lock):
            self._job_bridge_numbers.pop(job, None)

    @asyncio.coroutine
    def _get_bridge(self, prefix):
        """Create and bring up the first free ``<prefix><n>`` bridge."""
        with (yield from self.bridge_lock):
            # Original unpacked ``err, data, err`` (first target clobbered).
            _, data, _ = yield from self.ssh.out(["ip", "link", "list"])
            nums = set()
            for line in data.splitlines():
                m = IFACE_RE.match(line)
                if m:
                    if m.group(1) == prefix:
                        nums.add(int(m.group(2)))
            # Smallest non-negative integer not already in use.
            for i in range(len(nums) + 1):
                if i not in nums:
                    br = "%s%d" % (prefix, i)
                    break
            yield from self.ssh.run(
                ["ip", "link", "add", br, "type", "bridge"])
            yield from self.ssh.run(["ip", "link", "set", br, "up"])
            return br