Example 1
    def _remove(self, work):
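        # Debug: dump the incoming work unit element for inspection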
        print(etree.tostring(work, pretty_print=True).decode())
        res = etree.Element("wu", id=work.attrib["id"])
        # Deconstruct wu xpath
        wu_mrx = MRXpath(work.attrib["id"])
        bundle_id = wu_mrx.id()

        # Identify package directory
        dest = os.path.join(self.cache_dir, bundle_id)

        # Fail if not exist
        if not os.path.isdir(dest):
            msg = "Package directory does not exist"
            l.emsg(msg)
            res.attrib["status"] = "error"
            res.attrib["message"] = msg
            return res

        # Nuke the directory
        try:
            shutil.rmtree(dest)
            res.attrib["status"] = "success"
        except OSError:
            msg = "Could not remove package directory."
            l.emsg(msg)
            res.attrib["status"] = "error"
            res.attrib["message"] = msg

        return res
Example 2
    def _remove(self, work):
        res = etree.Element("wu", id=work.attrib["id"])
        # Deconstruct wu xpath
        wu_mrx = MRXpath(work.attrib["id"])
        wu_id = wu_mrx.id()
        # Iterate over top level packages until we find one with the same id
        # Use a generator to save memory
        e = (elt for elt in self.s_elt if elt.attrib["id"] == wu_id)
        work_elt = next(e, None)
        if work_elt is None:
            msg = "No package with id '{}' found".format(wu_id)
            l.emsg(msg)
            res.attrib["status"] = "error"
            res.attrib["message"] = msg
            return res

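        # 'back' holds an error message on failure; a falsy value signals success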
        back = self._process(work_elt, "uninstall")

        if back:
            l.emsg(back)
            res.attrib["status"] = "error"
            res.attrib["message"] = back
        else:
            res.attrib["status"] = "success"
        return res
Example 3
    def test_name_id(self):
        mrx = MRXpath('/a/b/c[\'/d/e[@id="1"]\']')
        self.assertEqual(mrx.name(), "c")
        self.assertEqual(mrx.id(), '/d/e[@id="1"]')
Example 4
    def do_update(self):
        """Perform an update cycle"""
        self.results = None
        if context.desired_status.getroot().get("autoconstructed"):
            raise ValueError("Refusing to use autoconstructed status.")
        l.dmsg("desired:\n%s" % pstring(self.desired_status()), 10)
        l.dmsg("initial:\n%s" % pstring(self.initial_status()), 10)
        comp = XMLCompare(copy.deepcopy(self.initial_status()), self.desired_status())
        l.dmsg("xpaths by state:\n" + pprint.pformat(comp.bystate), 10)

        # See if we have to do a self update
        iv_mrx = MRXpath('/status/worker[@id="__machination__"]/installedVersion')
        selfupdate = False
        selfupdate_bundles = set()
        if iv_mrx.to_xpath() in comp.find_work():
            # installedVersion has changed somehow
            wus, working = generate_wus({iv_mrx.to_xpath()}, comp)
            wu = wus[0] if wus else etree.Element("wu", op="nothing")
            if wu.get("op") == "add" or wu.get("op") == "deepmod":
                # Definitely updating
                l.lmsg("{} on {}: need to self update".format(wu.get("op"), iv_mrx.to_xpath()), 3)
                selfupdate = True

                # Check for bundles and add to selfupdate_bundles
                for ivb_elt in wu[0].xpath("machinationFetcherBundle"):
                    bid = MRXpath.quote_id(MRXpath, ivb_elt.get("id"))
                    bundle_xp = MRXpath("/status/worker[@id='fetcher']/bundle['{}']".format(bid)).to_xpath()
                    selfupdate_bundles.add(bundle_xp)

                # only interested in bundles with a work unit to do
                selfupdate_bundles = selfupdate_bundles & comp.find_work()

        #                fetcher_wus, working = generate_wus(todo, comp)
        #                # use the fetcher worker to get bundles
        #                worker = self.worker('fetcher')
        #                for wu in fetcher_wus:

        try:
            deps = self.desired_status().xpath("/status/deps")[0]
        except IndexError:
            deps = etree.fromstring("<status><deps/></status>")[0]
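        # wudeps is a list of [prerequisite_xpath, dependent_xpath] pairs (see the dependency loop below)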
        wudeps = comp.wudeps(deps.iterchildren(tag=etree.Element))

        # Track success/failure of work units.
        #
        # Before a work unit is attempted work_status[wu] should not
        # exist.
        #
        # Afterward, work_status[wu] should contain a list with a
        # status (True = succeeded, False = failed) and either the wu
        # element or an error message as appropriate:
        #
        # {
        #  wu1: [True, wu_elt],
        #  wu2: [False, "Worker 'splat' not available"],
        #  wu3: [False, "Dependency 'wu2' failed"]
        # }
        work_status = {}
        # set up a dictionary:
        # { work_unit: [list, of, units, work_unit, depends, on] }
        work_depends = {}

        if selfupdate:
            # installedVersion depends on all bundles
            wudeps.extend([[x, iv_mrx.to_xpath()] for x in selfupdate_bundles])
            # Everything else apart from selfupdate bundles depends on
            # installedVersion
            wudeps.extend(
                [[iv_mrx.to_xpath(), x] for x in (comp.find_work() - selfupdate_bundles - {iv_mrx.to_xpath()})]
            )
        for dep in wudeps:
            if work_depends.get(dep[1]):
                # entry for dep[1] already exists, add to it
                work_depends.get(dep[1]).append(dep[0])
            else:
                # entry for dep[1] does not exist, create it
                work_depends[dep[1]] = [dep[0]]
        #        l.dmsg('work_depends = {}'.format(pprint.pformat(work_depends)))
        # we need to make all workunits depend on something for
        # topsort to work
        if selfupdate:
            # selfupdate_bundles should be done first
            wudeps.extend([["", x] for x in selfupdate_bundles])
        else:
            wudeps.extend([["", x] for x in comp.find_work()])
        #        l.dmsg('wudeps = {}'.format(pprint.pformat(wudeps)))

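        # Running copy of the status tree; successful work units are applied to it as they complete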
        wu_updated_status = copy.deepcopy(self.initial_status())

        i = 0
        #        failures = []
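        # Work through the units level by level in dependency order: each level from
        # topsort_levels contains work whose prerequisites were all emitted in earlier levels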
        for lev in iter(topsort.topsort_levels(wudeps)):
            i += 1
            if i == 1:
                # this is the fake workunit '' we put in above
                continue
            l.dmsg("xpaths for level {}:\n".format(i) + pprint.pformat(lev), 10)
            wus, working_elt = generate_wus(set(lev), comp)

            #            l.dmsg(pstring(self.initial_status(),10))
            #            l.dmsg(pstring(self.desired_status(),10))
            for wu in wus:
                l.dmsg(pstring(wu), 10)

            add_map = {}
            for wu in wus:
                # If it's an add for a worker, add the worker element
                wu_mrx = MRXpath(wu.get("id"))
                if wu_mrx.to_noid_path() == "/status/worker" and wu.get("op") == "add":
                    l.lmsg("Adding worker element " + wu_mrx.to_xpath())
                    wu_updated_status.xpath("/status")[0].append(etree.Element("worker", id=wu_mrx.id()))
                    continue

                # If it's an add, we need to add it to the add_map so
                # that adds still function properly if they get out of
                # order or previous adds have failed.
                if wu.get("op") == "add":
                    add_map[wu.get("id")] = get_fullpos(wu.get("pos"), MRXpath(wu.get("id")).parent())

                # check to make sure any dependencies have been done
                check = self.check_deps(wu, work_depends, work_status)
                if not check[0]:
                    l.wmsg("Failing {}: dep {} failed".format(wu.get("id"), check[1]))
                    work_status[wu.get("id")] = [False, "Dependency '{}' failed".format(check[1])]
                    # don't include this wu in work to be done
                    continue

                wname = MRXpath(wu.get("id")).workername(prefix="/status")
                worker = self.worker(wname)
                l.lmsg("dispatching to " + wname)
                l.dmsg("work:\n" + pstring(wu))
                if worker:
                    # need to wrap the wu in a wus element
                    workelt = etree.Element("wus", worker=wname)
                    workelt.append(copy.deepcopy(wu))
                    try:
                        results = worker.do_work(workelt)
                    except Exception as e:
                        exc_type, exc_value, exc_tb = sys.exc_info()
                        # There's only one, but in future there might
                        # be more - loop over them.
                        for curwu in workelt:
                            work_status[curwu.get("id")] = [False, "Exception in worker {}\n{}".format(wname, str(e))]
                            l.emsg(
                                "Exception during {} - failing it\n{}".format(
                                    curwu.get("id"), "".join(traceback.format_tb(exc_tb)) + repr(e)
                                )
                            )
                    else:
                        self.process_results(results, workelt, work_status)
                        wid = wu.get("id")
                        completed = work_status.get(wid)
                        if completed[0]:
                            # Apply successes to wu_updated_status
                            l.dmsg("Marking {} succeeded.".format(wid))
                            wu_updated_status = apply_wu(completed[1], wu_updated_status, add_map=add_map)
                        else:
                            l.dmsg("Marking {} failed.".format(wid))
                #                            failures.append([wid, completed[1]])

                else:
                    # No worker: fail this set of work
                    work_status[wu.get("id")] = [False, "No worker '{}'".format(wname)]

            # TODO(colin): parallelise downloads and other work

        # Report successes
        l.lmsg(
            "The following work units reported success:\n{}".format(
                pprint.pformat([k for k, v in work_status.items() if v[0]])
            )
        )
        # Report failures.
        l.wmsg(
            "The following work units reported failure:\n{}".format(
                pprint.pformat([[k, v[1]] for k, v in work_status.items() if not v[0]])
            )
        )
        # write calculated status to file
        fname = os.path.join(context.status_dir(), "previous-status.xml")
        with open(fname, "w") as prev:
            prev.write(etree.tostring(wu_updated_status, pretty_print=True).decode())

        # see how the status has changed including calls to generate_status()
        new_status = self.gather_status()

        # write this status out as previous-status.xml (overwriting the calculated version above)
        with open(fname, "w") as prev:
            prev.write(etree.tostring(new_status, pretty_print=True).decode())