Example #1
    def sync_recipients_to_list(self,
                                list_id,
                                recipients,
                                delete_recipients=True):
        """Sync the given recipients to the contact list identified by list_id.

        Missing recipients are created and added to the list; if
        delete_recipients is True, list members no longer present in
        recipients are removed from the list and deleted. The result of
        each step is collected in the returned stats dict.
        """
        stats = {}
        stats['create_recipients'] = self._create_recipients(recipients)

        # Map every known recipient email to its id; CaseInsensitiveDict
        # makes the email comparison case-insensitive.
        all_recipients_lookup = CaseInsensitiveDict(
            {r.email: r.id for r in self._all_recipients()})
        list_recipients = list(self._list_recipients(list_id))

        # Recipients from the input that already exist globally but are not
        # yet assigned to the target list.
        recipients_not_assigned_to_list_lookup = CaseInsensitiveDict({
            r['email']: all_recipients_lookup[r['email']]
            for r in recipients if r['email'] in all_recipients_lookup
        })
        for recipient in list_recipients:
            recipients_not_assigned_to_list_lookup.pop(recipient.email, None)

        recipient_ids_to_add = list(
            recipients_not_assigned_to_list_lookup.values())
        stats['add_recipients_to_list'] = self._add_recipients_to_list(
            list_id, recipient_ids_to_add)

        if delete_recipients:
            # Current list members whose email is no longer in the input are
            # removed from the list and then deleted entirely.
            contact_to_delete_lookup = CaseInsensitiveDict(
                {c.email: c.id for c in list_recipients})
            for recipient in recipients:
                contact_to_delete_lookup.pop(recipient['email'], None)

            recipients_ids_to_delete = list(contact_to_delete_lookup.values())

            stats['delete_recipient_from_list'] = (
                self._delete_recipient_from_list(
                    list_id, recipients_ids_to_delete))
            stats['delete_recipients'] = self._delete_recipients(
                recipients_ids_to_delete)

        return stats
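
The example above leans on CaseInsensitiveDict so that recipient emails match regardless of case. As a point of reference, here is a minimal sketch of that behaviour, assuming the requests.structures implementation (which exposes the interface used above); the sample email address is made up for illustration:

from requests.structures import CaseInsensitiveDict

# Keys are matched case-insensitively, so variations in email casing
# all refer to the same entry.
lookup = CaseInsensitiveDict({'User@Example.com': 42})
print(lookup['user@example.com'])       # 42
print('USER@EXAMPLE.COM' in lookup)     # True
lookup.pop('user@EXAMPLE.com', None)    # removes the entry regardless of case
print(len(lookup))                      # 0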
Example #2
class StateMachine(object):
    """ Helper class that tracks the state of different entities. """
    def __init__(self, bus):
        self._states = CaseInsensitiveDict()
        self._bus = bus
        self._lock = threading.Lock()

    def entity_ids(self, domain_filter=None):
        """ List of entity ids that are being tracked. """
        if domain_filter is not None:
            domain_filter = domain_filter.lower()

            return [
                state.entity_id for key, state in self._states.lower_items()
                if util.split_entity_id(key)[0] == domain_filter
            ]
        else:
            return list(self._states.keys())

    def all(self):
        """ Returns a list of all states. """
        return [state.copy() for state in self._states.values()]

    def get(self, entity_id):
        """ Returns the state of the specified entity. """
        state = self._states.get(entity_id)

        # Make a copy so people won't mutate the state
        return state.copy() if state else None

    def get_since(self, point_in_time):
        """
        Returns all states that have been changed since point_in_time.
        """
        point_in_time = util.strip_microseconds(point_in_time)

        with self._lock:
            return [
                state for state in self._states.values()
                if state.last_updated >= point_in_time
            ]

    def is_state(self, entity_id, state):
        """ Returns True if entity exists and is specified state. """
        return (entity_id in self._states
                and self._states[entity_id].state == state)

    def remove(self, entity_id):
        """ Removes an entity from the state machine.

        Returns boolean to indicate if an entity was removed. """
        with self._lock:
            return self._states.pop(entity_id, None) is not None

    def set(self, entity_id, new_state, attributes=None):
        """ Set the state of an entity, add entity if it does not exist.

        Attributes is an optional dict to specify attributes of this state.

        If you just update the attributes and not the state, last changed will
        not be affected.
        """

        new_state = str(new_state)
        attributes = attributes or {}

        with self._lock:
            old_state = self._states.get(entity_id)

            is_existing = old_state is not None
            same_state = is_existing and old_state.state == new_state
            same_attr = is_existing and old_state.attributes == attributes

            # If the entity is new, or its state or attributes changed, set it
            if not (same_state and same_attr):
                last_changed = old_state.last_changed if same_state else None

                state = self._states[entity_id] = \
                    State(entity_id, new_state, attributes, last_changed)

                event_data = {'entity_id': entity_id, 'new_state': state}

                if old_state:
                    event_data['old_state'] = old_state

                self._bus.fire(EVENT_STATE_CHANGED, event_data)

    def track_change(self, entity_ids, action, from_state=None, to_state=None):
        """
        Track specific state changes.
        entity_ids, from_state and to_state can each be a string or a list.
        Use a list to match multiple values.

        Returns the listener that listens on the bus for EVENT_STATE_CHANGED.
        Pass the return value into hass.bus.remove_listener to remove it.
        """
        from_state = _process_match_param(from_state)
        to_state = _process_match_param(to_state)

        # Ensure we have a lowercase tuple of the entity ids we want to match on
        if isinstance(entity_ids, str):
            entity_ids = (entity_ids.lower(), )
        else:
            entity_ids = tuple(entity_id.lower() for entity_id in entity_ids)

        @ft.wraps(action)
        def state_listener(event):
            """ The listener that listens for specific state changes. """
            if event.data['entity_id'].lower() in entity_ids and \
                    'old_state' in event.data and \
                    _matcher(event.data['old_state'].state, from_state) and \
                    _matcher(event.data['new_state'].state, to_state):

                action(event.data['entity_id'], event.data['old_state'],
                       event.data['new_state'])

        self._bus.listen(EVENT_STATE_CHANGED, state_listener)

        return state_listener
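
A hedged usage sketch of the state machine above: StubBus and StubState are assumptions standing in for the real event bus and State class (only the methods StateMachine actually touches are stubbed), and the snippet assumes it runs in the same module as the class with its imports (threading, CaseInsensitiveDict) in scope, so the module-level names State and EVENT_STATE_CHANGED can be aliased.

class StubState:
    """Assumed stand-in for the real State object."""
    def __init__(self, entity_id, state, attributes=None, last_changed=None):
        self.entity_id = entity_id
        self.state = state
        self.attributes = attributes or {}
        self.last_changed = last_changed

    def copy(self):
        return StubState(self.entity_id, self.state, self.attributes,
                         self.last_changed)


class StubBus:
    """Assumed stand-in for the event bus; only fire()/listen() are used here."""
    def listen(self, event_type, listener):
        pass

    def fire(self, event_type, event_data):
        print(event_type, '->', event_data['entity_id'],
              event_data['new_state'].state)


State = StubState                      # assumption: alias the name set() refers to
EVENT_STATE_CHANGED = 'state_changed'  # assumption: constant used by set()

machine = StateMachine(StubBus())
machine.set('light.Kitchen', 'on')     # prints: state_changed -> light.Kitchen on
print(machine.is_state('light.kitchen', 'on'))  # True: ids match case-insensitively
print(machine.get('LIGHT.KITCHEN').state)       # 'on'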
Example #3
    def exportForTexFile(
        self,
        texFileName,
        outFileName,
        overwrite=False,
        autosave=True,
        updateExisting=False,
        removeUnused=False,
        reorder=False,
        newOperation=True,
    ):
        """Reads a .tex file looking for the \cite{} commands,
        collects the bibtex entries cited in the text and
        stores them in a bibtex file.
        The entries are taken from the database first,
        or from INSPIRE-HEP if possible.
        The downloaded entries are saved in the database.

        Parameters:
            texFileName: the name (or a list of names)
                of the considered .tex file(s)
            outFileName: the name of the output file,
                where the required entries will be added
            overwrite (boolean, default False):
                if True, the previous version of the file is replaced
                and no backup copy is created
            autosave (boolean, default True):
                if True, the changes to the database are automatically saved.
            updateExisting (boolean, default False):
                if True, remove duplicates and update entries
                that have been changed in the DB
            removeUnused (boolean, default False):
                if True, remove bibtex entries that are no longer cited
                in the tex files
            reorder (boolean, default False):
                if True, reorder (not update!) the bibtex entries
                in the output .bib file before adding the new ones
            newOperation (boolean, default True):
                reset self.existingBibsList and re-read the .bib file content.
                Time consuming! Better to keep it updated
                when processing multiple tex files...

        Output:
            True if successful, False if errors occurred
        """
        db = bibtexparser.bibdatabase.BibDatabase()

        def printOutput(
            reqBibkeys, miss, retr, nFound, unexp, nKeys, warn, totalCites, full=False
        ):
            """Print information on the process"""
            pBLogger.info(exstr.resume)
            if totalCites is not None:
                pBLogger.info(exstr.keysFound % totalCites)
            pBLogger.info(exstr.newKeysFound % len(reqBibkeys))
            j = ", "
            if full:
                pBLogger.info(j.join(reqBibkeys))
            if len(miss) > 0:
                pBLogger.info(exstr.missingEntries % len(miss))
                if full:
                    pBLogger.info(j.join(miss))
            if len(retr) > 0:
                pBLogger.info(exstr.retrievedEntries % len(retr))
                pBLogger.info(j.join(retr))
            if len(nFound) > 0:
                pBLogger.info(exstr.entriesNotFound % len(nFound))
                pBLogger.info(j.join(nFound))
            if len(unexp) > 0:
                pBLogger.info(exstr.unexpectedForEntries % len(unexp))
                pBLogger.info(j.join(unexp))
            if len(nKeys.keys()) > 0:
                pBLogger.info(
                    exstr.nonMatchingEntries % len(nKeys.keys())
                    + "\n".join(["'%s' => '%s'" % (k, n) for k, n in nKeys.items()])
                )
            pBLogger.info(exstr.totalWarnings % warn)

        def saveEntryOutBib(a, m=None):
            """Remove unwanted fields and add the bibtex entry
            to the output file

            Parameters:
                a: the bibtex entry
                m: the ID (bibtex key) of the entry,
                    if it is not the default one
            """
            entry = (
                bibtexparser.bparser.BibTexParser(common_strings=True)
                .parse(a)
                .entries[0]
            )
            for u in self.unwantedFields:
                try:
                    del entry[u]
                except KeyError:
                    pass
            if m is not None:
                m = m.strip()
                if m != entry["ID"].strip():
                    entry["ID"] = m
            db.entries = [entry]
            bibf = pbWriter.write(db)
            try:
                with open(outFileName, "a") as o:
                    o.write(bibf)
                    pBLogger.info(exstr.entryInserted % m)
            except IOError:
                pBLogger.exception(exstr.errorWrite % outFileName)
                return False

        def removeUnusedBibtexs(existingBibsDict):
            """Functions that reads the list of bibtex entries
            in the existing .bib file and removes
            the ones that are not inside \cite commands
            """
            newDict = {}
            notFound = []
            for k, v in existingBibsDict.items():
                if k in self.allCitations:
                    newDict[k] = v
                else:
                    notFound.append(k)
            db.entries = [
                newDict[k]
                for k in sorted(
                    [e["ID"] for e in newDict.values()], key=lambda s: s.lower()
                )
            ]
            bibf = pbWriter.write(db)
            try:
                with open(outFileName, "w") as o:
                    o.write(exstr.byPhysbiblio + bibf)
                    pBLogger.info(exstr.entriesRemoved % notFound)
            except IOError:
                pBLogger.exception(exstr.errorWrite % outFileName)

        self.exportForTexFlag = True
        pBLogger.info(exstr.startEFTF)
        pBLogger.info(exstr.readFrom % texFileName)
        pBLogger.info(exstr.saveTo % outFileName)
        if autosave:
            pBLogger.info(exstr.autoSave)

        missing = []
        newKeys = {}
        notFound = []
        requiredBibkeys = []
        retrieved = []
        unexpected = []
        warnings = 0
        totalCites = 0

        # if overwrite, reset the output file
        if overwrite:
            updateExisting = False
            removeUnused = False
            reorder = False
            try:
                with open(outFileName, "w") as o:
                    o.write(exstr.byPhysbiblio)
            except IOError:
                pBLogger.exception(exstr.cannotWrite)
                return False

        # read previous content of output file, if any
        try:
            with open(outFileName, "r") as f:
                existingBibText = f.readlines()
        except IOError:
            pBLogger.error(exstr.cannotRead % outFileName)
            try:
                open(outFileName, "w").close()
            except IOError:
                pBLogger.exception(exstr.cannotCreate % outFileName)
                return False
            existingBibText = ""

        # this is time consuming if there are many entries.
        # Do not load it every time when processing multiple tex files!
        if newOperation:
            self.allCitations = set([])
            if existingBibText != "":
                self.existingBibsList = pBDB.bibs.parseAllBibtexs(
                    existingBibText, verbose=False
                )
            else:
                self.existingBibsList = []
        # work with dictionary, so that if there are repeated entries
        # (entries with same ID) they are automatically discarded
        existingBibsDict = CaseInsensitiveDict()
        for e in self.existingBibsList:
            existingBibsDict[e["ID"]] = e

        # if requested, do some cleaning
        if updateExisting or reorder:
            # update entry from DB if existing
            if updateExisting:
                for k, v in existingBibsDict.items():
                    e = pBDB.bibs.getByBibtex(k, saveQuery=False)
                    if len(e) > 0 and e[0]["bibtexDict"] != v:
                        existingBibsDict[k] = e[0]["bibtexDict"]
                        if existingBibsDict[k]["ID"].lower() != k.lower():
                            existingBibsDict[k]["ID"] = k

            # write new (updated) bib content
            # (so also repeated entries are removed)
            db.entries = [
                existingBibsDict[k]
                for k in sorted(
                    [e["ID"] for e in existingBibsDict.values()],
                    key=lambda s: s.lower(),
                )
            ]
            bibf = pbWriter.write(db)
            try:
                with open(outFileName, "w") as o:
                    o.write(exstr.byPhysbiblio + bibf)
                    pBLogger.info(exstr.outputUpdated)
            except IOError:
                pBLogger.exception(exstr.errorWrite % outFileName)

        # if a list of tex files was given, run this function for each of
        # them, without updateExisting and removeUnused!
        if isinstance(texFileName, list):
            if len(texFileName) == 0:
                return False
            elif len(texFileName) == 1:
                texFileName = texFileName[0]
            else:
                for t in texFileName:
                    req, m, ret, nF, un, nK, w, cits = self.exportForTexFile(
                        t,
                        outFileName,
                        overwrite=False,
                        autosave=autosave,
                        updateExisting=False,
                        removeUnused=False,
                        reorder=False,
                        newOperation=False,
                    )
                    requiredBibkeys += req
                    missing += m
                    retrieved += ret
                    notFound += nF
                    unexpected += un
                    for k, v in nK.items():
                        newKeys[k] = v
                    warnings += w
                pBLogger.info(exstr.doneAllTexs)
                if removeUnused:
                    removeUnusedBibtexs(existingBibsDict)
                printOutput(
                    requiredBibkeys,
                    missing,
                    retrieved,
                    notFound,
                    unexpected,
                    newKeys,
                    warnings,
                    len(self.allCitations),
                    full=True,
                )
                return (
                    requiredBibkeys,
                    missing,
                    retrieved,
                    notFound,
                    unexpected,
                    newKeys,
                    warnings,
                    len(self.allCitations),
                )

        # read the texFile
        keyscont = ""
        try:
            with open(texFileName) as r:
                keyscont += r.read()
        except IOError:
            pBLogger.exception(exstr.errorNoFile % texFileName)
            return False

        # extract \cite* commands
        matchKeys = "([0-9A-Za-z_\-':\+\.\&]+)"
        cite = re.compile(
            "\\\\(cite|citep|citet)\{([\n ]*" + matchKeys + "[,]?[\n ]*)*\}",
            re.MULTILINE,
        )  # find \cite{...}
        citeKeys = re.compile(
            matchKeys, re.MULTILINE
        )  # find the keys inside \cite{...}
        citaz = [m for m in cite.finditer(keyscont) if m != ""]
        pBLogger.info(exstr.citeFound % len(citaz))

        # extract required keys from \cite* commands
        for c in citaz:
            try:
                for e in [l.group(1) for l in citeKeys.finditer(c.group())]:
                    e = e.strip()
                    if e == "" or e in ["cite", "citep", "citet"]:
                        continue
                    self.allCitations.add(e)
                    if e not in requiredBibkeys:
                        try:
                            # this is just to check whether it is already present
                            tmp = existingBibsDict[e]
                        except KeyError:
                            requiredBibkeys.append(e)
            except (IndexError, AttributeError, TypeError):
                pBLogger.warning(exstr.errorCitation % c.group())
                a = []
        pBLogger.info(
            exstr.newKeysTotal % (len(requiredBibkeys), len(self.allCitations))
        )

        # if True, remove unused bibtex entries
        if removeUnused:
            removeUnusedBibtexs(existingBibsDict)

        # check what is missing in the database and insert/import
        # what is needed:
        for m in requiredBibkeys:
            if m.strip() == "":
                continue
            entry = pBDB.bibs.getByBibtex(m)
            entryMissing = len(entry) == 0
            if not self.exportForTexFlag:
                # if the flag has been cleared, skip everything
                # and go to the end
                continue
            elif not entryMissing:
                # if already in the database, just insert it as it is
                bibtex = entry[0]["bibtex"]
                bibtexDict = entry[0]["bibtexDict"]
            else:
                # if no entry is found, mark it as missing
                missing.append(m)
                # if not present, try INSPIRE import
                pBLogger.info(exstr.keyMissing % m)
                newWeb = pBDB.bibs.loadAndInsert(m, returnBibtex=True)
                newCheck = pBDB.bibs.getByBibtex(m, saveQuery=False)

                # if the import worked, insert the entry
                if len(newCheck) > 0:
                    # if key is not matching,
                    # just replace it in the exported bib and print a message
                    if m.strip().lower() != newCheck[0]["bibkey"].lower():
                        warnings += 1
                        newKeys[m] = newCheck[0]["bibkey"]
                    if newCheck[0]["bibkey"] not in retrieved:
                        retrieved.append(newCheck[0]["bibkey"])
                    pBDB.catBib.insert(
                        pbConfig.params["defaultCategories"], newCheck[0]["bibkey"]
                    )
                    bibtex = newCheck[0]["bibtex"]
                    bibtexDict = newCheck[0]["bibtexDict"]
                else:
                    # if nothing found, add a warning for the end
                    warnings += 1
                    notFound.append(m)
                    continue
                pBLogger.info("\n")
            # save in output file
            try:
                bibtexDict["ID"] = m
                self.existingBibsList.append(bibtexDict)
                saveEntryOutBib(bibtex, m)
            except:
                unexpected.append(m)
                pBLogger.exception(exstr.unexpectedEntry % m)

        if autosave:
            pBDB.commit()
        printOutput(
            requiredBibkeys,
            missing,
            retrieved,
            notFound,
            unexpected,
            newKeys,
            warnings,
            len(self.allCitations),
        )
        return (
            requiredBibkeys,
            missing,
            retrieved,
            notFound,
            unexpected,
            newKeys,
            warnings,
            len(self.allCitations),
        )
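
For reference, the citation-extraction step in the middle of exportForTexFile can be exercised on its own. The sketch below reuses the same two regular expressions (written here as raw strings); the sample LaTeX string and the bibtex keys in it are made-up placeholders:

import re

matchKeys = r"([0-9A-Za-z_\-':\+\.\&]+)"
cite = re.compile(
    r"\\(cite|citep|citet)\{([\n ]*" + matchKeys + r"[,]?[\n ]*)*\}",
    re.MULTILINE,
)  # find \cite{...}
citeKeys = re.compile(matchKeys, re.MULTILINE)  # find the keys inside \cite{...}

sample = r"Results \cite{Weinberg:1967tq, Glashow:1961tr} and \citep{Salam:1968rm}."
allCitations = set()
for c in cite.finditer(sample):
    for match in citeKeys.finditer(c.group()):
        key = match.group(1).strip()
        if key in ("", "cite", "citep", "citet"):
            continue
        allCitations.add(key)
print(sorted(allCitations))
# ['Glashow:1961tr', 'Salam:1968rm', 'Weinberg:1967tq']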