Example #1
 def get_bibliography_entries(self, docname, id_, warn):
     """Return filtered bibliography entries, sorted by citation order."""
     # get entries, ordered by bib file occurrence
     entries = OrderedDict((entry.key, entry)
                           for entry in self._get_bibliography_entries(
                               docname=docname, id_=id_, warn=warn))
     # order entries according to which were cited first
     # first, we add all keys that were cited
     # then, we add all remaining keys
     sorted_entries = []
     for key in self.get_all_cited_keys():
         try:
             entry = entries.pop(key)
         except KeyError:
             pass
         else:
             sorted_entries.append(entry)
     sorted_entries += entries.itervalues()
     return sorted_entries
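The cited-first ordering above works on its own: pop every cited key out of the OrderedDict, then append whatever was never cited in its original bib-file order. A minimal standalone sketch of that pattern, using illustrative keys instead of real pybtex entries:

from collections import OrderedDict

# entries in bib-file order; keys and values are illustrative only
entries = OrderedDict([('lamport1994', 'L'), ('knuth1984', 'K'), ('bryan2003', 'B')])
cited_keys = ['knuth1984', 'lamport1994']  # order of first citation in the document

sorted_entries = []
for key in cited_keys:
    try:
        sorted_entries.append(entries.pop(key))
    except KeyError:
        pass                        # cited key that is not in any bib file
sorted_entries += entries.values()  # never-cited entries keep bib-file order
print(sorted_entries)               # ['K', 'L', 'B']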
Example #2
class XRCCodeWriter(BaseLangCodeWriter, wcodegen.XRCMixin):
    """\
    Code writer class for writing XRC XML code out of the designed GUI
    elements.
    """
    xrc_objects = None
    """\
    dictionary of active L{XrcObject} instances: during the code generation
    it stores all the non-sizer objects that have children (i.e. frames,
    dialogs, panels, notebooks, etc.), while at the end of the code
    generation, before L{finalize} is called, it contains only the true
    toplevel objects (frames and dialogs), and is used to write their XML
    code (see L{finalize}). The other objects are deleted when L{add_object}
    is called with their corresponding code_object as argument
    (see L{add_object})
    """

    global_property_writers = {
        'font': FontPropertyHandler,
        'events': EventsPropertyHandler,
        'extraproperties': ExtraPropertiesPropertyHandler,
    }
    """\
    Dictionary whose items are custom handlers for widget properties
    """

    property_writers = {}
    """\
    Dictionary of dictionaries of property handlers specific for a widget
    the keys are the class names of the widgets

    Example: property_writers['wxRadioBox'] = {'choices', choices_handler}
    """

    obj_builders = {}
    """\
    Dictionary of ``writers'' for the various objects
    """

    tmpl_encoding = '<?xml version="1.0" encoding="%s"?>\n'
    tmpl_generated_by = '<!-- %(generated_by)s -->'

    use_names_for_binding_events = False

    # inject different XRC objects
    XrcObject = XrcObject
    SizerItemXrcObject = SizerItemXrcObject
    SpacerXrcObject = SpacerXrcObject
    DefaultXrcObject = DefaultXrcObject
    NotImplementedXrcObject = NotImplementedXrcObject

    def __init__(self):
        BaseLangCodeWriter.__init__(self)
        # Inject into all classes derived from XrcObject
        if not hasattr(XrcObject, 'tabs'):
            XrcObject.tabs = self.tabs
        if not hasattr(XrcObject, '_format_comment'):
            XrcObject._format_comment = self._format_comment

    def init_lang(self, app_attrs):
        # for now we handle only single-file code generation
        if self.multiple_files:
            raise errors.WxgXRCMultipleFilesNotSupported()

        # overwrite existing sources always
        self._overwrite = True

        self.output_file_name = app_attrs['path']
        self.out_file = StringIO.StringIO()
        self.out_file.write('\n<resource version="2.3.0.1">\n')
        self.curr_tab = 1
        self.xrc_objects = OrderedDict()

    def finalize(self):
        # write the code for every toplevel object
        for obj in self.xrc_objects.itervalues():
            obj.write(self.out_file, 1)
        self.out_file.write('</resource>\n')
        # store the contents to file
        self.save_file(self.output_file_name, self.out_file.getvalue())

    def add_app(self, app_attrs, top_win_class):
        """\
        In the case of XRC output, there's no wxApp code to generate
        """
        pass

    def add_object(self, unused, sub_obj):
        """\
        Adds the object sub_obj to the XRC tree. The first argument is unused.
        """
        # what we need in XRC is not top_obj, but sub_obj's true parent
        top_obj = sub_obj.parent
        builder = self.obj_builders.get(sub_obj.base, DefaultXrcObject)
        try:
            # check whether we already created the xrc_obj
            xrc_obj = sub_obj.xrc
        except AttributeError:
            # builder functions must return a subclass of XrcObject
            xrc_obj = builder(sub_obj)
            sub_obj.xrc = xrc_obj
        else:
            # if we found it, remove it from the self.xrc_objects dictionary
            # (if it was there, i.e. the object is not a sizer), because this
            # isn't a true toplevel object
            if sub_obj in self.xrc_objects:
                del self.xrc_objects[sub_obj]
        # let's see if sub_obj's parent already has an XrcObject: if so, it
        # is temporarily stored in the self.xrc_objects dict...
        if top_obj in self.xrc_objects:
            top_xrc = self.xrc_objects[top_obj]
        else:
            # ...otherwise, create it and store it in the self.xrc_objects
            # dict
            top_xrc = self.obj_builders.get(top_obj.base,
                                            DefaultXrcObject)(top_obj)
            top_obj.xrc = top_xrc
            self.xrc_objects[top_obj] = top_xrc
        top_obj.xrc.children.append(xrc_obj)

    def add_sizeritem(self, unused, sizer, obj, option, flag, border):
        """\
        Adds a sizeritem to the XRC tree. The first argument is unused.
        """
        # what we need in XRC is not toplevel, but obj's true parent
        toplevel = obj.parent
        top_xrc = toplevel.xrc
        obj_xrc = obj.xrc
        try:
            sizer_xrc = sizer.xrc
        except AttributeError:
            # if the sizer doesn't have an XrcObject yet, create it now
            sizer_xrc = self.obj_builders.get(sizer.base,
                                              DefaultXrcObject)(sizer)
            sizer.xrc = sizer_xrc
        # we now have to move the children from 'toplevel' to 'sizer'
        index = top_xrc.children.index(obj_xrc)
        if obj.klass == 'spacer':
            w = obj.properties.get('width', '0')
            h = obj.properties.get('height', '0')
            obj_xrc = SpacerXrcObject('%s, %s' % (w, h), str(option),
                                      str(flag), str(border))
            sizer.xrc.children.append(obj_xrc)
        else:
            sizeritem_xrc = SizerItemXrcObject(obj_xrc, str(option), str(flag),
                                               str(border))
            sizer.xrc.children.append(sizeritem_xrc)
        del top_xrc.children[index]

    def add_class(self, code_obj):
        """\
        Add class behaves very differently for XRC output than for other
        languages (i.e. python): since custom classes are not supported in
        XRC, this has effect only for true toplevel widgets, i.e. frames and
        dialogs. For other kinds of widgets, this is equivalent to add_object
        """
        if code_obj not in self.xrc_objects:
            builder = self.obj_builders.get(code_obj.base, DefaultXrcObject)
            xrc_obj = builder(code_obj)
            code_obj.xrc = xrc_obj
            # add the xrc_obj to the dict of the toplevel ones
            self.xrc_objects[code_obj] = xrc_obj

    def generate_code_id(self, obj, id=None):
        # XRC output needs no generated window IDs, so return empty strings
        return '', ''

    def _format_comment(self, msg):
        return '<!-- %s -->' % escape(msg.rstrip())

    def _quote_str(self, s):
        return s
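add_object and add_class above resolve a writer class through obj_builders, with DefaultXrcObject as the fallback. A standalone sketch of that registry-with-fallback dispatch; the builder names here are made up and are not actual wxGlade classes:

class DefaultBuilder(object):
    """Fallback writer used when no specific builder is registered."""
    def __init__(self, obj):
        self.obj = obj

class FrameBuilder(DefaultBuilder):
    """Writer registered for one specific widget base class."""

obj_builders = {'EditFrame': FrameBuilder}

def build(base, obj):
    # same lookup as self.obj_builders.get(sub_obj.base, DefaultXrcObject)
    return obj_builders.get(base, DefaultBuilder)(obj)

print(type(build('EditFrame', object())).__name__)   # FrameBuilder
print(type(build('EditButton', object())).__name__)  # DefaultBuilder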
Example #3
class Root(object):
    """Container for all the processed data."""

    #: An OrderedDict mapping entry names to Entry instances.
    entries = None

    #: A list of Gloss instances.
    glosses = None

    #: A dict mapping stems in definitions to lists of Entry instances.
    definition_stems = None

    #: A dict mapping stems in notes to lists of Entry instances.
    note_stems = None

    #: A dict mapping the stem of glosses to lists of Gloss instances.
    gloss_stems = None

    #: A dict mapping grammatical classes to font sizes in ems.
    class_scales = None

    #: A dict mapping grammatical classes to lists
    #: of ``[chapter, section]`` lists.
    cll = None

    #: A dict mapping grammatical classes to terminating grammatical classes.
    terminators = None

    #: A string that changes if the database changes.
    etag = None

    def __init__(self, db):
        cfg = db.app.config

        root_path = db.app.root_path
        jbovlaste = cfg.get('VLASISKU_JBOVLASTE', 'data/jbovlaste.xml')
        class_scales = cfg.get('VLASISKU_CLASS_SCALES',
                               'data/class-scales.yml')
        cll = cfg.get('VLASISKU_CLL', 'data/cll.yml')
        terminators = cfg.get('VLASISKU_TERMINATORS', 'data/terminators.yml')

        self.class_scales = load_yaml(join(root_path, class_scales))
        self.cll = load_yaml(join(root_path, cll))
        self.terminators = load_yaml(join(root_path, terminators))

        with open(join(root_path, jbovlaste)) as f:
            xml = ElementTree.parse(f)
            print 'Rebuilding database; this might take a minute or two.  Printing a . for each thousand entries.'
            self._load_entries(xml)
            self._load_glosses(xml)

        self.etag = str(getmtime(join(root_path, jbovlaste)))

    def query(self, query):
        """Query database with query language.

        >>> from vlasisku.extensions import database
        >>> len(database.root.query('class:UI4')['matches'])
        6

        """
        parsed_query = parse_query(query)
        matches = set()
        entry = self.entries.get(query, None)
        if entry:
            matches.add(entry)

        if parsed_query['all']:
            words = []

            glosses = self.matches_gloss(parsed_query['all'], matches)
            matches.update(g.entry for g in glosses)

            affix = self.matches_affix(parsed_query['all'], matches)
            matches.update(affix)

            classes = self.matches_class(parsed_query['all'], matches)
            classes += [
                e for e in self.entries.itervalues()
                if e.grammarclass and e not in classes
                and re.split(r'[0-9*]', e.grammarclass)[0] == query
            ]
            matches.update(classes)

            types = self.matches_type(parsed_query['all'], matches)
            matches.update(types)

            definitions = self.matches_definition(parsed_query['all'], matches)
            matches.update(definitions)

            notes = self.matches_notes(parsed_query['all'], matches)
            matches.update(notes)

        else:
            words = self.matches_word(parsed_query['word'])
            matches.update(words)

            glosses = self.matches_gloss(parsed_query['gloss'], matches)
            matches.update(g.entry for g in glosses)

            affix = self.matches_affix(parsed_query['affix'], matches)
            matches.update(affix)

            classes = self.matches_class(parsed_query['class'], matches)
            matches.update(classes)

            types = self.matches_type(parsed_query['type'], matches)
            matches.update(types)

            definitions = self.matches_definition(parsed_query['definition'],
                                                  matches)
            matches.update(definitions)

            notes = self.matches_notes(parsed_query['notes'], matches)
            matches.update(notes)

        if parsed_query['word']:
            matches = set(e for e in self.matches_word(parsed_query['word'])
                          if e in matches)
        if parsed_query['gloss']:
            matches = set(g.entry
                          for g in self.matches_gloss(parsed_query['gloss'])
                          if g.entry in matches)
        if parsed_query['affix']:
            matches = set(e for e in self.matches_affix(parsed_query['affix'])
                          if e in matches)
        if parsed_query['class']:
            matches = set(e for e in self.matches_class(parsed_query['class'])
                          if e in matches)
        if parsed_query['type']:
            matches = set(e for e in self.matches_type(parsed_query['type'])
                          if e in matches)
        if parsed_query['definition']:
            matches = set(
                e for e in self.matches_definition(parsed_query['definition'])
                if e in matches)
        if parsed_query['notes']:
            matches = set(e for e in self.matches_notes(parsed_query['notes'])
                          if e in matches)

        words = [e for e in words if e in matches]
        glosses = [g for g in glosses if g.entry in matches]
        affix = [e for e in affix if e in matches]
        classes = [e for e in classes if e in matches]
        types = [e for e in types if e in matches]
        definitions = [e for e in definitions if e in matches]
        notes = [e for e in notes if e in matches]

        results = dict(locals())
        del results['self']
        return results

    def suggest(self, prefix):
        suggestions = []
        types = []
        entries = (e for e in self.entries.iterkeys() if e.startswith(prefix))
        glosses = (g.gloss for g in self.glosses if g.gloss.startswith(prefix))
        classes = set(e.grammarclass for e in self.entries.itervalues()
                      if e.grammarclass and e.grammarclass.startswith(prefix))
        for x in xrange(5):
            with ignore(StopIteration):
                suggestions.append(entries.next())
                types.append(self.entries[suggestions[-1]].type)
            with ignore(StopIteration):
                gloss = glosses.next()
                if ' ' in gloss:
                    suggestions.append('"%s"' % gloss)
                else:
                    suggestions.append(gloss)
                types.append('gloss')
            with ignore(KeyError):
                suggestions.append(classes.pop())
                types.append('class')
        return [prefix, suggestions, types]

    @selector
    def matches_word(self, queries, exclude):
        return (e for q in queries for e in self.entries.itervalues()
                if fnmatch(e.word, q))

    @selector
    def matches_gloss(self, queries, exclude):
        # use a list, not a generator: the stems are iterated more than once below
        queries = [stem(q.lower()) for q in queries]
        return (g for q in queries for g in self.gloss_stems.get(q, [])
                if all(g in self.gloss_stems.get(q, []) for q in queries)
                if g.entry not in exclude)

    @selector
    def matches_affix(self, queries, exclude):
        return (e for e in self.entries.itervalues() if e not in exclude
                for q in queries if any(
                    fnmatch(a, q) for a in e.searchaffixes))

    @selector
    def matches_class(self, queries, exclude):
        return (e for q in queries for e in self.entries.itervalues()
                if e not in exclude if q == e.grammarclass)

    @selector
    def matches_type(self, queries, exclude):
        return (e for q in queries for e in self.entries.itervalues()
                if e not in exclude if fnmatch(e.type, q))

    @selector
    def matches_definition(self, queries, exclude):
        # use a list, not a generator: the stems are iterated more than once below
        queries = [stem(q.lower()) for q in queries]
        return (e for q in queries for e in self.definition_stems.get(q, [])
                if all(e in self.definition_stems.get(q, []) for q in queries)
                if e not in exclude)

    @selector
    def matches_notes(self, queries, exclude):
        # use a list, not a generator: the stems are iterated more than once below
        queries = [stem(q.lower()) for q in queries]
        return (e for q in queries for e in self.note_stems.get(q, [])
                if all(e in self.note_stems.get(q, []) for q in queries)
                if e not in exclude)

    def _load_entries(self, xml):
        processors = {
            'rafsi': self._process_rafsi,
            'selmaho': self._process_selmaho,
            'definition': self._process_definition,
            'notes': self._process_notes
        }

        self.entries = OrderedDict()
        self.definition_stems = {}
        self.note_stems = {}

        count = 0

        for type, _ in TYPES:
            for valsi in xml.findall('.//valsi'):
                if valsi.get('type') == type:
                    count += 1
                    if count % 1000 == 0:
                        sys.stdout.write('.')
                        sys.stdout.flush()
                    entry = Entry(self)
                    entry.type = type
                    entry.word = valsi.get('word')

                    if type in ('gismu', 'experimental gismu'):
                        entry.searchaffixes.append(entry.word)
                        entry.searchaffixes.append(entry.word[0:4])

                    for child in valsi.getchildren():
                        tag, text = child.tag, child.text
                        processors.get(tag, lambda a, b: None)(entry, text)

                    self.entries[entry.word] = entry

        for entry in self.entries.itervalues():
            if entry.notes:
                entry.notes = braces2links(entry.notes, self.entries)

    def _process_rafsi(self, entry, text):
        entry.affixes.append(text)
        entry.searchaffixes.append(text)

    def _process_selmaho(self, entry, text):
        entry.grammarclass = text
        for grammarclass, terminator in self.terminators.iteritems():
            if text == grammarclass:
                entry.terminator = terminator
            if text == terminator:
                entry.terminates.append(grammarclass)
        if text in self.cll:
            for path in self.cll[text]:
                section = '%s.%s' % tuple(path)
                link = 'http://dag.github.io/cll/%s/%s/'
                entry.cll.append((section, link % tuple(path)))

    def _process_definition(self, entry, text):
        if text is None:
            text = ""
        entry.definition = tex2html(text)
        entry.textdefinition = strip_html(entry.definition)
        tokens = re.findall(r"[\w']+", text, re.UNICODE)
        for token in set(tokens):
            add_stems(token, self.definition_stems, entry)

    def _process_notes(self, entry, text):
        entry.notes = tex2html(text)
        entry.textnotes = strip_html(entry.notes)
        tokens = re.findall(r"[\w']+", text, re.UNICODE)
        for token in set(tokens):
            add_stems(token, self.note_stems, entry)

    def _load_glosses(self, xml):
        self.glosses = []
        self.gloss_stems = {}

        # import pprint
        # pprint.pprint(dict(self.entries.items()))

        count = 0

        for type, _ in TYPES:
            for word in xml.findall('.//nlword'):
                count += 1
                if count % 1000 == 0:
                    sys.stdout.write('.')
                    sys.stdout.flush()
                # pprint.pprint(word.get('valsi'))

                entry = self.entries[word.get('valsi')]
                # pprint.pprint(entry)
                if entry.type == type:
                    gloss = Gloss()
                    gloss.gloss = word.get('word')
                    gloss.entry = entry
                    gloss.sense = word.get('sense')
                    gloss.place = word.get('place')
                    self.glosses.append(gloss)
                    add_stems(gloss.gloss, self.gloss_stems, gloss)
        print ''
        print 'Rebuild complete.'
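matches_definition, matches_notes, and matches_gloss above share one stem-intersection idea: an item matches only if every query stem maps to it. A standalone sketch with an illustrative stem index:

# stem index: stem -> entries containing that stem (data is illustrative)
definition_stems = {
    'red':   ['xunre', 'labyxu'],
    'color': ['xunre', 'skari'],
}
queries = ['red', 'color']

matches = [e for q in queries for e in definition_stems.get(q, [])
           if all(e in definition_stems.get(q2, []) for q2 in queries)]
print(matches)        # ['xunre', 'xunre'] -- one hit per matching query
print(set(matches))   # deduplicated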
Example #4
class HasParameters(object):
    """This class provides an implementation of the IHasParameters interface."""

    _do_not_promote = ['get_expr_depends', 'get_referenced_compnames',
                       'get_referenced_varpaths', 'get_metadata']

    def __init__(self, parent):
        self._parameters = OrderedDict()
        self._allowed_types = ['continuous']
        if obj_has_interface(parent, ISolver):
            self._allowed_types.append('unbounded')
        self._parent = None if parent is None else weakref.ref(parent)

    def __getstate__(self):
        state = self.__dict__.copy()
        state['_parent'] = self.parent
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        parent = state['_parent']
        self._parent = None if parent is None else weakref.ref(parent)

    @property
    def parent(self):
        """ The object we are a delegate of. """
        return None if self._parent is None else self._parent()

    def _item_count(self):
        """This is used by the replace function to determine if a delegate from
        the target object is 'empty' or not.  If it's empty, it's not an error
        if the replacing object doesn't have this delegate.
        """
        return len(self._parameters)

    def add_parameter(self, target, low=None, high=None,
                      scaler=None, adder=None, start=None,
                      fd_step=None, name=None, scope=None):
        """Adds a parameter or group of parameters to the driver.

        target: string or iter of strings or Parameter
            What the driver should vary during execution. A *target* is an
            expression that can reside on the left-hand side of an assignment
            statement, so typically it will be the name of a variable or
            possibly a subscript expression indicating an entry within an array
            variable, e.g., x[3]. If an iterator of targets is given, then the
            driver will set all targets given to the same value whenever it
            varies this parameter during execution. If a Parameter instance is
            given, then that instance is copied into the driver with any other
            arguments specified, overriding the values in the given parameter.

        low: float (optional)
            Minimum allowed value of the parameter. If scaler and/or adder
            is supplied, use the transformed value here. If target is an array,
            this may also be an array, but must have the same size.

        high: float (optional)
            Maximum allowed value of the parameter. If scaler and/or adder
            is supplied, use the transformed value here. If target is an array,
            this may also be an array, but must have the same size.

        scaler: float (optional)
            Value to multiply the possibly offset parameter value by. If target
            is an array, this may also be an array, but must have the same size.

        adder: float (optional)
            Value to add to parameter prior to possible scaling. If target is
            an array, this may also be an array, but must have the same size.

        start: any (optional)
            Value to set into the target or targets of a parameter before
            starting any executions. If not given, analysis will start with
            whatever values are in the target or targets at that time. If target
            is an array, this may also be an array, but must have the same size.

        fd_step: float (optional)
            Step-size to use for finite difference calculation. If no value is
            given, the differentiator will use its own default. If target is an
            array, this may also be an array, but must have the same size.

        name: str (optional)
            Name used to refer to the parameter in place of the name of the
            variable referred to in the parameter string.
            This is sometimes useful if, for example, multiple entries in the
            same array variable are declared as parameters.

        scope: object (optional)
            The object to be used as the scope when evaluating the expression.

        If neither "low" nor "high" is specified, the min and max will
        default to the values in the metadata of the variable being
        referenced.
        """

        if isinstance(target, (ParameterBase, ParameterGroup)):
            self._parameters[target.name] = target
            target.override(low, high, scaler, adder, start, fd_step, name)
        else:
            if isinstance(target, basestring):
                names = [target]
                key = target
            else:
                names = target
                key = tuple(target)

            if name is not None:
                key = name

            dups = set(self.list_param_targets()).intersection(names)
            if len(dups) == 1:
                self.parent.raise_exception("'%s' is already a Parameter"
                                            " target" % dups.pop(), ValueError)
            elif len(dups) > 1:
                self.parent.raise_exception("%s are already Parameter targets"
                                            % sorted(list(dups)), ValueError)

            if key in self._parameters:
                self.parent.raise_exception("%s is already a Parameter" % key,
                                            ValueError)
            try:
                _scope = self._get_scope(scope)
                if len(names) == 1:
                    target = self._create(names[0], low, high, scaler, adder,
                                          start, fd_step, key, _scope)
                else:  # defining a ParameterGroup
                    parameters = [self._create(n, low, high, scaler, adder,
                                               start, fd_step, key, _scope)
                                  for n in names]
                    types = set([p.valtypename for p in parameters])
                    if len(types) > 1:
                        raise ValueError("Can't add parameter %s because "
                                         "%s are not all of the same type" %
                                         (key, " and ".join(names)))
                    target = ParameterGroup(parameters)
                self._parameters[key] = target
            except Exception:
                self.parent.reraise_exception()

        self.parent.config_changed()

    def _create(self, target, low, high, scaler, adder, start, fd_step,
                key, scope):
        """ Create one Parameter or ArrayParameter. """
        try:
            expreval = ExprEvaluator(target, scope)
        except Exception as err:
            raise err.__class__("Can't add parameter: %s" % err)
        if not expreval.is_valid_assignee():
            raise ValueError("Can't add parameter: '%s' is not a"
                             " valid parameter expression"
                             % expreval.text)
        try:
            val = expreval.evaluate()
        except Exception as err:
            val = None  # Let Parameter code sort out why.

        name = key[0] if isinstance(key, tuple) else key

        if isinstance(val, ndarray):
            return ArrayParameter(target, low=low, high=high,
                                  scaler=scaler, adder=adder,
                                  start=start, fd_step=fd_step,
                                  name=name, scope=scope,
                                  _expreval=expreval, _val=val,
                                  _allowed_types=self._allowed_types)
        else:
            return Parameter(target, low=low, high=high,
                             scaler=scaler, adder=adder,
                             start=start, fd_step=fd_step,
                             name=name, scope=scope,
                             _expreval=expreval, _val=val,
                             _allowed_types=self._allowed_types)

    def remove_parameter(self, name):
        """Removes the parameter with the given name."""
        param = self._parameters.get(name)
        if param:
            del self._parameters[name]
        else:
            self.parent.raise_exception("Trying to remove parameter '%s' "
                                        "that is not in this driver."
                                        % (name,), AttributeError)
        self.parent.config_changed()

    def config_parameters(self):
        """Reconfigure parameters from potentially changed targets."""
        for param in self._parameters.values():
            param.configure()

    def get_references(self, name):
        """Return references to component `name` in preparation for subsequent
        :meth:`restore_references` call.

        name: string
            Name of component being removed.
        """
        refs = OrderedDict()
        for pname, param in self._parameters.items():
            if name in param.get_referenced_compnames():
                refs[pname] = param
        return refs

    def remove_references(self, name):
        """Remove references to component `name`.

        name: string
            Name of component being removed.
        """
        to_remove = []
        for pname, param in self._parameters.items():
            if name in param.get_referenced_compnames():
                to_remove.append(pname)

        for pname in to_remove:
            self.remove_parameter(pname)

    def restore_references(self, refs):
        """Restore references to component `name` from `refs`.

        refs: object
            Value returned by :meth:`get_references`.
        """
        for pname, param in refs.items():
            try:
                self.add_parameter(param)
            except Exception as err:
                self.parent._logger.warning("Couldn't restore parameter '%s': %s"
                                            % (pname, str(err)))

    def list_param_targets(self):
        """Returns a list of parameter targets. Note that this
        list may contain more entries than the list of Parameter,
        ParameterGroup, and ArrayParameter objects since ParameterGroup
        instances have multiple targets.
        """
        targets = []
        for param in self._parameters.values():
            targets.extend(param.targets)
        return targets

    def list_param_group_targets(self):
        """Returns a list of tuples that contain the targets for each
        parameter group.
        """
        targets = []
        for param in self.get_parameters().values():
            targets.append(tuple(param.targets))
        return targets

    def clear_parameters(self):
        """Removes all parameters."""
        for name in self._parameters.keys():
            self.remove_parameter(name)
        self._parameters = OrderedDict()

    def get_parameters(self):
        """Returns an ordered dict of parameter objects."""
        return self._parameters

    def total_parameters(self):
        """Returns the total number of values to be set."""
        return sum([param.size for param in self._parameters.values()])

    def init_parameters(self):
        """Sets all parameters to their start value if a
        start value is given
        """
        scope = self._get_scope()
        for param in self._parameters.itervalues():
            if param.start is not None:
                param.set(param.start, scope)

    def set_parameter_by_name(self, name, value, case=None, scope=None):
        """Sets a single parameter by its name attribute.

        name: str
            Name of the parameter. This is either the name alias given when
            the parameter was added or the variable path of the parameter's
            target if no name was given.

        value: object (typically a float)
            Value of the parameter to be set.

        case: Case (optional)
            If supplied, the values will be associated with their corresponding
            targets and added as inputs to the Case instead of being set
            directly into the model.
        """
        param = self._parameters[name]
        if case is None:
            param.set(value, self._get_scope(scope))
        else:
            for target in param.targets:
                case.add_input(target, value)
            return case

    def set_parameters(self, values, case=None, scope=None):
        """Pushes the values in the iterator 'values' into the corresponding
        variables in the model.  If the 'case' arg is supplied, the values
        will be set into the case and not into the model.

        values: iterator
            Iterator of input values with an order defined to match the
            order of parameters returned by the get_parameters method. All
            'values' must support the len() function.

        case: Case (optional)
            If supplied, the values will be associated with their corresponding
            targets and added as inputs to the Case instead of being set
            directly into the model.
        """
        if len(values) != self.total_parameters():
            raise ValueError("number of input values (%s) != expected number of"
                             " values (%s)" %
                             (len(values), self.total_parameters()))
        if case is None:
            scope = self._get_scope(scope)
            start = 0
            for param in self._parameters.values():
                size = param.size
                if size == 1:
                    param.set(values[start], scope)
                    start += 1
                else:
                    end = start + size
                    param.set(values[start:end], scope)
                    start = end
        else:
            start = 0
            for param in self._parameters.values():
                size = param.size
                if size == 1:
                    for target in param.targets:
                        case.add_input(target, values[start])
                    start += 1
                else:
                    end = start + size
                    for target in param.targets:
                        case.add_input(target, values[start:end])
                    start = end
            return case

    def eval_parameters(self, scope=None, dtype='d'):
        """Return evaluated parameter values.

        dtype: string or None
            If not None, return an array of this dtype. Otherwise just return
            a list (useful if parameters may be of different types).
        """
        result = []
        for param in self._parameters.values():
            result.extend(param.evaluate(scope))
        if dtype:
            result = array(result, dtype)
        return result

    def get_lower_bounds(self, dtype='d'):
        """Return lower bound values.

        dtype: string or None
            If not None, return an array of this dtype. Otherwise just return
            a list (useful if parameters may be of different types).
        """
        result = []
        for param in self._parameters.values():
            result.extend(param.get_low())
        if dtype:
            result = array(result, dtype)
        return result

    def get_upper_bounds(self, dtype='d'):
        """Return upper bound values.

        dtype: string or None
            If not None, return an array of this dtype. Otherwise just return
            a list (useful if parameters may be of different types).
        """
        result = []
        for param in self._parameters.values():
            result.extend(param.get_high())
        if dtype:
            result = array(result, dtype)
        return result

    def get_fd_steps(self, dtype='d'):
        """Return fd_step values, they may include None.

        dtype: string or None
            If not None, return an array of this dtype. Otherwise just return
            a list (useful if it's valid to have None for a step size).
        """
        result = []
        for param in self._parameters.values():
            result.extend(param.get_fd_step())
        if dtype:
            result = array(result, dtype)
        return result

    def get_expr_depends(self):
        """Returns a list of tuples of the form (src_comp_name, dest_comp_name)
        for each dependency introduced by a parameter.
        """
        conn_list = []
        pname = self.parent.name
        for param in self._parameters.values():
            for cname in param.get_referenced_compnames():
                conn_list.append((pname, cname))
        return conn_list

    def get_referenced_compnames(self):
        """Return a set of Component names based on the
        pathnames of Variables referenced in our target strings.
        """
        result = set()
        for param in self._parameters.values():
            result.update(param.get_referenced_compnames())
        return result

    def get_referenced_varpaths(self):
        """Return a set of Variable names referenced in our target strings.
        """
        result = set()
        for param in self._parameters.values():
            result.update(param.get_referenced_varpaths())
        return result

    def _get_scope(self, scope=None):
        if scope is None:
            try:
                return self.parent.get_expr_scope()
            except AttributeError:
                pass
        return scope

    def mimic(self, target):
        old = self._parameters
        self.clear_parameters()
        try:
            for name, param in target.get_parameters().items():
                self._parameters[name] = param.copy()
        except Exception:
            self._parameters = old
            raise
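set_parameters and eval_parameters above treat the parameters as one flat value vector, with each parameter consuming param.size consecutive values. A standalone sketch of that slicing, with illustrative sizes and values:

sizes = [1, 3, 1]                   # one scalar, one 3-element array, one scalar
values = [2.0, 0.1, 0.2, 0.3, 5.0]  # flat vector as passed to set_parameters

start = 0
chunks = []
for size in sizes:
    if size == 1:
        chunks.append(values[start])      # scalar parameter takes one value
        start += 1
    else:
        end = start + size
        chunks.append(values[start:end])  # array parameter takes a slice
        start = end
print(chunks)   # [2.0, [0.1, 0.2, 0.3], 5.0]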
Example #5
 def apply(self):
     """Transform each
     :class:`~sphinxcontrib.bibtex.nodes.bibliography` node into a
     list of citations.
     """
     env = self.document.settings.env
     for bibnode in self.document.traverse(bibliography):
         # get the information of this bibliography node
         # by looking up its id in the bibliography cache
         id_ = bibnode['ids'][0]
         infos = [
             info for other_id, info in
             env.bibtex_cache.bibliographies.iteritems()
             if other_id == id_ and info.docname == env.docname
         ]
         if not infos:
             raise RuntimeError(
                 "document %s has no bibliography nodes with id '%s'" %
                 (env.docname, id_))
         elif len(infos) >= 2:
             raise RuntimeError(
                 "document %s has multiple bibliography nodes with id '%s'"
                 % (env.docname, id_))
         info = infos[0]
         # generate entries
         entries = OrderedDict()
         for bibfile in info.bibfiles:
             # XXX entries are modified below in an unpicklable way
             # XXX so fetch a deep copy
             data = env.bibtex_cache.bibfiles[bibfile].data
             if info.cite == "all":
                 bibfile_entries = data.entries.itervalues()
             elif info.cite == "cited":
                 bibfile_entries = (entry
                                    for entry in data.entries.itervalues()
                                    if env.bibtex_cache.is_cited(entry.key))
             elif info.cite == "notcited":
                 bibfile_entries = (
                     entry for entry in data.entries.itervalues()
                     if not env.bibtex_cache.is_cited(entry.key))
             else:
                 raise RuntimeError("invalid cite option (%s)" % info.cite)
             for entry in bibfile_entries:
                 entries[entry.key] = copy.deepcopy(entry)
         # order entries according to which were cited first
         # first, we add all keys that were cited
         # then, we add all remaining keys
         sorted_entries = []
         for key in env.bibtex_cache.get_all_cited_keys():
             try:
                 entry = entries.pop(key)
             except KeyError:
                 pass
             else:
                 sorted_entries.append(entry)
         sorted_entries += entries.itervalues()
         # locate and instantiate style plugin
         style_cls = find_plugin('pybtex.style.formatting', info.style)
         style = style_cls()
         # create citation nodes for all references
         backend = output_backend()
         if info.list_ == "enumerated":
             nodes = docutils.nodes.enumerated_list()
             nodes['enumtype'] = info.enumtype
             if info.start >= 1:
                 nodes['start'] = info.start
                 env.bibtex_cache.set_enum_count(env.docname, info.start)
             else:
                 nodes['start'] = env.bibtex_cache.get_enum_count(
                     env.docname)
         elif info.list_ == "bullet":
             nodes = docutils.nodes.bullet_list()
         else:  # "citation"
             nodes = docutils.nodes.paragraph()
         # XXX style.format_entries modifies entries in an unpicklable way
         for entry in style.format_entries(sorted_entries):
             if info.list_ == "enumerated" or info.list_ == "bullet":
                 citation = docutils.nodes.list_item()
                 citation += entry.text.render(backend)
             else:  # "citation"
                 citation = backend.citation(entry, self.document)
                 # backend.citation(...) uses entry.key as the citation label;
                 # we change it to entry.label later on, but we must record
                 # entry.label now; at this point, we also prefix the label
                 key = citation[0].astext()
                 info.labels[key] = info.labelprefix + entry.label
             node_text_transform(citation, transform_url_command)
             if info.curly_bracket_strip:
                 node_text_transform(citation,
                                     transform_curly_bracket_strip)
             nodes += citation
             if info.list_ == "enumerated":
                 env.bibtex_cache.inc_enum_count(env.docname)
         bibnode.replace_self(nodes)
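The cite-mode handling in apply() above ("all", "cited", "notcited") is a three-way selection over the bib-file entries. A standalone sketch, with a set standing in for env.bibtex_cache.is_cited():

entries = ['knuth1984', 'lamport1994', 'bryan2003']  # illustrative keys
cited = {'knuth1984'}                                # stand-in for is_cited()
mode = 'notcited'

if mode == 'all':
    selected = list(entries)
elif mode == 'cited':
    selected = [key for key in entries if key in cited]
elif mode == 'notcited':
    selected = [key for key in entries if key not in cited]
else:
    raise RuntimeError('invalid cite option (%s)' % mode)
print(selected)   # ['lamport1994', 'bryan2003']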
Example #6
class Warrior(object):
    '''The warrior god object.'''
    def __init__(self, projects_dir, data_dir, warrior_hq_url, real_shutdown=False, keep_data=False):
        if not os.access(projects_dir, os.W_OK):
            raise Exception("Couldn't write to projects directory: %s" % projects_dir)
        if not os.access(data_dir, os.W_OK):
            raise Exception("Couldn't write to data directory: %s" % data_dir)

        self.projects_dir = projects_dir
        self.data_dir = data_dir
        self.warrior_hq_url = warrior_hq_url
        self.real_shutdown = real_shutdown
        self.keep_data = keep_data

        # disable the password prompts
        self.gitenv = dict(os.environ.items() + { 'GIT_ASKPASS': '******', 'SSH_ASKPASS': '******' }.items())

        self.warrior_id = StringConfigValue(
          name="warrior_id",
          title="Warrior ID",
          description="The unique number of your warrior instance.",
          editable=False
        )
        self.selected_project_config_value = StringConfigValue(
          name="selected_project",
          title="Selected project",
          description="The project (to be continued when the warrior restarts).",
          default="none",
          editable=False
        )
        self.downloader = StringConfigValue(
          name="downloader",
          title="Your nickname",
          description="We use your nickname to show your results on our tracker. Letters and numbers only.",
          regex="^[-_a-zA-Z0-9]{3,30}$",
          advanced=False
        )
        self.concurrent_items = NumberConfigValue(
          name="concurrent_items",
          title="Concurrent items",
          description="How many items should the warrior download at a time? (Max: 6)",
          min=1,
          max=6,
          default=2
        )
        self.http_username = StringConfigValue(
          name="http_username",
          title="HTTP username",
          description="Enter a username to protect the web interface, or leave empty.",
          default=""
        )
        self.http_password = StringConfigValue(
          name="http_password",
          title="HTTP password",
          description="Enter a password to protect the web interface, or leave empty.",
          default=""
        )

        self.config_manager = ConfigManager(os.path.join(projects_dir, "config.json"))
        self.config_manager.add(self.warrior_id)
        self.config_manager.add(self.selected_project_config_value)
        self.config_manager.add(self.downloader)
        self.config_manager.add(self.concurrent_items)
        self.config_manager.add(self.http_username)
        self.config_manager.add(self.http_password)

        self.bandwidth_monitor = BandwidthMonitor("eth0")
        self.bandwidth_monitor.update()

        self.runner = Runner(concurrent_items=self.concurrent_items, keep_data=self.keep_data)
        self.runner.on_finish += self.handle_runner_finish

        self.current_project_name = None
        self.current_project = None

        self.selected_project = None

        self.projects = {}
        self.installed_projects = set()
        self.failed_projects = set()

        self.on_projects_loaded = Event()
        self.on_project_installing = Event()
        self.on_project_installed = Event()
        self.on_project_installation_failed = Event()
        self.on_project_refresh = Event()
        self.on_project_selected = Event()
        self.on_status = Event()

        self.http_client = AsyncHTTPClient()

        self.installing = False
        self.shut_down_flag = False
        self.reboot_flag = False

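        # periodic callbacks: check in with warrior HQ every 10 minutes and
        # check the selected project for updates every hour (intervals in ms)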
        self.hq_updater = ioloop.PeriodicCallback(self.update_warrior_hq, 10 * 60 * 1000)
        self.project_updater = ioloop.PeriodicCallback(self.update_project, 60 * 60 * 1000)
        self.forced_reboot_timeout = None

        self.lat_lng = None
        self.find_lat_lng()

    def find_lat_lng(self):
        # response = self.http_client.fetch("http://www.maxmind.com/app/mylocation", self.handle_lat_lng, user_agent="")
        pass

    def handle_lat_lng(self, response):
        m = re.search(r"geoip-demo-results-tbodyLatitude/Longitude</td>\s*<td[^>]*>\s*([-/.0-9]+)\s*</td>", response.body)
        if m:
            self.lat_lng = m.group(1)

    def bandwidth_stats(self):
        self.bandwidth_monitor.update()
        return self.bandwidth_monitor.current_stats()

    @gen.engine
    def update_warrior_hq(self):
        if realize(self.warrior_id) is None:
            response = yield gen.Task(self.http_client.fetch,
                                      os.path.join(self.warrior_hq_url, "api/register.json"),
                                      method="POST",
                                      headers={"Content-Type": "application/json"},
                                      user_agent=("ArchiveTeam Warrior/%s" % seesaw.__version__),
                                      body=json.dumps({"warrior":{"version": seesaw.__version__}}))
            if response.code == 200:
                data = json.loads(response.body)
                print "Received Warrior ID '%s'." % data["warrior_id"]
                self.config_manager.set_value("warrior_id", data["warrior_id"])
            else:
                print "HTTP error %s" % (response.code)
                return
        else:
            print "Warrior ID '%s'." % realize(self.warrior_id)

        response = yield gen.Task(self.http_client.fetch,
                                  os.path.join(self.warrior_hq_url, "api/update.json"),
                                  method="POST",
                                  headers={"Content-Type": "application/json"},
                                  user_agent=("ArchiveTeam Warrior/%s %s" % (seesaw.__version__, seesaw.runner_type)),
                                  body=json.dumps({"warrior":{
                                    "warrior_id": realize(self.warrior_id),
                                    "lat_lng": self.lat_lng,
                                    "downloader": realize(self.downloader),
                                    "selected_project": realize(self.selected_project_config_value)
                                  }}))
        if response.code == 200:
            data = json.loads(response.body)

            if StrictVersion(seesaw.__version__) < StrictVersion(data["warrior"]["seesaw_version"]):
                # time for an update
                print "Reboot for Seesaw update."
                self.reboot_gracefully()

                # schedule a forced reboot after two days
                self.schedule_forced_reboot()
                return

            projects_list = data["projects"]
            self.projects = OrderedDict([ (project["name"], project) for project in projects_list ])
            for project_data in self.projects.itervalues():
                if "deadline" in project_data:
                    project_data["deadline_int"] = time.mktime(time.strptime(project_data["deadline"], "%Y-%m-%dT%H:%M:%SZ"))


            previous_project_choice = realize(self.selected_project_config_value)

            if self.selected_project and self.selected_project not in self.projects:
                self.select_project(None)
            elif previous_project_choice in self.projects:
                # select previous project
                self.select_project(previous_project_choice)
            elif previous_project_choice == "auto":
                # ArchiveTeam's choice
                if "auto_project" in data:
                    self.select_project(data["auto_project"])
                else:
                    self.select_project(None)

            self.on_projects_loaded(self, self.projects)

        else:
            print "HTTP error %s" % (response.code)

    @gen.engine
    def install_project(self, project_name, callback=None):
        self.installed_projects.discard(project_name)

        if project_name in self.projects and not self.installing:
            self.installing = project_name
            self.install_output = []

            project = self.projects[project_name]
            project_path = os.path.join(self.projects_dir, project_name)

            self.on_project_installing(self, project)

            if project_name in self.failed_projects:
                if os.path.exists(project_path):
                    shutil.rmtree(project_path)
                self.failed_projects.discard(project_name)

            if os.path.exists(project_path):
                subprocess.Popen(
                    args=[ "git", "config", "remote.origin.url", project["repository"] ],
                    cwd=project_path
                ).communicate()

                p = AsyncPopen(
                    args=[ "git", "pull" ],
                    cwd=project_path,
                    env=self.gitenv
                )
            else:
                p = AsyncPopen(
                    args=[ "git", "clone", project["repository"], project_path ],
                    env=self.gitenv
                )
            p.on_output += self.collect_install_output
            p.on_end += yield gen.Callback("gitend")
            p.run()
            result = yield gen.Wait("gitend")

            if result != 0:
                self.install_output.append("\ngit returned %d\n" % result)
                self.on_project_installation_failed(self, project, "".join(self.install_output))
                self.installing = None
                self.failed_projects.add(project_name)
                if callback:
                    callback(False)
                return

            project_install_file = os.path.join(project_path, "warrior-install.sh")

            if os.path.exists(project_install_file):
                p = AsyncPopen(
                    args=[ project_install_file ],
                    cwd=project_path
                )
                p.on_output += self.collect_install_output
                p.on_end += yield gen.Callback("installend")
                p.run()
                result = yield gen.Wait("installend")

                if result != 0:
                    self.install_output.append("\nCustom installer returned %d\n" % result)
                    self.on_project_installation_failed(self, project, "".join(self.install_output))
                    self.installing = None
                    self.failed_projects.add(project_name)
                    if callback:
                        callback(False)
                    return

            data_dir = os.path.join(self.data_dir, "data")
            if os.path.exists(data_dir):
                shutil.rmtree(data_dir)
            os.makedirs(data_dir)

            project_data_dir = os.path.join(project_path, "data")
            if os.path.islink(project_data_dir):
                os.remove(project_data_dir)
            elif os.path.isdir(project_data_dir):
                shutil.rmtree(project_data_dir)
            os.symlink(data_dir, project_data_dir)

            self.installed_projects.add(project_name)
            self.on_project_installed(self, project, "".join(self.install_output))

            self.installing = None
            if callback:
                callback(True)

    @gen.engine
    def update_project(self):
        if self.selected_project and (yield gen.Task(self.check_project_has_update, self.selected_project)):
            # restart project
            self.start_selected_project()

    @gen.engine
    def check_project_has_update(self, project_name, callback):
        if project_name in self.projects:
            project = self.projects[project_name]
            project_path = os.path.join(self.projects_dir, project_name)

            self.install_output = []

            if not os.path.exists(project_path):
                callback(True)
                return

            subprocess.Popen(
                args=["git", "config", "remote.origin.url", project["repository"]],
                cwd=project_path
            ).communicate()

            p = AsyncPopen(
                args=["git", "fetch" ],
                cwd=project_path,
                env=self.gitenv
            )
            p.on_output += self.collect_install_output
            p.on_end += yield gen.Callback("gitend")
            p.run()
            result = yield gen.Wait("gitend")

            if result != 0:
                callback(True)
                return

            output = subprocess.Popen(
                args=["git", "rev-list", "HEAD..FETCH_HEAD"],
                cwd=project_path,
                stdout=subprocess.PIPE
            ).communicate()[0]
            if output.strip() != "":
                callback(True)
            else:
                callback(False)

    def collect_install_output(self, data):
        sys.stdout.write(data)
        data = re.sub("[\x00-\x08\x0b\x0c]", "", data)
        self.install_output.append(data)

    @gen.engine
    def select_project(self, project_name):
        if project_name == "auto":
            self.update_warrior_hq()
            return

        if project_name not in self.projects:
            project_name = None

        if project_name != self.selected_project:
            # restart
            self.selected_project = project_name
            self.on_project_selected(self, project_name)
            self.start_selected_project()

    def clone_project(self, project_name, project_path):
        version_string = subprocess.Popen(
            args=["git", "log", "-1", "--pretty=%h"],
            cwd=project_path,
            stdout=subprocess.PIPE
        ).communicate()[0].strip()

        project_versioned_path = os.path.join(self.data_dir, "projects", "%s-%s" % (project_name, version_string))
        if not os.path.exists(project_versioned_path):
            if not os.path.exists(os.path.join(self.data_dir, "projects")):
                os.makedirs(os.path.join(self.data_dir, "projects"))

            subprocess.Popen(
                args=["git", "clone", project_path, project_versioned_path],
                env=self.gitenv
            ).communicate()

        return project_versioned_path

    def load_pipeline(self, pipeline_path, context):
        dirname, basename = os.path.split(pipeline_path)
        if dirname == "":
            dirname = "."

        with open(pipeline_path) as f:
            pipeline_str = f.read()

        ConfigValue.start_collecting()

        local_context = context
        global_context = context
        curdir = os.getcwd()
        try:
            os.chdir(dirname)
            exec pipeline_str in local_context, global_context
        finally:
            os.chdir(curdir)

        config_values = ConfigValue.stop_collecting()

        project = local_context["project"]
        pipeline = local_context["pipeline"]
        pipeline.project = project
        return (project, pipeline, config_values)

    @gen.engine
    def start_selected_project(self):
        project_name = self.selected_project

        if project_name in self.projects:
            # install or update project if necessary
            if project_name not in self.installed_projects or (yield gen.Task(self.check_project_has_update, project_name)):
                result = yield gen.Task(self.install_project, project_name)
                if not result:
                    return

            # remove the configuration variables from the previous project
            if self.current_project:
                for config_value in self.current_project.config_values:
                    self.config_manager.remove(config_value.name)

            # the path with the project code
            # (this is the most recent code from the repository)
            project_path = os.path.join(self.projects_dir, project_name)

            # clone the project code to a versioned directory
            # where the pipeline is actually run
            project_versioned_path = self.clone_project(project_name, project_path)

            # load the pipeline from the versioned directory
            pipeline_path = os.path.join(project_versioned_path, "pipeline.py")
            (project, pipeline, config_values) = self.load_pipeline(pipeline_path, { "downloader": self.downloader })

            # add the configuration values to the config manager
            for config_value in config_values:
                self.config_manager.add(config_value)
            project.config_values = config_values

            # start the pipeline
            if not self.shut_down_flag and not self.reboot_flag:
                self.runner.set_current_pipeline(pipeline)

            self.current_project_name = project_name
            self.current_project = project

            self.on_project_refresh(self, self.current_project, self.runner)
            self.fire_status()

            if not self.shut_down_flag and not self.reboot_flag:
                self.runner.start()

        else:
            # project_name not in self.projects,
            # stop the current project (if there is one)
            self.runner.set_current_pipeline(None)
            self.fire_status()

    def handle_runner_finish(self, runner):
        if self.current_project:
            for config_value in self.current_project.config_values:
                self.config_manager.remove(config_value.name)

        self.current_project_name = None
        self.current_project = None

        self.on_project_refresh(self, self.current_project, self.runner)
        self.fire_status()

        if self.shut_down_flag or self.reboot_flag:
            ioloop.IOLoop.instance().stop()

            if self.real_shutdown:
                if self.shut_down_flag:
                    os.system("sudo shutdown -h now")
                elif self.reboot_flag:
                    os.system("sudo shutdown -r now")

    def start(self):
        if self.real_shutdown:
            # schedule a reboot
            ioloop.IOLoop.instance().add_timeout(datetime.timedelta(days=7), self.max_age_reached)

        self.hq_updater.start()
        self.project_updater.start()
        self.update_warrior_hq()
        ioloop.IOLoop.instance().start()

    def max_age_reached(self):
        if self.real_shutdown:
            # time for a sanity reboot
            print "Running for more than 7 days. Time to schedule a reboot."
            self.reboot_gracefully()

            # schedule a forced reboot after two days
            self.schedule_forced_reboot()

    def reboot_gracefully(self):
        self.shut_down_flag = False
        self.reboot_flag = True
        self.fire_status()
        if self.runner.is_active():
            self.runner.set_current_pipeline(None)
        else:
            ioloop.IOLoop.instance().stop()
            if self.real_shutdown:
                os.system("sudo shutdown -r now")

    def schedule_forced_reboot(self):
        if self.real_shutdown and not self.forced_reboot_timeout:
            self.forced_reboot_timeout = ioloop.IOLoop.instance().add_timeout(datetime.timedelta(days=2), self.forced_reboot)

    def forced_reboot(self):
        print "Stopping immediately..."
        if self.real_shutdown:
            os.system("sudo shutdown -r now")

    def stop_gracefully(self):
        self.shut_down_flag = True
        self.reboot_flag = False
        self.fire_status()
        if self.runner.is_active():
            self.runner.set_current_pipeline(None)
        else:
            ioloop.IOLoop.instance().stop()
            if self.real_shutdown:
                os.system("sudo shutdown -h now")

    def forced_stop(self):
        ioloop.IOLoop.instance().stop()
        if self.real_shutdown:
            os.system("sudo shutdown -h now")

    def keep_running(self):
        self.shut_down_flag = False
        self.reboot_flag = False
        self.start_selected_project()
        self.fire_status()

    class Status(object):
        NO_PROJECT = "NO_PROJECT"
        INVALID_SETTINGS = "INVALID_SETTINGS"
        STOPPING_PROJECT = "STOPPING_PROJECT"
        RESTARTING_PROJECT = "RESTARTING_PROJECT"
        RUNNING_PROJECT = "RUNNING_PROJECT"
        SWITCHING_PROJECT = "SWITCHING_PROJECT"
        STARTING_PROJECT = "STARTING_PROJECT"
        SHUTTING_DOWN = "SHUTTING_DOWN"
        REBOOTING = "REBOOTING"

    def fire_status(self):
        self.on_status(self, self.warrior_status())

    def warrior_status(self):
        if self.shut_down_flag:
            return Warrior.Status.SHUTTING_DOWN
        elif self.reboot_flag:
            return Warrior.Status.REBOOTING
        elif not self.config_manager.all_valid():
            return Warrior.Status.INVALID_SETTINGS
        elif self.selected_project == None and self.current_project_name == None:
            return Warrior.Status.NO_PROJECT
        elif self.selected_project:
            if self.selected_project == self.current_project_name:
                return Warrior.Status.RUNNING_PROJECT
            else:
                return Warrior.Status.STARTING_PROJECT
        else:
            return Warrior.Status.STOPPING_PROJECT
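A minimal usage sketch of the Warrior class listed above, assuming the constructor signature shown later in this collection (projects_dir, data_dir, warrior_hq_url, real_shutdown, keep_data); the directory paths and the HQ URL are placeholders, and the Event hooks are subscribed with the same += style the class itself uses. This is illustrative only, not the project's own startup code.

import os
import sys

def run_warrior_sketch():
    projects_dir = "/tmp/warrior/projects"   # placeholder path
    data_dir = "/tmp/warrior/data"           # placeholder path
    for directory in (projects_dir, data_dir):
        if not os.path.isdir(directory):
            os.makedirs(directory)

    warrior = Warrior(projects_dir, data_dir,
                      "http://warriorhq.archiveteam.org/",  # assumed HQ URL
                      real_shutdown=False)

    # subscribe to a couple of the Event hooks exposed by the class
    warrior.on_status += lambda w, status: sys.stdout.write(status + "\n")
    warrior.on_projects_loaded += lambda w, projects: None

    warrior.start()   # starts the periodic callbacks and the IOLoop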
Exemple #7
0
class HasParameters(object):
    """This class provides an implementation of the IHasParameters interface."""

    _do_not_promote = [
        'get_expr_depends', 'get_referenced_compnames',
        'get_referenced_varpaths', 'get_metadata'
    ]

    def __init__(self, parent):
        self._parameters = OrderedDict()
        self._allowed_types = ['continuous']
        if obj_has_interface(parent, ISolver):
            self._allowed_types.append('unbounded')
        self._parent = None if parent is None else weakref.ref(parent)

    def __getstate__(self):
        state = self.__dict__.copy()
        state['_parent'] = self.parent
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        parent = state['_parent']
        self._parent = None if parent is None else weakref.ref(parent)

    @property
    def parent(self):
        """ The object we are a delegate of. """
        return None if self._parent is None else self._parent()

    def _item_count(self):
        """This is used by the replace function to determine if a delegate from
        the target object is 'empty' or not.  If it's empty, it's not an error
        if the replacing object doesn't have this delegate.
        """
        return len(self._parameters)

    def add_parameter(self,
                      target,
                      low=None,
                      high=None,
                      scaler=None,
                      adder=None,
                      start=None,
                      fd_step=None,
                      name=None,
                      scope=None):
        """Adds a parameter or group of parameters to the driver.

        target: string or iter of strings or Parameter
            What the driver should vary during execution. A *target* is an
            expression that can reside on the left-hand side of an assignment
            statement, so typically it will be the name of a variable or
            possibly a subscript expression indicating an entry within an array
            variable, e.g., x[3]. If an iterator of targets is given, then the
            driver will set all targets given to the same value whenever it
            varies this parameter during execution. If a Parameter instance is
            given, then that instance is copied into the driver with any other
            arguments specified, overriding the values in the given parameter.

        low: float (optional)
            Minimum allowed value of the parameter. If scaler and/or adder
            is supplied, use the transformed value here. If target is an array,
            this may also be an array, but must have the same size.

        high: float (optional)
            Maximum allowed value of the parameter. If scaler and/or adder
            is supplied, use the transformed value here. If target is an array,
            this may also be an array, but must have the same size.

        scaler: float (optional)
            Value to multiply the possibly offset parameter value by. If target
            is an array, this may also be an array, but must have the same size.

        adder: float (optional)
            Value to add to parameter prior to possible scaling. If target is
            an array, this may also be an array, but must have the same size.

        start: any (optional)
            Value to set into the target or targets of a parameter before
            starting any executions. If not given, analysis will start with
            whatever values are in the target or targets at that time. If target
            is an array, this may also be an array, but must have the same size.

        fd_step: float (optional)
            Step-size to use for finite difference calculation. If no value is
            given, the differentiator will use its own default. If target is an
            array, this may also be an array, but must have the same size.

        name: str (optional)
            Name used to refer to the parameter in place of the name of the
            variable referred to in the parameter string.
            This is sometimes useful if, for example, multiple entries in the
            same array variable are declared as parameters.

        scope: object (optional)
            The object to be used as the scope when evaluating the expression.

        If neither "low" nor "high" is specified, the min and max will
        default to the values in the metadata of the variable being
        referenced.
        """

        if isinstance(target, (ParameterBase, ParameterGroup)):
            self._parameters[target.name] = target
            target.override(low, high, scaler, adder, start, fd_step, name)
        else:
            if isinstance(target, basestring):
                names = [target]
                key = target
            else:
                names = target
                key = tuple(target)

            if name is not None:
                key = name

            dups = set(self.list_param_targets()).intersection(names)
            if len(dups) == 1:
                self.parent.raise_exception(
                    "'%s' is already a Parameter"
                    " target" % dups.pop(), ValueError)
            elif len(dups) > 1:
                self.parent.raise_exception(
                    "%s are already Parameter targets" % sorted(list(dups)),
                    ValueError)

            if key in self._parameters:
                self.parent.raise_exception("%s is already a Parameter" % key,
                                            ValueError)
            try:
                _scope = self._get_scope(scope)
                if len(names) == 1:
                    target = self._create(names[0], low, high, scaler, adder,
                                          start, fd_step, key, _scope)
                else:  # defining a ParameterGroup
                    parameters = [
                        self._create(n, low, high, scaler, adder, start,
                                     fd_step, key, _scope) for n in names
                    ]
                    types = set([p.valtypename for p in parameters])
                    if len(types) > 1:
                        raise ValueError("Can't add parameter %s because "
                                         "%s are not all of the same type" %
                                         (key, " and ".join(names)))
                    target = ParameterGroup(parameters)
                self._parameters[key] = target
            except Exception:
                self.parent.reraise_exception(info=sys.exc_info())

        self.parent.config_changed()

    def _create(self, target, low, high, scaler, adder, start, fd_step, key,
                scope):
        """ Create one Parameter or ArrayParameter. """
        try:
            expreval = ExprEvaluator(target, scope)
        except Exception as err:
            raise err.__class__("Can't add parameter: %s" % err)
        if not expreval.is_valid_assignee():
            raise ValueError("Can't add parameter: '%s' is not a"
                             " valid parameter expression" % expreval.text)
        try:
            val = expreval.evaluate()
        except Exception as err:
            val = None  # Let Parameter code sort out why.

        name = key[0] if isinstance(key, tuple) else key

        if isinstance(val, ndarray):
            return ArrayParameter(target,
                                  low=low,
                                  high=high,
                                  scaler=scaler,
                                  adder=adder,
                                  start=start,
                                  fd_step=fd_step,
                                  name=name,
                                  scope=scope,
                                  _expreval=expreval,
                                  _val=val,
                                  _allowed_types=self._allowed_types)
        else:
            return Parameter(target,
                             low=low,
                             high=high,
                             scaler=scaler,
                             adder=adder,
                             start=start,
                             fd_step=fd_step,
                             name=name,
                             scope=scope,
                             _expreval=expreval,
                             _val=val,
                             _allowed_types=self._allowed_types)

    def remove_parameter(self, name):
        """Removes the parameter with the given name."""
        param = self._parameters.get(name)
        if param:
            del self._parameters[name]
        else:
            self.parent.raise_exception(
                "Trying to remove parameter '%s' "
                "that is not in this driver." % (name, ), AttributeError)
        self.parent.config_changed()

    def config_parameters(self):
        """Reconfigure parameters from potentially changed targets."""
        for param in self._parameters.values():
            param.configure()

    def get_references(self, name):
        """Return references to component `name` in preparation for subsequent
        :meth:`restore_references` call.

        name: string
            Name of component being removed.
        """
        refs = OrderedDict()
        for pname, param in self._parameters.items():
            if name in param.get_referenced_compnames():
                refs[pname] = param
        return refs

    def remove_references(self, name):
        """Remove references to component `name`.

        name: string
            Name of component being removed.
        """
        to_remove = []
        for pname, param in self._parameters.items():
            if name in param.get_referenced_compnames():
                to_remove.append(pname)

        for pname in to_remove:
            self.remove_parameter(pname)

    def restore_references(self, refs):
        """Restore references to component `name` from `refs`.

        refs: object
            Value returned by :meth:`get_references`.
        """
        for pname, param in refs.items():
            try:
                self.add_parameter(param)
            except Exception as err:
                self.parent._logger.warning(
                    "Couldn't restore parameter '%s': %s" % (pname, str(err)))

    def list_param_targets(self):
        """Returns a list of parameter targets. Note that this
        list may contain more entries than the list of Parameter,
        ParameterGroup, and ArrayParameter objects since ParameterGroup
        instances have multiple targets.
        """
        targets = []
        for param in self._parameters.values():
            targets.extend(param.targets)
        return targets

    def list_param_group_targets(self):
        """Returns a list of tuples that contain the targets for each
        parameter group.
        """
        targets = []
        for param in self.get_parameters().values():
            targets.append(tuple(param.targets))
        return targets

    def clear_parameters(self):
        """Removes all parameters."""
        for name in self._parameters.keys():
            self.remove_parameter(name)
        self._parameters = OrderedDict()

    def get_parameters(self):
        """Returns an ordered dict of parameter objects."""
        return self._parameters

    def total_parameters(self):
        """Returns the total number of values to be set."""
        return sum([param.size for param in self._parameters.values()])

    def init_parameters(self):
        """Sets all parameters to their start value if a
        start value is given
        """
        scope = self._get_scope()
        for param in self._parameters.itervalues():
            if param.start is not None:
                param.set(param.start, scope)

    def set_parameter_by_name(self, name, value, case=None, scope=None):
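A brief, hedged sketch of the add_parameter API documented above, as it might be called from a driver object that mixes in this delegate; the driver, component, and variable names are hypothetical, and the values noted in the comments are only what the docstrings imply.

def add_parameter_sketch(driver):
    # 'driver' is assumed to expose the HasParameters delegate shown above;
    # 'comp.x' and 'comp.y[...]' are hypothetical target expressions.
    driver.add_parameter('comp.x', low=0.0, high=10.0, start=1.0)

    # two targets driven to the same value, grouped under a single name
    driver.add_parameter(('comp.y[0]', 'comp.y[1]'), low=-1.0, high=1.0,
                         name='y_pair')

    targets = driver.list_param_targets()  # e.g. ['comp.x', 'comp.y[0]', 'comp.y[1]']
    total = driver.total_parameters()      # total number of scalar values to set
    driver.remove_parameter('y_pair')
    return targets, total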
Exemple #8
0
class Warrior(object):
    def __init__(self,
                 projects_dir,
                 data_dir,
                 warrior_hq_url,
                 real_shutdown=False,
                 keep_data=False):
        if not os.access(projects_dir, os.W_OK):
            raise Exception("Couldn't write to projects directory: %s" %
                            projects_dir)
        if not os.access(data_dir, os.W_OK):
            raise Exception("Couldn't write to data directory: %s" % data_dir)

        self.projects_dir = projects_dir
        self.data_dir = data_dir
        self.warrior_hq_url = warrior_hq_url
        self.real_shutdown = real_shutdown
        self.keep_data = keep_data

        # disable the password prompts
        self.gitenv = dict(os.environ.items() + {
            'GIT_ASKPASS': '******',
            'SSH_ASKPASS': '******'
        }.items())

        self.warrior_id = StringConfigValue(
            name="warrior_id",
            title="Warrior ID",
            description="The unique number of your warrior instance.",
            editable=False)
        self.selected_project_config_value = StringConfigValue(
            name="selected_project",
            title="Selected project",
            description=
            "The project (to be continued when the warrior restarts).",
            default="none",
            editable=False)
        self.downloader = StringConfigValue(
            name="downloader",
            title="Your nickname",
            description=
            "We use your nickname to show your results on our tracker. Letters and numbers only.",
            regex="^[-_a-zA-Z0-9]{3,30}$",
            advanced=False)
        self.concurrent_items = NumberConfigValue(
            name="concurrent_items",
            title="Concurrent items",
            description=
            "How many items should the warrior download at a time? (Max: 6)",
            min=1,
            max=6,
            default=2)
        self.http_username = StringConfigValue(
            name="http_username",
            title="HTTP username",
            description=
            "Enter a username to protect the web interface, or leave empty.",
            default="")
        self.http_password = StringConfigValue(
            name="http_password",
            title="HTTP password",
            description=
            "Enter a password to protect the web interface, or leave empty.",
            default="")

        self.config_manager = ConfigManager(
            os.path.join(projects_dir, "config.json"))
        self.config_manager.add(self.warrior_id)
        self.config_manager.add(self.selected_project_config_value)
        self.config_manager.add(self.downloader)
        self.config_manager.add(self.concurrent_items)
        self.config_manager.add(self.http_username)
        self.config_manager.add(self.http_password)

        self.bandwidth_monitor = BandwidthMonitor("eth0")
        self.bandwidth_monitor.update()

        self.runner = Runner(concurrent_items=self.concurrent_items,
                             keep_data=self.keep_data)
        self.runner.on_finish += self.handle_runner_finish

        self.current_project_name = None
        self.current_project = None

        self.selected_project = None

        self.projects = {}
        self.installed_projects = set()
        self.failed_projects = set()

        self.on_projects_loaded = Event()
        self.on_project_installing = Event()
        self.on_project_installed = Event()
        self.on_project_installation_failed = Event()
        self.on_project_refresh = Event()
        self.on_project_selected = Event()
        self.on_status = Event()

        self.http_client = AsyncHTTPClient()

        self.installing = False
        self.shut_down_flag = False
        self.reboot_flag = False

        self.hq_updater = ioloop.PeriodicCallback(self.update_warrior_hq,
                                                  10 * 60 * 1000)
        self.project_updater = ioloop.PeriodicCallback(self.update_project,
                                                       60 * 60 * 1000)
        self.forced_reboot_timeout = None

        self.lat_lng = None
        self.find_lat_lng()

    def find_lat_lng(self):
        # response = self.http_client.fetch("http://www.maxmind.com/app/mylocation", self.handle_lat_lng, user_agent="")
        pass

    def handle_lat_lng(self, response):
        m = re.search(
            r"geoip-demo-results-tbodyLatitude/Longitude</td>\s*<td[^>]*>\s*([-/.0-9]+)\s*</td>",
            response.body)
        if m:
            self.lat_lng = m.group(1)

    def bandwidth_stats(self):
        self.bandwidth_monitor.update()
        return self.bandwidth_monitor.current_stats()

    @gen.engine
    def update_warrior_hq(self):
        if realize(self.warrior_id) == None:
            response = yield gen.Task(
                self.http_client.fetch,
                os.path.join(self.warrior_hq_url, "api/register.json"),
                method="POST",
                headers={"Content-Type": "application/json"},
                user_agent=("ArchiveTeam Warrior/%s" % seesaw.__version__),
                body=json.dumps({"warrior": {
                    "version": seesaw.__version__
                }}))
            if response.code == 200:
                data = json.loads(response.body)
                print "Received Warrior ID '%s'." % data["warrior_id"]
                self.config_manager.set_value("warrior_id", data["warrior_id"])
            else:
                print "HTTP error %s" % (response.code)
                return
        else:
            print "Warrior ID '%s'." % realize(self.warrior_id)

        response = yield gen.Task(
            self.http_client.fetch,
            os.path.join(self.warrior_hq_url, "api/update.json"),
            method="POST",
            headers={"Content-Type": "application/json"},
            user_agent=("ArchiveTeam Warrior/%s %s" %
                        (seesaw.__version__, seesaw.runner_type)),
            body=json.dumps({
                "warrior": {
                    "warrior_id": realize(self.warrior_id),
                    "lat_lng": self.lat_lng,
                    "downloader": realize(self.downloader),
                    "selected_project":
                    realize(self.selected_project_config_value)
                }
            }))
        if response.code == 200:
            data = json.loads(response.body)

            if StrictVersion(seesaw.__version__) < StrictVersion(
                    data["warrior"]["seesaw_version"]):
                # time for an update
                print "Reboot for Seesaw update."
                self.reboot_gracefully()

                # schedule a forced reboot after two days
                self.schedule_forced_reboot()
                return

            projects_list = data["projects"]
            self.projects = OrderedDict([(project["name"], project)
                                         for project in projects_list])
            for project_data in self.projects.itervalues():
                if "deadline" in project_data:
                    project_data["deadline_int"] = time.mktime(
                        time.strptime(project_data["deadline"],
                                      "%Y-%m-%dT%H:%M:%SZ"))

            previous_project_choice = realize(
                self.selected_project_config_value)

            if self.selected_project and not self.selected_project in self.projects:
                self.select_project(None)
            elif previous_project_choice in self.projects:
                # select previous project
                self.select_project(previous_project_choice)
            elif previous_project_choice == "auto":
                # ArchiveTeam's choice
                if "auto_project" in data:
                    self.select_project(data["auto_project"])
                else:
                    self.select_project(None)

            self.on_projects_loaded(self, self.projects)

        else:
            print "HTTP error %s" % (response.code)

    @gen.engine
    def install_project(self, project_name, callback=None):
        self.installed_projects.discard(project_name)

        if project_name in self.projects and not self.installing:
            self.installing = project_name
            self.install_output = []

            project = self.projects[project_name]
            project_path = os.path.join(self.projects_dir, project_name)

            self.on_project_installing(self, project)

            if project_name in self.failed_projects:
                if os.path.exists(project_path):
                    shutil.rmtree(project_path)
                self.failed_projects.discard(project_name)

            if os.path.exists(project_path):
                subprocess.Popen(args=[
                    "git", "config", "remote.origin.url", project["repository"]
                ],
                                 cwd=project_path).communicate()

                p = AsyncPopen(args=["git", "pull"],
                               cwd=project_path,
                               env=self.gitenv)
            else:
                p = AsyncPopen(
                    args=["git", "clone", project["repository"], project_path],
                    env=self.gitenv)
            p.on_output += self.collect_install_output
            p.on_end += yield gen.Callback("gitend")
            p.run()
            result = yield gen.Wait("gitend")

            if result != 0:
                self.install_output.append("\ngit returned %d\n" % result)
                self.on_project_installation_failed(
                    self, project, "".join(self.install_output))
                self.installing = None
                self.failed_projects.add(project_name)
                if callback:
                    callback(False)
                return

            project_install_file = os.path.join(project_path,
                                                "warrior-install.sh")

            if os.path.exists(project_install_file):
                p = AsyncPopen(args=[project_install_file], cwd=project_path)
                p.on_output += self.collect_install_output
                p.on_end += yield gen.Callback("installend")
                p.run()
                result = yield gen.Wait("installend")

                if result != 0:
                    self.install_output.append(
                        "\nCustom installer returned %d\n" % result)
                    self.on_project_installation_failed(
                        self, project, "".join(self.install_output))
                    self.installing = None
                    self.failed_projects.add(project_name)
                    if callback:
                        callback(False)
                    return

            data_dir = os.path.join(self.data_dir, "data")
            if os.path.exists(data_dir):
                shutil.rmtree(data_dir)
            os.makedirs(data_dir)

            project_data_dir = os.path.join(project_path, "data")
            if os.path.islink(project_data_dir):
                os.remove(project_data_dir)
            elif os.path.isdir(project_data_dir):
                shutil.rmtree(project_data_dir)
            os.symlink(data_dir, project_data_dir)

            self.installed_projects.add(project_name)
            self.on_project_installed(self, project,
                                      "".join(self.install_output))

            self.installing = None
            if callback:
                callback(True)

    @gen.engine
    def update_project(self):
        if self.selected_project and (yield gen.Task(
                self.check_project_has_update, self.selected_project)):
            # restart project
            self.start_selected_project()

    @gen.engine
    def check_project_has_update(self, project_name, callback):
        if project_name in self.projects:
            project = self.projects[project_name]
            project_path = os.path.join(self.projects_dir, project_name)

            self.install_output = []

            if not os.path.exists(project_path):
                callback(True)
                return

            subprocess.Popen(args=[
                "git", "config", "remote.origin.url", project["repository"]
            ],
                             cwd=project_path).communicate()

            p = AsyncPopen(args=["git", "fetch"],
                           cwd=project_path,
                           env=self.gitenv)
            p.on_output += self.collect_install_output
            p.on_end += yield gen.Callback("gitend")
            p.run()
            result = yield gen.Wait("gitend")

            if result != 0:
                callback(True)
                return

            output = subprocess.Popen(
                args=["git", "rev-list", "HEAD..FETCH_HEAD"],
                cwd=project_path,
                stdout=subprocess.PIPE).communicate()[0]
            if output.strip() != "":
                callback(True)
            else:
                callback(False)

    def collect_install_output(self, data):
        sys.stdout.write(data)
        data = re.sub("[\x00-\x08\x0b\x0c]", "", data)
        self.install_output.append(data)

    @gen.engine
    def select_project(self, project_name):
        if project_name == "auto":
            self.update_warrior_hq()
            return

        if not project_name in self.projects:
            project_name = None

        if project_name != self.selected_project:
            # restart
            self.selected_project = project_name
            self.on_project_selected(self, project_name)
            self.start_selected_project()

    def clone_project(self, project_name, project_path):
        version_string = subprocess.Popen(
            args=["git", "log", "-1", "--pretty=%h"],
            cwd=project_path,
            stdout=subprocess.PIPE).communicate()[0].strip()

        project_versioned_path = os.path.join(
            self.data_dir, "projects",
            "%s-%s" % (project_name, version_string))
        if not os.path.exists(project_versioned_path):
            if not os.path.exists(os.path.join(self.data_dir, "projects")):
                os.makedirs(os.path.join(self.data_dir, "projects"))

            subprocess.Popen(
                args=["git", "clone", project_path, project_versioned_path],
                env=self.gitenv).communicate()

        return project_versioned_path

    def load_pipeline(self, pipeline_path, context):
        dirname, basename = os.path.split(pipeline_path)
        if dirname == "":
            dirname = "."

        with open(pipeline_path) as f:
            pipeline_str = f.read()

        ConfigValue.start_collecting()

        local_context = context
        global_context = context
        curdir = os.getcwd()
        try:
            os.chdir(dirname)
            exec pipeline_str in local_context, global_context
        finally:
            os.chdir(curdir)

        config_values = ConfigValue.stop_collecting()

        return (local_context["project"], local_context["pipeline"],
                config_values)

    @gen.engine
    def start_selected_project(self):
        project_name = self.selected_project

        if project_name in self.projects:
            # install or update project if necessary
            if not project_name in self.installed_projects or (yield gen.Task(
                    self.check_project_has_update, project_name)):
                result = yield gen.Task(self.install_project, project_name)
                if not result:
                    return

            # remove the configuration variables from the previous project
            if self.current_project:
                for config_value in self.current_project.config_values:
                    self.config_manager.remove(config_value.name)

            # the path with the project code
            # (this is the most recent code from the repository)
            project_path = os.path.join(self.projects_dir, project_name)

            # clone the project code to a versioned directory
            # where the pipeline is actually run
            project_versioned_path = self.clone_project(
                project_name, project_path)

            # load the pipeline from the versioned directory
            pipeline_path = os.path.join(project_versioned_path, "pipeline.py")
            (project, pipeline, config_values) = self.load_pipeline(
                pipeline_path, {"downloader": self.downloader})

            # add the configuration values to the config manager
            for config_value in config_values:
                self.config_manager.add(config_value)
            project.config_values = config_values

            # start the pipeline
            if not self.shut_down_flag and not self.reboot_flag:
                self.runner.set_current_pipeline(pipeline)

            self.current_project_name = project_name
            self.current_project = project

            self.on_project_refresh(self, self.current_project, self.runner)
            self.fire_status()

            if not self.shut_down_flag and not self.reboot_flag:
                self.runner.start()

        else:
            # project_name not in self.projects,
            # stop the current project (if there is one)
            self.runner.set_current_pipeline(None)
            self.fire_status()

    def handle_runner_finish(self, runner):
        if self.current_project:
            for config_value in self.current_project.config_values:
                self.config_manager.remove(config_value.name)

        self.current_project_name = None
        self.current_project = None

        self.on_project_refresh(self, self.current_project, self.runner)
        self.fire_status()

        if self.shut_down_flag or self.reboot_flag:
            ioloop.IOLoop.instance().stop()

            if self.real_shutdown:
                if self.shut_down_flag:
                    os.system("sudo shutdown -h now")
                elif self.reboot_flag:
                    os.system("sudo shutdown -r now")

    def start(self):
        if self.real_shutdown:
            # schedule a reboot
            ioloop.IOLoop.instance().add_timeout(datetime.timedelta(days=7),
                                                 self.max_age_reached)

        self.hq_updater.start()
        self.project_updater.start()
        self.update_warrior_hq()
        ioloop.IOLoop.instance().start()

    def max_age_reached(self):
        if self.real_shutdown:
            # time for a sanity reboot
            print "Running for more than 7 days. Time to schedule a reboot."
            self.reboot_gracefully()

            # schedule a forced reboot after two days
            self.schedule_forced_reboot()

    def reboot_gracefully(self):
        self.shut_down_flag = False
        self.reboot_flag = True
        self.fire_status()
        if self.runner.is_active():
            self.runner.set_current_pipeline(None)
        else:
            ioloop.IOLoop.instance().stop()
            if self.real_shutdown:
                os.system("sudo shutdown -r now")

    def schedule_forced_reboot(self):
        if self.real_shutdown and not self.forced_reboot_timeout:
            self.forced_reboot_timeout = ioloop.IOLoop.instance().add_timeout(
                datetime.timedelta(days=2), self.forced_reboot)

    def forced_reboot(self):
        print "Stopping immediately..."
        if self.real_shutdown:
            os.system("sudo shutdown -r now")

    def stop_gracefully(self):
        self.shut_down_flag = True
        self.reboot_flag = False
        self.fire_status()
        if self.runner.is_active():
            self.runner.set_current_pipeline(None)
        else:
            ioloop.IOLoop.instance().stop()
            if self.real_shutdown:
                os.system("sudo shutdown -h now")

    def forced_stop(self):
        ioloop.IOLoop.instance().stop()
        if self.real_shutdown:
            os.system("sudo shutdown -h now")

    def keep_running(self):
        self.shut_down_flag = False
        self.reboot_flag = False
        self.start_selected_project()
        self.fire_status()

    class Status(object):
        NO_PROJECT = "NO_PROJECT"
        INVALID_SETTINGS = "INVALID_SETTINGS"
        STOPPING_PROJECT = "STOPPING_PROJECT"
        RESTARTING_PROJECT = "RESTARTING_PROJECT"
        RUNNING_PROJECT = "RUNNING_PROJECT"
        SWITCHING_PROJECT = "SWITCHING_PROJECT"
        STARTING_PROJECT = "STARTING_PROJECT"
        SHUTTING_DOWN = "SHUTTING_DOWN"
        REBOOTING = "REBOOTING"

    def fire_status(self):
        self.on_status(self, self.warrior_status())

    def warrior_status(self):
        if self.shut_down_flag:
            return Warrior.Status.SHUTTING_DOWN
        elif self.reboot_flag:
            return Warrior.Status.REBOOTING
        elif not self.config_manager.all_valid():
            return Warrior.Status.INVALID_SETTINGS
        elif self.selected_project == None and self.current_project_name == None:
            return Warrior.Status.NO_PROJECT
        elif self.selected_project:
            if self.selected_project == self.current_project_name:
                return Warrior.Status.RUNNING_PROJECT
            else:
                return Warrior.Status.STARTING_PROJECT
        else:
            return Warrior.Status.STOPPING_PROJECT
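The Status constants above are plain strings, so a consumer of the on_status event can map them straight to display text. A small hedged sketch follows; the message wording is illustrative, not taken from the source, and it could be wired up as warrior.on_status += lambda w, s: show(describe_status(s)), where show() is whatever display routine the UI provides.

def describe_status(status):
    # Maps the Warrior.Status constants above to short human-readable messages.
    # The phrasing is illustrative only; unknown values fall through unchanged.
    messages = {
        Warrior.Status.NO_PROJECT: "No project selected.",
        Warrior.Status.INVALID_SETTINGS: "Please review your settings.",
        Warrior.Status.STOPPING_PROJECT: "Stopping the current project...",
        Warrior.Status.STARTING_PROJECT: "Starting the selected project...",
        Warrior.Status.RUNNING_PROJECT: "Project is running.",
        Warrior.Status.SHUTTING_DOWN: "Shutting down after the current items finish.",
        Warrior.Status.REBOOTING: "Rebooting after the current items finish.",
    }
    return messages.get(status, status)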
def getSphinxDocumentationApi(packages, cloneDirectory, outputDirectory,
                              apiFile):
    """
	This definition builds the Sphinx documentation API.

	:param packages: Packages. ( String )
	:param cloneDirectory: Source clone directory. ( String )
	:param outputDirectory: Content directory. ( String )
	:param apiFile: API file. ( String )
	"""

    LOGGER.info("{0} | Building Sphinx documentation API!".format(
        getSphinxDocumentationApi.__name__))

    if os.path.exists(cloneDirectory):
        shutil.rmtree(cloneDirectory)
        os.makedirs(cloneDirectory)

    packagesModules = {"apiModules": [], "testsModules": []}
    for package in packages.split(","):
        package = __import__(package)
        path = foundations.common.getFirstItem(package.__path__)
        sourceDirectory = os.path.dirname(path)

        for file in sorted(
                list(
                    foundations.walkers.filesWalker(
                        sourceDirectory,
                        filtersIn=("{0}.*\.ui$".format(path), )))):
            LOGGER.info("{0} | Ui file: '{1}'".format(
                getSphinxDocumentationApi.__name__, file))
            targetDirectory = os.path.dirname(file).replace(
                sourceDirectory, "")
            directory = "{0}{1}".format(cloneDirectory, targetDirectory)
            if not foundations.common.pathExists(directory):
                os.makedirs(directory)
            source = os.path.join(directory, os.path.basename(file))
            shutil.copyfile(file, source)

        modules = []
        for file in sorted(
                list(
                    foundations.walkers.filesWalker(
                        sourceDirectory,
                        filtersIn=("{0}.*\.py$".format(path), ),
                        filtersOut=EXCLUDED_PYTHON_MODULES))):
            LOGGER.info("{0} | Python file: '{1}'".format(
                getSphinxDocumentationApi.__name__, file))
            module = "{0}.{1}".format(
                (".".join(
                    os.path.dirname(file).replace(sourceDirectory,
                                                  "").split("/"))),
                foundations.strings.getSplitextBasename(file)).strip(".")
            LOGGER.info("{0} | Module name: '{1}'".format(
                getSphinxDocumentationApi.__name__, module))
            directory = os.path.dirname(
                os.path.join(cloneDirectory, module.replace(".", "/")))
            if not foundations.common.pathExists(directory):
                os.makedirs(directory)
            source = os.path.join(directory, os.path.basename(file))
            shutil.copyfile(file, source)

            sourceFile = File(source)
            sourceFile.cache()
            trimFromIndex = trimEndIndex = None
            inMultilineString = inDecorator = False
            for i, line in enumerate(sourceFile.content):
                if re.search(r"__name__ +\=\= +\"__main__\"", line):
                    trimFromIndex = i
                for pattern, value in CONTENT_SUBSTITUTIONS.iteritems():
                    if re.search(pattern, line):
                        sourceFile.content[i] = re.sub(pattern, value, line)

                strippedLine = line.strip()
                if re.search(r"^\"\"\"", strippedLine):
                    inMultilineString = not inMultilineString

                if inMultilineString:
                    continue

                if re.search(r"^@\w+", strippedLine) and \
                not re.search(r"@property", strippedLine) and \
                not re.search(r"^@\w+\.setter", strippedLine) and \
                not re.search(r"^@\w+\.deleter", strippedLine):
                    inDecorator = True
                    indent = re.search(r"^([ \t]*)", line)

                if re.search(r"^[ \t]*def \w+", sourceFile.content[i]) or \
                 re.search(r"^[ \t]*class \w+", sourceFile.content[i]):
                    inDecorator = False

                if not inDecorator:
                    continue

                sourceFile.content[i] = "{0}{1} {2}".format(
                    indent.groups()[0], DECORATORS_COMMENT_MESSAGE, line)

            if trimFromIndex:
                LOGGER.info("{0} | Trimming '__main__' statements!".format(
                    getSphinxDocumentationApi.__name__))
                content = [sourceFile.content[i] for i in range(trimFromIndex)]
                content.append("{0}\n".format(STATEMENTS_UPDATE_MESSAGGE))
                sourceFile.content = content
            sourceFile.write()

            if "__init__.py" in file:
                continue

            rstFilePath = "{0}{1}".format(module, FILES_EXTENSION)
            LOGGER.info("{0} | Building API file: '{1}'".format(
                getSphinxDocumentationApi.__name__, rstFilePath))
            rstFile = File(os.path.join(outputDirectory, rstFilePath))
            header = [
                "_`{0}`\n".format(module),
                "==={0}\n".format("=" * len(module)), "\n",
                ".. automodule:: {0}\n".format(module), "\n"
            ]
            rstFile.content.extend(header)

            functions = OrderedDict()
            classes = OrderedDict()
            moduleAttributes = OrderedDict()
            for member, object in moduleBrowser._readmodule(
                    module, [
                        source,
                    ]).iteritems():
                if object.__class__ == moduleBrowser.Function:
                    if not member.startswith("_"):
                        functions[member] = [
                            ".. autofunction:: {0}\n".format(member)
                        ]
                elif object.__class__ == moduleBrowser.Class:
                    classes[member] = [
                        ".. autoclass:: {0}\n".format(member),
                        "	:show-inheritance:\n", "	:members:\n"
                    ]
                elif object.__class__ == moduleBrowser.Global:
                    if not member.startswith("_"):
                        moduleAttributes[member] = [
                            ".. attribute:: {0}.{1}\n".format(module, member)
                        ]

            moduleAttributes and rstFile.content.append(
                "Module Attributes\n-----------------\n\n")
            for moduleAttribute in moduleAttributes.itervalues():
                rstFile.content.extend(moduleAttribute)
                rstFile.content.append("\n")

            functions and rstFile.content.append("Functions\n---------\n\n")
            for function in functions.itervalues():
                rstFile.content.extend(function)
                rstFile.content.append("\n")

            classes and rstFile.content.append("Classes\n-------\n\n")
            for class_ in classes.itervalues():
                rstFile.content.extend(class_)
                rstFile.content.append("\n")

            rstFile.write()
            modules.append(module)

        packagesModules["apiModules"].extend(
            [module for module in modules if not "tests" in module])
        packagesModules["testsModules"].extend(
            [module for module in modules if "tests" in module])

    apiFile = File(apiFile)
    apiFile.content.extend(TOCTREE_TEMPLATE_BEGIN)
    for module in packagesModules["apiModules"]:
        apiFile.content.append("   {0} <{1}>\n".format(
            module, "api/{0}".format(module)))
    for module in packagesModules["testsModules"]:
        apiFile.content.append("   {0} <{1}>\n".format(
            module, "api/{0}".format(module)))
    apiFile.content.extend(TOCTREE_TEMPLATE_END)
    apiFile.write()
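A hedged invocation sketch for the definition above; the package name and paths are placeholders, and the logging and File/walker setup that the surrounding foundations-based tooling normally provides is assumed to be configured elsewhere.

if __name__ == "__main__":
    # Placeholder arguments: 'packages' is a comma-separated string of
    # importable package names, as the definition above expects.
    getSphinxDocumentationApi("foundations",
                              "/tmp/docs/clone",
                              "/tmp/docs/content",
                              "/tmp/docs/content/api.rst")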
Exemple #10
0
 def apply(self):
     """Transform each
     :class:`~sphinxcontrib.bibtex.nodes.bibliography` node into a
     list of citations.
     """
     env = self.document.settings.env
     for bibnode in self.document.traverse(bibliography):
         # get the information of this bibliography node
         # by looking up its id in the bibliography cache
         id_ = bibnode['ids'][0]
         infos = [info for other_id, info
                  in env.bibtex_cache.bibliographies.iteritems()
                  if other_id == id_ and info.docname == env.docname]
         if not infos:
             raise RuntimeError(
                 "document %s has no bibliography nodes with id '%s'"
                 % (env.docname, id_))
         elif len(infos) >= 2:
             raise RuntimeError(
                 "document %s has multiple bibliography nodes with id '%s'"
                 % (env.docname, id_))
         info = infos[0]
         # generate entries
         entries = OrderedDict()
         for bibfile in info.bibfiles:
             # XXX entries are modified below in an unpickable way
             # XXX so fetch a deep copy
             data = env.bibtex_cache.bibfiles[bibfile].data
             if info.cite == "all":
                 bibfile_entries = data.entries.itervalues()
             elif info.cite == "cited":
                 bibfile_entries = (
                     entry for entry in data.entries.itervalues()
                     if env.bibtex_cache.is_cited(entry.key))
             elif info.cite == "notcited":
                 bibfile_entries = (
                     entry for entry in data.entries.itervalues()
                     if not env.bibtex_cache.is_cited(entry.key))
             else:
                 raise RuntimeError("invalid cite option (%s)" % info.cite)
             for entry in bibfile_entries:
                 entries[entry.key] = copy.deepcopy(entry)
         # order entries according to which were cited first
         # first, we add all keys that were cited
         # then, we add all remaining keys
         sorted_entries = []
         for key in env.bibtex_cache.get_all_cited_keys():
             try:
                 entry = entries.pop(key)
             except KeyError:
                 pass
             else:
                 sorted_entries.append(entry)
         sorted_entries += entries.itervalues()
         # locate and instantiate style plugin
         style_cls = find_plugin(
             'pybtex.style.formatting', info.style)
         style = style_cls()
         # create citation nodes for all references
         backend = output_backend()
         if info.list_ == "enumerated":
             nodes = docutils.nodes.enumerated_list()
             nodes['enumtype'] = info.enumtype
             if info.start >= 1:
                 nodes['start'] = info.start
                 env.bibtex_cache.set_enum_count(env.docname, info.start)
             else:
                 nodes['start'] = env.bibtex_cache.get_enum_count(env.docname)
         elif info.list_ == "bullet":
             nodes = docutils.nodes.bullet_list()
         else: # "citation"
             nodes = docutils.nodes.paragraph()
         # XXX style.format_entries modifies entries in unpickable way
         for entry in style.format_entries(sorted_entries):
             if info.list_ == "enumerated" or info.list_ == "bullet":
                 citation = docutils.nodes.list_item()
                 citation += entry.text.render(backend)
             else: # "citation"
                 citation = backend.citation(entry, self.document)
                 # backend.citation(...) uses entry.key as citation label
                 # we change it to entry.label later onwards
                 # but we must note the entry.label now;
                 # at this point, we also already prefix the label
                 key = citation[0].astext()
                 info.labels[key] = info.labelprefix + entry.label
             node_text_transform(citation, transform_url_command)
             if info.curly_bracket_strip:
                 node_text_transform(citation, transform_curly_bracket_strip)
             nodes += citation
             if info.list_ == "enumerated":
                 env.bibtex_cache.inc_enum_count(env.docname)
         bibnode.replace_self(nodes)
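The citation-ordering step in the middle of apply() above is worth isolating. Below is a minimal standalone sketch of the same idea, assuming an OrderedDict of entries keyed in bib-file order and a list of keys in first-cited order; it is a simplification, not the module's own helper.

def sort_by_citation_order(entries, cited_keys):
    # entries: OrderedDict mapping entry key -> entry, in bib file order
    # cited_keys: keys in the order in which they were first cited
    ordered = []
    for key in cited_keys:
        if key in entries:
            ordered.append(entries.pop(key))
    # remaining (uncited or not-yet-cited) entries keep their original order
    ordered.extend(entries.values())
    return ordered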
Exemple #11
0
class SectionsFileParser(foundations.io.File):
	"""
	This class provides methods to parse sections file format files,
	an alternative configuration file parser is available directly with Python: :class:`ConfigParser.ConfigParser`.

	The parser given by this class has some major differences with Python :class:`ConfigParser.ConfigParser`:

		- | Sections and attributes are stored in their appearance order by default.
			( Using Python :class:`collections.OrderedDict` )
		- | A default section ( **_defaults** ) will store orphan attributes
			( Attributes appearing before any declared section ).
		- File comments are stored inside the :obj:`SectionsFileParser.comments` class property. 
		- | Sections, attributes and values are whitespaces stripped by default
			but can also be stored with their leading and trailing whitespaces. 
		- | Values are quotation markers stripped by default
			but can also be stored with their leading and trailing quotation markers.
		- Attributes are namespaced by default allowing sections to merge without key collisions.

	"""

	def __init__(self,
				file=None,
				splitters=("=", ":"),
				namespaceSplitter="|",
				commentLimiters=(";", "#"),
				commentMarker="#",
				quotationMarkers=("\"", "'", "`"),
				rawSectionContentIdentifier="_rawSectionContent",
				defaultsSection="_defaults"):
		"""
		This method initializes the class.
		
		Usage::
		
			>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse(stripComments=False)
			True
			>>> sectionsFileParser.sections.keys()
			['Section A', 'Section B']
			>>> sectionsFileParser.comments 
			OrderedDict([('Section A|#0', {'content': 'Comment.', 'id': 0})])

		:param file: Current file path. ( String )
		:param splitters: Splitter characters.  ( Tuple / List )
		:param namespaceSplitter: Namespace splitter character. ( String )
		:param commentLimiters: Comment limiters characters. ( Tuple / List )
		:param commentMarker: Character used to prefix extracted comment identifiers. ( String )
		:param quotationMarkers: Quotation markers characters. ( Tuple / List )
		:param rawSectionContentIdentifier: Raw section content identifier. ( String )
		:param defaultsSection: Default section name. ( String )
		"""

		LOGGER.debug("> Initializing '{0}()' class.".format(self.__class__.__name__))

		foundations.io.File.__init__(self, file)

		# --- Setting class attributes. ---
		self.__splitters = None
		self.splitters = splitters
		self.__namespaceSplitter = None
		self.namespaceSplitter = namespaceSplitter
		self.__commentLimiters = None
		self.commentLimiters = commentLimiters
		self.__commentMarker = None
		self.commentMarker = commentMarker
		self.__quotationMarkers = None
		self.quotationMarkers = quotationMarkers
		self.__rawSectionContentIdentifier = None
		self.rawSectionContentIdentifier = rawSectionContentIdentifier
		self.__defaultsSection = None
		self.defaultsSection = defaultsSection

		self.__sections = None
		self.__comments = None
		self.__parsingErrors = None

	#******************************************************************************************************************
	#***	Attributes properties.
	#******************************************************************************************************************
	@property
	def splitters(self):
		"""
		This method is the property for **self.__splitters** attribute.

		:return: self.__splitters. ( Tuple / List )
		"""

		return self.__splitters

	@splitters.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def splitters(self, value):
		"""
		This method is the setter method for **self.__splitters** attribute.

		:param value: Attribute value. ( Tuple / List )
		"""

		if value is not None:
			assert type(value) in (tuple, list), "'{0}' attribute: '{1}' type is not 'tuple' or 'list'!".format(
			"splitters", value)
			for element in value:
				assert type(element) in (str, unicode), "'{0}' attribute: '{1}' type is not 'str' or 'unicode'!".format(
				"splitters", element)
				assert len(element) == 1, "'{0}' attribute: '{1}' has multiple characters!".format("splitter", element)
				assert not re.search(r"\w", element), "'{0}' attribute: '{1}' is an alphanumeric character!".format(
				"splitter", element)
		self.__splitters = value

	@splitters.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def splitters(self):
		"""
		This method is the deleter method for **self.__splitters** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "splitters"))

	@property
	def namespaceSplitter(self):
		"""
		This method is the property for **self.__namespaceSplitter** attribute.

		:return: self.__namespaceSplitter. ( String )
		"""

		return self.__namespaceSplitter

	@namespaceSplitter.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def namespaceSplitter(self, value):
		"""
		This method is the setter method for **self.__namespaceSplitter** attribute.

		:param value: Attribute value. ( String )
		"""

		if value is not None:
			assert type(value) in (str, unicode), "'{0}' attribute: '{1}' type is not 'str' or 'unicode'!".format(
			"namespaceSplitter", value)
			assert len(value) == 1, "'{0}' attribute: '{1}' has multiple characters!".format("namespaceSplitter", value)
			assert not re.search(r"\w", value), "'{0}' attribute: '{1}' is an alphanumeric character!".format(
			"namespaceSplitter", value)
		self.__namespaceSplitter = value

	@namespaceSplitter.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def namespaceSplitter(self):
		"""
		This method is the deleter method for **self.__namespaceSplitter** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "namespaceSplitter"))

	@property
	def commentLimiters(self):
		"""
		This method is the property for **self.__commentLimiters** attribute.

		:return: self.__commentLimiters. ( Tuple / List )
		"""

		return self.__commentLimiters

	@commentLimiters.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def commentLimiters(self, value):
		"""
		This method is the setter method for **self.__commentLimiters** attribute.

		:param value: Attribute value. ( Tuple / List )
		"""

		if value is not None:
			assert type(value) in (tuple, list), "'{0}' attribute: '{1}' type is not 'tuple' or 'list'!".format(
			"commentLimiters", value)
			for element in value:
				assert type(element) in (str, unicode), "'{0}' attribute: '{1}' type is not 'str' or 'unicode'!".format(
				"commentLimiters", element)
		self.__commentLimiters = value

	@commentLimiters.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def commentLimiters(self):
		"""
		This method is the deleter method for **self.__commentLimiters** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "commentLimiters"))

	@property
	def commentMarker(self):
		"""
		This method is the property for **self.__commentMarker** attribute.

		:return: self.__commentMarker. ( String )
		"""

		return self.__commentMarker

	@commentMarker.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def commentMarker(self, value):
		"""
		This method is the setter method for **self.__commentMarker** attribute.

		:param value: Attribute value. ( String )
		"""

		if value is not None:
			assert type(value) in (str, unicode), "'{0}' attribute: '{1}' type is not 'str' or 'unicode'!".format(
			"commentMarker", value)
			assert not re.search(r"\w", value), "'{0}' attribute: '{1}' is an alphanumeric character!".format(
			"commentMarker", value)
		self.__commentMarker = value

	@commentMarker.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def commentMarker(self):
		"""
		This method is the deleter method for **self.__commentMarker** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "commentMarker"))

	@property
	def quotationMarkers(self):
		"""
		This method is the property for **self.__quotationMarkers** attribute.

		:return: self.__quotationMarkers. ( Tuple / List )
		"""

		return self.__quotationMarkers

	@quotationMarkers.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def quotationMarkers(self, value):
		"""
		This method is the setter method for **self.__quotationMarkers** attribute.

		:param value: Attribute value. ( Tuple / List )
		"""

		if value is not None:
			assert type(value) in (tuple, list), "'{0}' attribute: '{1}' type is not 'tuple' or 'list'!".format(
			"quotationMarkers", value)
			for element in value:
				assert type(element) in (str, unicode), "'{0}' attribute: '{1}' type is not 'str' or 'unicode'!".format(
				"quotationMarkers", element)
				assert len(element) == 1, "'{0}' attribute: '{1}' has multiple characters!".format("quotationMarkers", element)
				assert not re.search(r"\w", element), "'{0}' attribute: '{1}' is an alphanumeric character!".format(
				"quotationMarkers", element)
		self.__quotationMarkers = value

	@quotationMarkers.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def quotationMarkers(self):
		"""
		This method is the deleter method for **self.__quotationMarkers** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "quotationMarkers"))

	@property
	def rawSectionContentIdentifier(self):
		"""
		This method is the property for **self.__rawSectionContentIdentifier** attribute.

		:return: self.__rawSectionContentIdentifier. ( String )
		"""

		return self.__rawSectionContentIdentifier

	@rawSectionContentIdentifier.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def rawSectionContentIdentifier(self, value):
		"""
		This method is the setter method for **self.__rawSectionContentIdentifier** attribute.

		:param value: Attribute value. ( String )
		"""

		if value is not None:
			assert type(value) in (str, unicode), "'{0}' attribute: '{1}' type is not 'str' or 'unicode'!".format(
			"rawSectionContentIdentifier", value)
		self.__rawSectionContentIdentifier = value

	@rawSectionContentIdentifier.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def rawSectionContentIdentifier(self):
		"""
		This method is the deleter method for **self.__rawSectionContentIdentifier** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "rawSectionContentIdentifier"))

	@property
	def defaultsSection(self):
		"""
		This method is the property for **self.__defaultsSection** attribute.

		:return: self.__defaultsSection. ( String )
		"""

		return self.__defaultsSection

	@defaultsSection.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def defaultsSection(self, value):
		"""
		This method is the setter method for **self.__defaultsSection** attribute.

		:param value: Attribute value. ( String )
		"""

		if value is not None:
			assert type(value) in (str, unicode), "'{0}' attribute: '{1}' type is not 'str' or 'unicode'!".format(
			"defaultsSection", value)
		self.__defaultsSection = value

	@defaultsSection.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def defaultsSection(self):
		"""
		This method is the deleter method for **self.__defaultsSection** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "defaultsSection"))

	@property
	def sections(self):
		"""
		This method is the property for **self.__sections** attribute.

		:return: self.__sections. ( OrderedDict / Dictionary )
		"""

		return self.__sections

	@sections.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def sections(self, value):
		"""
		This method is the setter method for **self.__sections** attribute.

		:param value: Attribute value. ( OrderedDict / Dictionary )
		"""

		if value is not None:
			assert type(value) in (OrderedDict, dict), "'{0}' attribute: '{1}' type is not \
			'OrderedDict' or 'dict'!".format("sections", value)
			for key, element in value.iteritems():
				assert type(key) in (str, unicode), "'{0}' attribute: '{1}' type is not 'str' or 'unicode'!".format(
				"sections", key)
				assert type(element) in (OrderedDict, dict), "'{0}' attribute: '{1}' type is not \
				'OrderedDict' or 'dict'!".format("sections", key)
		self.__sections = value

	@sections.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def sections(self):
		"""
		This method is the deleter method for **self.__sections** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "sections"))

	@property
	def comments(self):
		"""
		This method is the property for **self.__comments** attribute.

		:return: self.__comments. ( OrderedDict / Dictionary )
		"""

		return self.__comments

	@comments.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def comments(self, value):
		"""
		This method is the setter method for **self.__comments** attribute.

		:param value: Attribute value. ( OrderedDict / Dictionary )
		"""

		if value is not None:
			assert type(value) in (OrderedDict, dict), "'{0}' attribute: '{1}' type is not \
			'OrderedDict' or 'dict'!".format("comments", value)
			for key, element in value.iteritems():
				assert type(key) in (str, unicode), "'{0}' attribute: '{1}' type is not 'str' or 'unicode'!".format(
				"comments", key)
				assert type(element) in (OrderedDict, dict), "'{0}' attribute: '{1}' type is not \
				'OrderedDict' or 'dict'!".format("comments", key)
		self.__comments = value

	@comments.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def comments(self):
		"""
		This method is the deleter method for **self.__comments** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "comments"))

	@property
	def parsingErrors(self):
		"""
		This method is the property for **self.__parsingErrors** attribute.

		:return: self.__parsingErrors. ( List )
		"""

		return self.__parsingErrors

	@parsingErrors.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def parsingErrors(self, value):
		"""
		This method is the setter method for **self.__parsingErrors** attribute.

		:param value: Attribute value. ( List )
		"""

		if value is not None:
			assert type(value) is list, "'{0}' attribute: '{1}' type is not 'list'!".format("parsingErrors", value)
			for element in value:
				assert issubclass(element.__class__, foundations.exceptions.AbstractParsingError), \
				"'{0}' attribute: '{1}' is not a '{2}' subclass!".format(
				"parsingErrors", element, foundations.exceptions.AbstractParsingError.__class__.__name__)
		self.__parsingErrors = value

	@parsingErrors.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def parsingErrors(self):
		"""
		This method is the deleter method for **self.__parsingErrors** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "parsingErrors"))

	#******************************************************************************************************************
	#***	Class methods.
	#******************************************************************************************************************
	@foundations.exceptions.handleExceptions(foundations.exceptions.FileStructureParsingError)
	def parse(self,
			orderedDictionary=True,
			rawSections=None,
			namespaces=True,
			stripComments=True,
			stripWhitespaces=True,
			stripQuotationMarkers=True,
			raiseParsingErrors=True):
		"""
		This method processes the file content and extracts the sections / attributes
			as nested :class:`collections.OrderedDict` dictionaries or dictionaries.

		Usage::

			>>> content = ["; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse(stripComments=False)
			True
			>>> sectionsFileParser.sections.keys()
			['_defaults']
			>>> sectionsFileParser.sections["_defaults"].values()
			['Value A', 'Value B']
			>>> sectionsFileParser.parse(stripQuotationMarkers=False)
			True
			>>> sectionsFileParser.sections["_defaults"].values()
			['"Value A"', '"Value B"']
			>>> sectionsFileParser.comments 
			OrderedDict([('_defaults|#0', {'content': 'Comment.', 'id': 0})])
			>>> sectionsFileParser.parse()
			True
			>>> sectionsFileParser.sections["_defaults"]
			OrderedDict([('_defaults|Attribute 1', 'Value A'), ('_defaults|Attribute 2', 'Value B')])
			>>> sectionsFileParser.parse(namespaces=False)
			True
			>>> sectionsFileParser.sections["_defaults"]
			OrderedDict([('Attribute 1', 'Value A'), ('Attribute 2', 'Value B')])

		:param orderedDictionary: SectionsFileParser data is stored
			in :class:`collections.OrderedDict` dictionaries. ( Boolean )
		:param rawSections: Ignored raw sections. ( Tuple / List )
		:param namespaces: Attributes and comments are namespaced. ( Boolean )
		:param stripComments: Comments are stripped. ( Boolean )
		:param stripWhitespaces: Whitespaces are stripped. ( Boolean )
		:param stripQuotationMarkers: Attributes values quotation markers are stripped. ( Boolean )
		:param raiseParsingErrors: Raise parsing errors. ( Boolean )
		:return: Method success. ( Boolean )
		"""

		LOGGER.debug("> Reading sections from: '{0}'.".format(self.path))

		if not self.content:
			return False

		if not orderedDictionary:
			self.__sections = {}
			self.__comments = {}
			attributes = {}
		else:
			self.__sections = OrderedDict()
			self.__comments = OrderedDict()
			attributes = OrderedDict()
		section = self.__defaultsSection
		rawSections = rawSections or []
		self.__parsingErrors = []

		commentId = 0
		for i, line in enumerate(self.content):
			# Comments matching.
			search = re.search(r"^\s*[{0}](?P<comment>.+)$".format("".join(self.__commentLimiters)), line)
			if search:
				if not stripComments:
					comment = namespaces and foundations.namespace.setNamespace(section, "{0}{1}".format(
							self.__commentMarker, commentId), self.__namespaceSplitter) or \
							"{0}{1}".format(self.__commentMarker, commentId)
					self.__comments[comment] = {"id" : commentId, "content" : stripWhitespaces and \
												search.group("comment").strip() or search.group("comment")}
					commentId += 1
				continue

			# Sections matching.
			search = re.search(r"^\s*\[(?P<section>.+)\]\s*$", line)
			if search:
				section = stripWhitespaces and search.group("section").strip() or search.group("section")
				if not orderedDictionary:
					attributes = {}
				else:
					attributes = OrderedDict()
				rawContent = []
				continue

			if section in rawSections:
				rawContent.append(line)
				attributes[self.__rawSectionContentIdentifier] = rawContent
			else:
				# Empty line matching.
				search = re.search(r"^\s*$", line)
				if search:
					continue

				# Attributes matching.
				search = re.search(r"^(?P<attribute>.+?)[{0}](?P<value>.+)$".format("".join(self.__splitters)), line)
				if search:
					attribute = stripWhitespaces and search.group("attribute").strip() or search.group("attribute")
					attribute = namespaces and foundations.namespace.setNamespace(section,
																				attribute,
																				self.__namespaceSplitter) or attribute
					value = stripWhitespaces and search.group("value").strip() or search.group("value")
					attributes[attribute] = stripQuotationMarkers and value.strip("".join(self.__quotationMarkers)) or value
				else:
					self.__parsingErrors.append(foundations.exceptions.AttributeStructureParsingError(
					"Attribute structure is invalid: {0}".format(line), i + 1))

			self.__sections[section] = attributes

		LOGGER.debug("> Sections: '{0}'.".format(self.__sections))
		LOGGER.debug("> '{0}' file parsing done!".format(self.path))

		if self.__parsingErrors and raiseParsingErrors:
			raise foundations.exceptions.FileStructureParsingError(
			"{0} | '{1}' structure is invalid, parsing exceptions occured!".format(self.__class__.__name__, self.path))

		return True

	def sectionExists(self, section):
		"""
		This method checks if given section exists.
		
		Usage::

			>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse()
			True
			>>> sectionsFileParser.sectionExists("Section A")
			True
			>>> sectionsFileParser.sectionExists("Section C")
			False

		:param section: Section to check existence. ( String )
		:return: Section existence. ( Boolean )
		"""

		if not self.__sections:
			return False

		if section in self.__sections:
			LOGGER.debug("> '{0}' section exists in '{1}'.".format(section, self))
			return True
		else:
			LOGGER.debug("> '{0}' section doesn't exists in '{1}'.".format(section, self))
			return False

	def attributeExists(self, attribute, section):
		"""
		This method checks if given attribute exists.

		Usage::

			>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse()
			True
			>>> sectionsFileParser.attributeExists("Attribute 1", "Section A")
			True
			>>> sectionsFileParser.attributeExists("Attribute 2", "Section A")
			False

		:param attribute: Attribute to check existence. ( String )
		:param section: Section to search attribute into. ( String )
		:return: Attribute existence. ( Boolean )
		"""

		if not self.__sections:
			return False

		if foundations.namespace.removeNamespace(attribute, rootOnly=True) in self.getAttributes(section,
																					orderedDictionary=True,
																					stripNamespaces=True):
			LOGGER.debug("> '{0}' attribute exists in '{1}' section.".format(attribute, section))
			return True
		else:
			LOGGER.debug("> '{0}' attribute doesn't exists in '{1}' section.".format(attribute, section))
			return False

	def getAttributes(self, section, orderedDictionary=True, stripNamespaces=False):
		"""
		This method returns given section attributes.

		Usage::

			>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse()
			True
			>>> sectionsFileParser.getAttributes("Section A")
			OrderedDict([('Section A|Attribute 1', 'Value A')])
			>>> sectionsFileParser.getAttributes("Section A", orderedDictionary=False)
			{'Section A|Attribute 1': 'Value A'}
			>>> sectionsFileParser.getAttributes("Section A", stripNamespaces=True)
			OrderedDict([('Attribute 1', 'Value A')])

		:param section: Section containing the requested attributes. ( String )
		:param orderedDictionary: Use an :class:`collections.OrderedDict` dictionary to store the attributes. ( Boolean )
		:param stripNamespaces: Strip namespaces while retrieving attributes. ( Boolean )
		:return: Attributes. ( OrderedDict / Dictionary )
		"""

		LOGGER.debug("> Getting section '{0}' attributes.".format(section))
		dictionary = orderedDictionary and OrderedDict or dict
		attributes = dictionary()
		if not self.sectionExists(section):
			return attributes

		if stripNamespaces:
			for attribute, value in self.__sections[section].iteritems():
				attributes[foundations.namespace.removeNamespace(attribute, rootOnly=True)] = value
		else:
			attributes.update(self.__sections[section])
		LOGGER.debug("> Attributes: '{0}'.".format(attributes))
		return attributes

	def getAllAttributes(self, orderedDictionary=True):
		"""
		This method returns all sections attributes.

		Usage::

			>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse()
			True
			>>> sectionsFileParser.getAllAttributes()
			OrderedDict([('Section A|Attribute 1', 'Value A'), ('Section B|Attribute 2', 'Value B')])
			>>> sectionsFileParser.getAllAttributes(orderedDictionary=False)
			{'Section B|Attribute 2': 'Value B', 'Section A|Attribute 1': 'Value A'}

		:param orderedDictionary: Use an :class:`collections.OrderedDict` dictionary to store the attributes. ( Boolean )
		:return: All sections / files attributes. ( OrderedDict / Dictionary )
		"""

		dictionary = orderedDictionary and OrderedDict or dict
		allAttributes = dictionary()
		if not self.__sections:
			return allAttributes

		for attributes in self.__sections.itervalues():
			for attribute, value in attributes.iteritems():
				allAttributes[attribute] = value
		return allAttributes

	def getValue(self, attribute, section, encode=False, default=str()):
		"""
		This method returns requested attribute value.

		Usage::

			>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse()
			True
			>>> sectionsFileParser.getValue("Attribute 1", "Section A")
			Value A

		:param attribute: Attribute name. ( String )
		:param section: Section containing the searched attribute. ( String )
		:param encode: Encode value to unicode. ( Boolean )
		:param default: Default return value. ( Object )
		:return: Attribute value. ( String )
		"""

		if not self.__sections:
			return default

		if not self.attributeExists(attribute, section):
			return default

		if attribute in self.__sections[section]:
			value = self.__sections[section][attribute]
		elif foundations.namespace.setNamespace(section, attribute) in self.__sections[section]:
			value = self.__sections[section][foundations.namespace.setNamespace(section, attribute)]
		LOGGER.debug("> Attribute: '{0}', value: '{1}'.".format(attribute, value))
		value = foundations.strings.encode(value) if encode else value
		return value

	def write(self,
			namespaces=False,
			splitter="=",
			commentLimiter=(";"),
			spacesAroundSplitter=True,
			spaceAfterCommentLimiter=True):
		"""
		This method writes defined file using :obj:`SectionsFileParser.sections` and
			:obj:`SectionsFileParser.comments` class properties content.

		Usage::

			>>> sections = {"Section A": {"Section A|Attribute 1": "Value A"}, \
"Section B": {"Section B|Attribute 2": "Value B"}}
			>>> sectionsFileParser = SectionsFileParser("SectionsFile.rc")
			>>> sectionsFileParser.sections = sections
			>>> sectionsFileParser.write()
			True
			>>> sectionsFileParser.read()
			True
			>>> print sectionsFileParser.content[0:5]
			['[Section A]\\n', 'Attribute 1 = Value A\\n', '\\n', '[Section B]\\n', 'Attribute 2 = Value B\\n', '\\n']

		:param namespaces: Attributes are namespaced. ( Boolean )
		:param splitter: Splitter character. ( String )
		:param commentLimiter: Comment limiter character. ( String )
		:param spacesAroundSplitter: Spaces around attributes and value splitters. ( Boolean )
		:param spaceAfterCommentLimiter: Space after comments limiter. ( Boolean )
		:return: Method success. ( Boolean )
		"""

		if not self.__sections:
			return False

		LOGGER.debug("> Setting '{0}' file content.".format(self.path))
		attributeTemplate = spacesAroundSplitter and "{{0}} {0} {{1}}\n".format(splitter) or \
							"{{0}}{0}{{1}}\n".format(splitter)
		attributeTemplate = foundations.strings.replace(attributeTemplate, {"{{" : "{", "}}" : "}"})
		commentTemplate = spaceAfterCommentLimiter and "{0} {{0}}\n".format(commentLimiter) or \
							"{0}{{0}}\n".format(commentLimiter)
		if self.__defaultsSection in self.__sections:
			LOGGER.debug("> Appending '{0}' default section.".format(self.__defaultsSection))
			if self.__comments:
				for comment, value in self.__comments.iteritems():
					if self.__defaultsSection in comment:
						value = value["content"] or ""
						LOGGER.debug("> Appending '{0}' comment with '{1}' value.".format(comment, value))
						self.content.append(commentTemplate.format(value))
			for attribute, value in self.__sections[self.__defaultsSection].iteritems():
				attribute = namespaces and attribute or foundations.namespace.removeNamespace(attribute,
																							self.__namespaceSplitter,
																							rootOnly=True)
				value = value or ""
				LOGGER.debug("> Appending '{0}' attribute with '{1}' value.".format(attribute, value))
				self.content.append(attributeTemplate.format(attribute, value))
			self.content.append("\n")

		for i, section in enumerate(self.__sections):
			LOGGER.debug("> Appending '{0}' section.".format(section))
			self.content.append("[{0}]\n".format(section))
			if self.__comments:
				for comment, value in self.__comments.iteritems():
					if section in comment:
						value = value["content"] or ""
						LOGGER.debug("> Appending '{0}' comment with '{1}' value.".format(comment, value))
						self.content.append(commentTemplate.format(value))
			for attribute, value in self.__sections[section].iteritems():
				if foundations.namespace.removeNamespace(attribute) == self.__rawSectionContentIdentifier:
					LOGGER.debug("> Appending '{0}' raw section content.".format(section))
					for line in value:
						self.content.append(line)
				else:
					LOGGER.debug("> Appending '{0}' section.".format(section))
					attribute = namespaces and attribute or foundations.namespace.removeNamespace(attribute,
																								self.__namespaceSplitter,
																								rootOnly=True)
					value = value or ""
					LOGGER.debug("> Appending '{0}' attribute with '{1}' value.".format(attribute, value))
					self.content.append(attributeTemplate.format(attribute, value))
			if i != len(self.__sections) - 1:
				self.content.append("\n")
		foundations.io.File.write(self)
		return True
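
# A minimal, illustrative usage sketch of the SectionsFileParser defined above: it
# works on in-memory content only, so no file I/O is performed, and every literal
# below ("Section A", "Attribute 1", ...) is a hypothetical placeholder.
def _sectionsFileParserSketch():
	parser = SectionsFileParser()
	parser.content = ["[Section A]\n", "; Comment.\n", "Attribute 1 = \"Value A\"\n"]
	parser.parse()
	# Attributes are namespaced on parsing, so the value is reachable either through
	# "getValue" or directly through the "Section A|Attribute 1" key.
	return parser.getValue("Attribute 1", "Section A")  # -> "Value A"
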
def buildApi(packages, input, output, sanitizer, excludedModules=None):
	"""
	Builds the Sphinx documentation API.

	:param packages: Packages to include in the API.
	:type packages: list
	:param input: Input modules directory.
	:type input: unicode
	:param output: Output reStructuredText files directory.
	:type output: unicode
	:param sanitizer: Sanitizer python module.
	:type sanitizer: unicode
	:param excludedModules: Excluded modules.
	:type excludedModules: list
	:return: Definition success.
	:rtype: bool
	"""

	LOGGER.info("{0} | Building Sphinx documentation API!".format(buildApi.__name__))

	sanitizer = importSanitizer(sanitizer)

	if os.path.exists(input):
		shutil.rmtree(input)
	os.makedirs(input)

	excludedModules = [] if excludedModules is None else excludedModules

	packagesModules = {"apiModules": [],
					   "testsModules": []}
	for package in packages:
		package = __import__(package)
		path = foundations.common.getFirstItem(package.__path__)
		packageDirectory = os.path.dirname(path)

		for file in sorted(
				list(foundations.walkers.filesWalker(packageDirectory, filtersIn=("{0}.*\.ui$".format(path),)))):
			LOGGER.info("{0} | Ui file: '{1}'".format(buildApi.__name__, file))
			targetDirectory = os.path.dirname(file).replace(packageDirectory, "")
			directory = "{0}{1}".format(input, targetDirectory)
			if not foundations.common.pathExists(directory):
				os.makedirs(directory)
			source = os.path.join(directory, os.path.basename(file))
			shutil.copyfile(file, source)

		modules = []
		for file in sorted(
				list(foundations.walkers.filesWalker(packageDirectory, filtersIn=("{0}.*\.py$".format(path),),
													 filtersOut=excludedModules))):
			LOGGER.info("{0} | Python file: '{1}'".format(buildApi.__name__, file))
			module = "{0}.{1}".format((".".join(os.path.dirname(file).replace(packageDirectory, "").split("/"))),
									  foundations.strings.getSplitextBasename(file)).strip(".")
			LOGGER.info("{0} | Module name: '{1}'".format(buildApi.__name__, module))
			directory = os.path.dirname(os.path.join(input, module.replace(".", "/")))
			if not foundations.common.pathExists(directory):
				os.makedirs(directory)
			source = os.path.join(directory, os.path.basename(file))
			shutil.copyfile(file, source)

			sanitizer.bleach(source)

			if "__init__.py" in file:
				continue

			rstFilePath = "{0}{1}".format(module, FILES_EXTENSION)
			LOGGER.info("{0} | Building API file: '{1}'".format(buildApi.__name__, rstFilePath))
			rstFile = File(os.path.join(output, rstFilePath))
			header = ["_`{0}`\n".format(module),
					  "==={0}\n".format("=" * len(module)),
					  "\n",
					  ".. automodule:: {0}\n".format(module),
					  "\n"]
			rstFile.content.extend(header)

			functions = OrderedDict()
			classes = OrderedDict()
			moduleAttributes = OrderedDict()
			for member, object in moduleBrowser._readmodule(module, [source, ]).iteritems():
				if object.__class__ == moduleBrowser.Function:
					if not member.startswith("_"):
						functions[member] = [".. autofunction:: {0}\n".format(member)]
				elif object.__class__ == moduleBrowser.Class:
					classes[member] = [".. autoclass:: {0}\n".format(member),
									   "	:show-inheritance:\n",
									   "	:members:\n"]
				elif object.__class__ == moduleBrowser.Global:
					if not member.startswith("_"):
						moduleAttributes[member] = [".. attribute:: {0}.{1}\n".format(module, member)]

			moduleAttributes and rstFile.content.append("Module Attributes\n-----------------\n\n")
			for moduleAttribute in moduleAttributes.itervalues():
				rstFile.content.extend(moduleAttribute)
				rstFile.content.append("\n")

			functions and rstFile.content.append("Functions\n---------\n\n")
			for function in functions.itervalues():
				rstFile.content.extend(function)
				rstFile.content.append("\n")

			classes and rstFile.content.append("Classes\n-------\n\n")
			for class_ in classes.itervalues():
				rstFile.content.extend(class_)
				rstFile.content.append("\n")

			rstFile.write()
			modules.append(module)

		packagesModules["apiModules"].extend([module for module in modules if not "tests" in module])
		packagesModules["testsModules"].extend([module for module in modules if "tests" in module])

	apiFile = File("{0}{1}".format(output, FILES_EXTENSION))
	apiFile.content.extend(TOCTREE_TEMPLATE_BEGIN)
	for module in packagesModules["apiModules"]:
		apiFile.content.append("   {0} <{1}>\n".format(module, "api/{0}".format(module)))
	for module in packagesModules["testsModules"]:
		apiFile.content.append("   {0} <{1}>\n".format(module, "api/{0}".format(module)))
	apiFile.content.extend(TOCTREE_TEMPLATE_END)
	apiFile.write()

	return True
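
# A hypothetical invocation sketch for buildApi: the package name, directories and
# sanitizer path below are placeholders, and the call assumes the helpers referenced
# by buildApi (importSanitizer, File, moduleBrowser, FILES_EXTENSION and the TOCTREE
# templates) are defined in this module as above.
if __name__ == "__main__":
	buildApi(packages=["foundations"],
			input="docs/sphinx/source/resources/packages",
			output="docs/sphinx/source/resources/pages/api",
			sanitizer="docs/sanitizer.py")
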
Exemple #13
0
class XRCCodeWriter(BaseCodeWriter):
    """\
    Code writer class for writing XRC XML code out of the designed GUI elements
    """

    default_extensions = ['xrc']
    language = "XRC"

    xrc_objects = None
    """\
    dictionary of active L{XrcObject} instances: during the code generation
    it stores all the non-sizer objects that have children (i.e. frames,
    dialogs, panels, notebooks, etc.), while at the end of the code generation,
    before L{finalize} is called, it contains only the true toplevel objects
    (frames and dialogs), and is used to write their XML code
    (see L{finalize}). The other objects are deleted when L{add_object} is
    called with their corresponding code_object as argument
    (see L{add_object})
    """

    global_property_writers = {
        'font': FontPropertyHandler,
        'events': EventsPropertyHandler,
        'extraproperties': ExtraPropertiesPropertyHandler,
        }
    """\
    Dictionary whose items are custom handlers for widget properties
    """

    property_writers = {}
    """\
    Dictionary of dictionaries of property handlers specific for a widget
    the keys are the class names of the widgets

    Example: property_writers['wxRadioBox'] = {'choices', choices_handler}
    """

    obj_builders = {}
    """\
    Dictionary of ``writers'' for the various objects
    """

    tmpl_encoding = '<?xml version="1.0" encoding="%s"?>\n'
    tmpl_generated_by = '<!-- %(generated_by)s -->'

    # Nested classes
    class XrcObject(object):
        """\
        Class to produce the XRC code for a given widget. This is a base class
        which does nothing
        """
        def __init__(self):
            self.properties = {}
            self.children = []  # sub-objects

        def write_child_prologue(self, child, out_file, ntabs):
            pass

        def write_child_epilogue(self, child, out_file, ntabs):
            pass

        def write_property(self, name, val, outfile, ntabs):
            pass

        def write(self, out_file, ntabs):
            pass

        def warning(self, msg):
            """\
            Show a warning message

            @param msg: Warning message
            @type msg:  String
            @see: L{common.MessageLogger.warn()}
            """
            common.message.warn(msg)

    # end of class XrcObject

    class SizerItemXrcObject(XrcObject):
        """\
        XrcObject to handle sizer items
        """
        def __init__(self, obj, option, flag, border):
            XRCCodeWriter.XrcObject.__init__(self)
            self.obj = obj  # the XrcObject representing the widget
            self.option = option
            self.flag = flag
            self.border = border

        def write(self, out_file, ntabs):
            write = out_file.write
            write(self.tabs(ntabs) + '<object class="sizeritem">\n')
            if self.option != '0':
                write(self.tabs(ntabs + 1) + '<option>%s</option>\n' % \
                    self.option)
            if self.flag and self.flag != '0':
                write(self.tabs(ntabs + 1) + '<flag>%s</flag>\n' % self.flag)
            if self.border != '0':
                write(self.tabs(ntabs + 1) + '<border>%s</border>\n' % \
                    self.border)
            # write the widget
            self.obj.write(out_file, ntabs + 1)
            write(self.tabs(ntabs) + '</object>\n')

    # end of class SizerItemXrcObject

    class SpacerXrcObject(XrcObject):
        """\
        XrcObject to handle spacers
        """
        def __init__(self, size_str, option, flag, border):
            XRCCodeWriter.XrcObject.__init__(self)
            self.size_str = size_str
            self.option = option
            self.flag = flag
            self.border = border

        def write(self, out_file, ntabs):
            write = out_file.write
            write(self.tabs(ntabs) + '<object class="spacer">\n')
            write(self.tabs(ntabs + 1) + \
                  '<size>%s</size>\n' % self.size_str.strip())
            if self.option != '0':
                write(self.tabs(ntabs + 1) + '<option>%s</option>\n' % \
                    self.option)
            if self.flag and self.flag != '0':
                write(self.tabs(ntabs + 1) + '<flag>%s</flag>\n' % self.flag)
            if self.border != '0':
                write(self.tabs(ntabs + 1) + '<border>%s</border>\n' % \
                    self.border)
            write(self.tabs(ntabs) + '</object>\n')

    # end of class SpacerXrcObject

    class DefaultXrcObject(XrcObject):
        """\
        Standard XrcObject for every widget, used if no specific XrcObject is
        available
        """
        def __init__(self, code_obj):
            XRCCodeWriter.XrcObject.__init__(self)
            self.properties = code_obj.properties
            self.code_obj = code_obj
            self.name = code_obj.name
            self.klass = code_obj.base  # custom classes aren't allowed in XRC
            self.subclass = code_obj.klass

        def write_property(self, name, val, outfile, ntabs):
            if val:
                name = escape(name)
                outfile.write(self.tabs(ntabs) + '<%s>%s</%s>\n' % \
                              (name, escape(val), name))

        def write(self, out_file, ntabs):
            write = out_file.write
            if self.code_obj.in_sizers:
                write(self.tabs(ntabs) + \
                      '<object class=%s>\n' % quoteattr(self.klass))
            else:
                if self.subclass and self.subclass != self.klass:
                    write(self.tabs(ntabs) +
                          '<object class=%s name=%s subclass=%s>\n' % \
                          (quoteattr(self.klass), quoteattr(self.name),
                           quoteattr(self.subclass)))
                else:
                    write(self.tabs(ntabs) + '<object class=%s name=%s>\n' % \
                          (quoteattr(self.klass), quoteattr(self.name)))
            tab_str = self.tabs(ntabs + 1)
            # write the properties
            if self.properties.has_key('foreground'):
                if self.properties['foreground'].startswith('#'):
                    # XRC does not support colors from system settings
                    self.properties['fg'] = self.properties['foreground']
                del self.properties['foreground']
            if self.properties.has_key('background'):
                if self.properties['background'].startswith('#'):
                    # XRC does not support colors from system settings
                    self.properties['bg'] = self.properties['background']
                del self.properties['background']
            if self.properties.has_key('font'):
                font = self.properties['font']
                del self.properties['font']
            else:
                font = None
            style = str(self.properties.get('style', ''))
            if style and style == '0':
                del self.properties['style']

            if 'id' in self.properties:
                del self.properties['id']  # id has no meaning for XRC

            # ALB 2004-12-05
            if 'events' in self.properties:
                #del self.properties['events']  # no event handling in XRC
                for handler, event in self.properties['events'].iteritems():
                    write(tab_str + '<handler event=%s>%s</handler>\n' % \
                          (quoteattr(handler), escape(event)))
                del self.properties['events']

            # 'disabled' property is actually 'enabled' for XRC
            if 'disabled' in self.properties:
                try:
                    val = int(self.properties['disabled'])
                except:
                    val = False
                if val:
                    self.properties['enabled'] = '0'
                del self.properties['disabled']

            # ALB 2007-08-31 extracode property
            if 'extracode' in self.properties:
                write(self.properties['extracode'].replace('\\n', '\n'))
                del self.properties['extracode']

            # custom base classes are ignored for XRC...
            if 'custom_base' in self.properties:
                del self.properties['custom_base']

            if 'extraproperties' in self.properties:
                prop = self.properties['extraproperties']
                del self.properties['extraproperties']
                self.properties.update(prop)

            for name, val in self.properties.iteritems():
                self.write_property(str(name), val, out_file, ntabs + 1)
            # write the font, if present
            if font:
                write(tab_str + '<font>\n')
                tab_str = self.tabs(ntabs + 2)
                for key, val in font.iteritems():
                    if val:
                        write(tab_str + '<%s>%s</%s>\n' % \
                              (escape(key), escape(val), escape(key)))
                write(self.tabs(ntabs + 1) + '</font>\n')
            # write the children
            for c in self.children:
                self.write_child_prologue(c, out_file, ntabs + 1)
                c.write(out_file, ntabs + 1)
                self.write_child_epilogue(c, out_file, ntabs + 1)
            write(self.tabs(ntabs) + '</object>\n')

    # end of class DefaultXrcObject

    class NotImplementedXrcObject(XrcObject):
        """\
        XrcObject used when no code for the widget can be generated (for
        example, because XRC does not currently handle such widget)
        """
        def __init__(self, code_obj):
            XRCCodeWriter.XrcObject.__init__(self)
            self.code_obj = code_obj

        def write(self, outfile, ntabs):
            m = 'code generator for %s objects not available' % \
                self.code_obj.base
            self.warning('%s' % m)
            outfile.write(self.tabs(ntabs) + '<!-- %s -->\n' % m)

    # end of class NotImplementedXrcObject

    def __init__(self):
        BaseCodeWriter.__init__(self)
        # Inject into all classes derived from XrcObject
        XRCCodeWriter.XrcObject.tabs = self.tabs

    def initialize(self, app_attrs):
        # initialise parent class
        BaseCodeWriter.initialize(self, app_attrs)

        out_path = app_attrs['path']

        if self.multiple_files:
            # for now we handle only single-file code generation
            raise IOError("XRC code cannot be split into multiple files")
        self.output_file_name = out_path
        self.out_file = cStringIO.StringIO()  # open(out_path, 'w')
        self.out_file.write('\n<resource version="2.3.0.1">\n')
        self.curr_tab = 1
        self.xrc_objects = OrderedDict()

    def finalize(self):
        # write the code for every toplevel object
        for obj in self.xrc_objects.itervalues():
            obj.write(self.out_file, 1)
        self.out_file.write('</resource>\n')
        # store the contents to file
        self.save_file(
            self.output_file_name,
            self.out_file.getvalue()
            )

    def add_app(self, app_attrs, top_win_class):
        """\
        In the case of XRC output, there's no wxApp code to generate
        """
        pass

    def add_object(self, unused, sub_obj):
        """\
        Adds the object sub_obj to the XRC tree. The first argument is unused.
        """
        # what we need in XRC is not top_obj, but sub_obj's true parent
        top_obj = sub_obj.parent
        builder = self.obj_builders.get(
            sub_obj.base,
            XRCCodeWriter.DefaultXrcObject
            )
        try:
            # check whether we already created the xrc_obj
            xrc_obj = sub_obj.xrc
        except AttributeError:
            xrc_obj = builder(sub_obj)  # builder functions must return a
                                        # subclass of XrcObject
            sub_obj.xrc = xrc_obj
        else:
            # if we found it, remove it from the self.xrc_objects dictionary
            # (if it was there, i.e. the object is not a sizer), because this
            # isn't a true toplevel object
            if sub_obj in self.xrc_objects:
                del self.xrc_objects[sub_obj]
        # let's see if sub_obj's parent already has an XrcObject: if so, it is
        # temporarily stored in the self.xrc_objects dict...
        if top_obj in self.xrc_objects:
            top_xrc = self.xrc_objects[top_obj]
        else:
            # ...otherwise, create it and store it in the self.xrc_objects dict
            top_xrc = self.obj_builders.get(
                top_obj.base, XRCCodeWriter.DefaultXrcObject)(top_obj)
            top_obj.xrc = top_xrc
            self.xrc_objects[top_obj] = top_xrc
        top_obj.xrc.children.append(xrc_obj)

    def add_sizeritem(self, unused, sizer, obj, option, flag, border):
        """\
        Adds a sizeritem to the XRC tree. The first argument is unused.
        """
        # what we need in XRC is not toplevel, but sub_obj's true parent
        toplevel = obj.parent
        top_xrc = toplevel.xrc
        obj_xrc = obj.xrc
        try:
            sizer_xrc = sizer.xrc
        except AttributeError:
            # if the sizer does not have an XrcObject yet, create it now
            sizer_xrc = self.obj_builders.get(
                sizer.base, XRCCodeWriter.DefaultXrcObject)(sizer)
            sizer.xrc = sizer_xrc
        # we now have to move the children from 'toplevel' to 'sizer'
        index = top_xrc.children.index(obj_xrc)
        if obj.klass == 'spacer':
            w = obj.properties.get('width', '0')
            h = obj.properties.get('height', '0')
            obj_xrc = XRCCodeWriter.SpacerXrcObject(
                '%s, %s' % (w, h),
                str(option),
                str(flag),
                str(border)
                )
            sizer.xrc.children.append(obj_xrc)
        else:
            sizeritem_xrc = XRCCodeWriter.SizerItemXrcObject(
                obj_xrc,
                str(option),
                str(flag),
                str(border)
                )
            sizer.xrc.children.append(sizeritem_xrc)
        del top_xrc.children[index]

    def add_class(self, code_obj):
        """\
        Add class behaves very differently for XRC output than for other
        languages (e.g. Python): since custom classes are not supported in
        XRC, this has an effect only for true toplevel widgets, i.e. frames and
        dialogs. For other kinds of widgets, this is equivalent to add_object
        """
        if not self.xrc_objects.has_key(code_obj):
            builder = self.obj_builders.get(
                code_obj.base,
                XRCCodeWriter.DefaultXrcObject
                )
            xrc_obj = builder(code_obj)
            code_obj.xrc = xrc_obj
            # add the xrc_obj to the dict of the toplevel ones
            self.xrc_objects[code_obj] = xrc_obj
            
    def _format_comment(self, msg):
        return '<!-- %s -->' % escape(msg.rstrip())
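
# A small self-contained sketch illustrating only the XML escaping and attribute
# quoting that DefaultXrcObject.write relies on, using the escape / quoteattr helpers
# the class above already uses (assumed here to come from xml.sax.saxutils).
from xml.sax.saxutils import escape, quoteattr

def _xrc_property_line(name, value, ntabs=1):
    # Mirrors write_property: one indented "<name>value</name>" line per property.
    return "    " * ntabs + '<%s>%s</%s>\n' % (escape(name), escape(value), escape(name))

# _xrc_property_line("label", "Save & Quit") -> '    <label>Save &amp; Quit</label>\n'
# quoteattr("wxFrame") -> '"wxFrame"', as used when writing <object class=...> tags.
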
Exemple #14
0
class NodeCollection(object):
    """
	Node collection / document:
		add nodes
		initialize context nodes
		merge node collections
		render template nodes
		generate namespace objects
	"""

    _parser = NodeBuilder()
    handlers = {}

    def __init__(self, nodelist=[], init_space={}):
        self.node = {}  #OrderedDict()
        self.nodelist = OrderedDict()
        self.space = Space(init_space)

        for node in nodelist:
            node.globalspace = self.space
            self.nodelist[node.name] = node

    @classmethod
    def parse(cls, source):
        nodelist = cls._parser(source, cls.handlers)
        return cls(nodelist)

    @property
    def contexts(self):
        return [node for node in self if node.is_context]

    @property
    def templates(self):
        return [node for node in self if node.is_template]

    def fromspace(self, outspace):
        self.space += outspace

    def init_context(self, outspace):
        self.fromspace(outspace)

        for node in self.contexts:
            node.globalspace = self.space
            self.node[node.name] = Space(node.activate())
            self.space += self.node[node.name]

    init = init_context

    def render(self):
        for node in self.templates:
            node.globalspace = self.space
            node.activate()

    def __ilshift__(self, out_collection):
        out_collection.init(self.space)
        # self.space <<= out_collection.init(self.space)

        nodelist = self.nodelist
        for node in out_collection:
            name = node.name
            if name not in nodelist or not nodelist[name].text.strip():
                self.nodelist[name] = node
            else:
                if node.is_context and nodelist[name].is_context:
                    nodelist[name].value <<= node.value
                    self.space += nodelist[name].value

        return self

    def __iter__(self):
        return self.nodelist.itervalues()

    def tospace(self):
        return Space((node.name, node.value) for node in self)
class SectionsFileParser(foundations.io.File):
	"""
	Defines methods to parse sections file format files;
	an alternative configuration file parser is available directly in Python: :class:`ConfigParser.ConfigParser`.

	The parser given by this class has some major differences from Python :class:`ConfigParser.ConfigParser`:

		- | Sections and attributes are stored in their order of appearance by default.
			( Using Python :class:`collections.OrderedDict` )
		- | A default section ( **_defaults** ) will store orphan attributes
			( Attributes appearing before any declared section ).
		- File comments are stored inside the :obj:`SectionsFileParser.comments` class property.
		- | Sections, attributes and values are stripped of whitespace by default
			but can also be stored with their leading and trailing whitespace.
		- | Values are stripped of quotation markers by default
			but can also be stored with their leading and trailing quotation markers.
		- Attributes are namespaced by default, allowing sections to be merged without key collisions.

	"""

	def __init__(self,
				 file=None,
				 splitters=("=", ":"),
				 namespaceSplitter="|",
				 commentLimiters=(";", "#"),
				 commentMarker="#",
				 quotationMarkers=("\"", "'", "`"),
				 rawSectionContentIdentifier="__raw__",
				 defaultsSection="_defaults",
				 preserveOrder=True):
		"""
		Initializes the class.

		Usage::

			>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse(stripComments=False)
			<foundations.parsers.SectionsFileParser object at 0x293892011>
			>>> sectionsFileParser.sections.keys()
			[u'Section A', u'Section B']
			>>> sectionsFileParser.comments
			OrderedDict([(u'Section A|#0', {u'content': u'Comment.', u'id': 0})])

		:param file: Current file path.
		:type file: unicode
		:param splitters: Splitter characters.
		:type splitters: tuple or list
		:param namespaceSplitter: Namespace splitter character.
		:type namespaceSplitter: unicode
		:param commentLimiters: Comment limiters characters.
		:type commentLimiters: tuple or list
		:param commentMarker: Character used to prefix extracted comment identifiers.
		:type commentMarker: unicode
		:param quotationMarkers: Quotation markers characters.
		:type quotationMarkers: tuple or list
		:param rawSectionContentIdentifier: Raw section content identifier.
		:type rawSectionContentIdentifier: unicode
		:param defaultsSection: Default section name.
		:type defaultsSection: unicode
		:param preserveOrder: Data order is preserved.
		:type preserveOrder: bool
		"""

		LOGGER.debug("> Initializing '{0}()' class.".format(self.__class__.__name__))

		foundations.io.File.__init__(self, file)

		# --- Setting class attributes. ---
		self.__splitters = None
		self.splitters = splitters
		self.__namespaceSplitter = None
		self.namespaceSplitter = namespaceSplitter
		self.__commentLimiters = None
		self.commentLimiters = commentLimiters
		self.__commentMarker = None
		self.commentMarker = commentMarker
		self.__quotationMarkers = None
		self.quotationMarkers = quotationMarkers
		self.__rawSectionContentIdentifier = None
		self.rawSectionContentIdentifier = rawSectionContentIdentifier
		self.__defaultsSection = None
		self.defaultsSection = defaultsSection
		self.__preserveOrder = None
		self.preserveOrder = preserveOrder

		if not preserveOrder:
			self.__sections = {}
			self.__comments = {}
		else:
			self.__sections = OrderedDict()
			self.__comments = OrderedDict()
		self.__parsingErrors = []

	#******************************************************************************************************************
	#***	Attributes properties.
	#******************************************************************************************************************
	@property
	def splitters(self):
		"""
		Property for **self.__splitters** attribute.

		:return: self.__splitters.
		:rtype: tuple or list
		"""

		return self.__splitters

	@splitters.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def splitters(self, value):
		"""
		Setter for **self.__splitters** attribute.

		:param value: Attribute value.
		:type value: tuple or list
		"""

		if value is not None:
			assert type(value) in (tuple, list), "'{0}' attribute: '{1}' type is not 'tuple' or 'list'!".format(
				"splitters", value)
			for element in value:
				assert type(element) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
					"splitters", element)
				assert len(element) == 1, "'{0}' attribute: '{1}' has multiple characters!".format("splitter", element)
				assert not re.search(r"\w", element), "'{0}' attribute: '{1}' is an alphanumeric character!".format(
					"splitter", element)
		self.__splitters = value

	@splitters.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def splitters(self):
		"""
		Deleter for **self.__splitters** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "splitters"))

	@property
	def namespaceSplitter(self):
		"""
		Property for **self.__namespaceSplitter** attribute.

		:return: self.__namespaceSplitter.
		:rtype: unicode
		"""

		return self.__namespaceSplitter

	@namespaceSplitter.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def namespaceSplitter(self, value):
		"""
		Setter for **self.__namespaceSplitter** attribute.

		:param value: Attribute value.
		:type value: unicode
		"""

		if value is not None:
			assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
				"namespaceSplitter", value)
			assert len(value) == 1, "'{0}' attribute: '{1}' has multiple characters!".format("namespaceSplitter",
																							  value)
			assert not re.search(r"\w", value), "'{0}' attribute: '{1}' is an alphanumeric character!".format(
				"namespaceSplitter", value)
		self.__namespaceSplitter = value

	@namespaceSplitter.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def namespaceSplitter(self):
		"""
		Deleter for **self.__namespaceSplitter** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "namespaceSplitter"))

	@property
	def commentLimiters(self):
		"""
		Property for **self.__commentLimiters** attribute.

		:return: self.__commentLimiters.
		:rtype: tuple or list
		"""

		return self.__commentLimiters

	@commentLimiters.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def commentLimiters(self, value):
		"""
		Setter for **self.__commentLimiters** attribute.

		:param value: Attribute value.
		:type value: tuple or list
		"""

		if value is not None:
			assert type(value) in (tuple, list), "'{0}' attribute: '{1}' type is not 'tuple' or 'list'!".format(
				"commentLimiters", value)
			for element in value:
				assert type(element) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
					"commentLimiters", element)
		self.__commentLimiters = value

	@commentLimiters.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def commentLimiters(self):
		"""
		Deleter for **self.__commentLimiters** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "commentLimiters"))

	@property
	def commentMarker(self):
		"""
		Property for **self.__commentMarker** attribute.

		:return: self.__commentMarker.
		:rtype: unicode
		"""

		return self.__commentMarker

	@commentMarker.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def commentMarker(self, value):
		"""
		Setter for **self.__commentMarker** attribute.

		:param value: Attribute value.
		:type value: unicode
		"""

		if value is not None:
			assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
				"commentMarker", value)
			assert not re.search(r"\w", value), "'{0}' attribute: '{1}' is an alphanumeric character!".format(
				"commentMarker", value)
		self.__commentMarker = value

	@commentMarker.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def commentMarker(self):
		"""
		Deleter for **self.__commentMarker** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "commentMarker"))

	@property
	def quotationMarkers(self):
		"""
		Property for **self.__quotationMarkers** attribute.

		:return: self.__quotationMarkers.
		:rtype: tuple or list
		"""

		return self.__quotationMarkers

	@quotationMarkers.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def quotationMarkers(self, value):
		"""
		Setter for **self.__quotationMarkers** attribute.

		:param value: Attribute value.
		:type value: tuple or list
		"""

		if value is not None:
			assert type(value) in (tuple, list), "'{0}' attribute: '{1}' type is not 'tuple' or 'list'!".format(
				"quotationMarkers", value)
			for element in value:
				assert type(element) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
					"quotationMarkers", element)
				assert len(element) == 1, "'{0}' attribute: '{1}' has multiple characters!".format("quotationMarkers",
																									element)
				assert not re.search(r"\w", element), "'{0}' attribute: '{1}' is an alphanumeric character!".format(
					"quotationMarkers", element)
		self.__quotationMarkers = value

	@quotationMarkers.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def quotationMarkers(self):
		"""
		Deleter for **self.__quotationMarkers** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "quotationMarkers"))

	@property
	def rawSectionContentIdentifier(self):
		"""
		Property for **self.__rawSectionContentIdentifier** attribute.

		:return: self.__rawSectionContentIdentifier.
		:rtype: unicode
		"""

		return self.__rawSectionContentIdentifier

	@rawSectionContentIdentifier.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def rawSectionContentIdentifier(self, value):
		"""
		Setter for **self.__rawSectionContentIdentifier** attribute.

		:param value: Attribute value.
		:type value: unicode
		"""

		if value is not None:
			assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
				"rawSectionContentIdentifier", value)
		self.__rawSectionContentIdentifier = value

	@rawSectionContentIdentifier.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def rawSectionContentIdentifier(self):
		"""
		Deleter for **self.__rawSectionContentIdentifier** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "rawSectionContentIdentifier"))

	@property
	def defaultsSection(self):
		"""
		Property for **self.__defaultsSection** attribute.

		:return: self.__defaultsSection.
		:rtype: unicode
		"""

		return self.__defaultsSection

	@defaultsSection.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def defaultsSection(self, value):
		"""
		Setter for **self.__defaultsSection** attribute.

		:param value: Attribute value.
		:type value: unicode
		"""

		if value is not None:
			assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
				"defaultsSection", value)
		self.__defaultsSection = value

	@defaultsSection.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def defaultsSection(self):
		"""
		Deleter for **self.__defaultsSection** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "defaultsSection"))

	@property
	def sections(self):
		"""
		Property for **self.__sections** attribute.

		:return: self.__sections.
		:rtype: OrderedDict or dict
		"""

		return self.__sections

	@sections.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def sections(self, value):
		"""
		Setter for **self.__sections** attribute.

		:param value: Attribute value.
		:type value: OrderedDict or dict
		"""

		if value is not None:
			assert type(value) in (OrderedDict, dict), "'{0}' attribute: '{1}' type is not \
			'OrderedDict' or 'dict'!".format("sections", value)
			for key, element in value.iteritems():
				assert type(key) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
					"sections", key)
				assert type(element) in (OrderedDict, dict), "'{0}' attribute: '{1}' type is not \
				'OrderedDict' or 'dict'!".format("sections", key)
		self.__sections = value

	@sections.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def sections(self):
		"""
		Deleter for **self.__sections** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "sections"))

	@property
	def comments(self):
		"""
		Property for **self.__comments** attribute.

		:return: self.__comments.
		:rtype: OrderedDict or dict
		"""

		return self.__comments

	@comments.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def comments(self, value):
		"""
		Setter for **self.__comments** attribute.

		:param value: Attribute value.
		:type value: OrderedDict or dict
		"""

		if value is not None:
			assert type(value) in (OrderedDict, dict), "'{0}' attribute: '{1}' type is not \
			'OrderedDict' or 'dict'!".format("comments", value)
			for key, element in value.iteritems():
				assert type(key) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
					"comments", key)
				assert type(element) in (OrderedDict, dict), "'{0}' attribute: '{1}' type is not \
				'OrderedDict' or 'dict'!".format("comments", key)
		self.__comments = value

	@comments.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def comments(self):
		"""
		Deleter for **self.__comments** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "comments"))

	@property
	def parsingErrors(self):
		"""
		Property for **self.__parsingErrors** attribute.

		:return: self.__parsingErrors.
		:rtype: list
		"""

		return self.__parsingErrors

	@parsingErrors.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def parsingErrors(self, value):
		"""
		Setter for **self.__parsingErrors** attribute.

		:param value: Attribute value.
		:type value: list
		"""

		if value is not None:
			assert type(value) is list, "'{0}' attribute: '{1}' type is not 'list'!".format("parsingErrors", value)
			for element in value:
				assert issubclass(element.__class__, foundations.exceptions.AbstractParsingError), \
					"'{0}' attribute: '{1}' is not a '{2}' subclass!".format(
						"parsingErrors", element, foundations.exceptions.AbstractParsingError.__class__.__name__)
		self.__parsingErrors = value

	@parsingErrors.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def parsingErrors(self):
		"""
		Deleter for **self.__parsingErrors** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "parsingErrors"))

	@property
	def preserveOrder(self):
		"""
		Property for **self.__preserveOrder** attribute.

		:return: self.__preserveOrder.
		:rtype: bool
		"""

		return self.__preserveOrder

	@preserveOrder.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def preserveOrder(self, value):
		"""
		Setter method for **self.__preserveOrder** attribute.

		:param value: Attribute value.
		:type value: bool
		"""

		if value is not None:
			assert type(value) is bool, "'{0}' attribute: '{1}' type is not 'bool'!".format("preserveOrder", value)
		self.__preserveOrder = value

	@preserveOrder.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def preserveOrder(self):
		"""
		Deleter method for **self.__preserveOrder** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "preserveOrder"))

	#******************************************************************************************************************
	#***	Class methods.
	#******************************************************************************************************************
	def __getitem__(self, section):
		"""
		Reimplements the :meth:`object.__getitem__` method.

		:param section: Section name.
		:type section: unicode
		:return: Layout.
		:rtype: Layout
		"""

		return self.__sections.__getitem__(section)

	def __setitem__(self, section, value):
		"""
		Reimplements the :meth:`object.__setitem__` method.

		:param section: Section name.
		:type section: unicode
		:param value: Value.
		:type value: dict
		:return: Layout.
		:rtype: Layout
		"""

		return self.__sections.__setitem__(section, value)

	def __iter__(self):
		"""
		Reimplements the :meth:`object.__iter__` method.

		:return: Layouts iterator.
		:rtype: object
		"""

		return self.__sections.iteritems()

	def __contains__(self, section):
		"""
		Reimplements the :meth:`object.__contains__` method.

		:param section: Section name.
		:type section: unicode
		:return: Section existence.
		:rtype: bool
		"""

		return self.sectionExists(section)

	def __len__(self):
		"""
		Reimplements the :meth:`object.__len__` method.

		:return: Sections count.
		:rtype: int
		"""

		return len(self.__sections)

	@foundations.exceptions.handleExceptions(foundations.exceptions.FileStructureParsingError)
	def parse(self,
			  rawSections=None,
			  namespaces=True,
			  stripComments=True,
			  stripWhitespaces=True,
			  stripQuotationMarkers=True,
			  raiseParsingErrors=True):
		"""
		Processes the file content and extracts the sections / attributes
			as nested :class:`collections.OrderedDict` or regular dictionaries.

		Usage::

			>>> content = ["; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse(stripComments=False)
			<foundations.parsers.SectionsFileParser object at 0x860323123>
			>>> sectionsFileParser.sections.keys()
			[u'_defaults']
			>>> sectionsFileParser.sections["_defaults"].values()
			[u'Value A', u'Value B']
			>>> sectionsFileParser.parse(stripComments=False, stripQuotationMarkers=False)
			<foundations.parsers.SectionsFileParser object at 0x860323123>
			>>> sectionsFileParser.sections["_defaults"].values()
			[u'"Value A"', u'"Value B"']
			>>> sectionsFileParser.comments
			OrderedDict([(u'_defaults|#0', {u'content': u'Comment.', u'id': 0})])
			>>> sectionsFileParser.parse()
			<foundations.parsers.SectionsFileParser object at 0x860323123>
			>>> sectionsFileParser.sections["_defaults"]
			OrderedDict([(u'_defaults|Attribute 1', u'Value A'), (u'_defaults|Attribute 2', u'Value B')])
			>>> sectionsFileParser.parse(namespaces=False)
			<foundations.parsers.SectionsFileParser object at 0x860323123>
			>>> sectionsFileParser.sections["_defaults"]
			OrderedDict([(u'Attribute 1', u'Value A'), (u'Attribute 2', u'Value B')])

		:param rawSections: Ignored raw sections.
		:type rawSections: tuple or list
		:param namespaces: Attributes and comments are namespaced.
		:type namespaces: bool
		:param stripComments: Comments are stripped.
		:type stripComments: bool
		:param stripWhitespaces: Whitespaces are stripped.
		:type stripWhitespaces: bool
		:param stripQuotationMarkers: Attributes values quotation markers are stripped.
		:type stripQuotationMarkers: bool
		:param raiseParsingErrors: Raise parsing errors.
		:type raiseParsingErrors: bool
		:return: SectionsFileParser instance.
		:rtype: SectionsFileParser
		"""

		LOGGER.debug("> Reading sections from: '{0}'.".format(self.path))

		if not self.content:
			self.read()

		attributes = {} if not self.__preserveOrder else OrderedDict()
		section = self.__defaultsSection
		rawSections = rawSections or []

		commentId = 0
		for i, line in enumerate(self.content):
			# Comments matching.
			search = re.search(r"^\s*[{0}](?P<comment>.+)$".format("".join(self.__commentLimiters)), line)
			if search:
				if not stripComments:
					comment = "{0}{1}".format(self.__commentMarker, commentId)
					if namespaces:
						comment = foundations.namespace.setNamespace(section, comment, self.__namespaceSplitter)
					content = search.group("comment").strip() if stripWhitespaces else search.group("comment")
					self.__comments[comment] = {"id": commentId, "content": content}
					commentId += 1
				continue

			# Sections matching.
			search = re.search(r"^\s*\[(?P<section>.+)\]\s*$", line)
			if search:
				section = stripWhitespaces and search.group("section").strip() or search.group("section")
				if not self.__preserveOrder:
					attributes = {}
				else:
					attributes = OrderedDict()
				rawContent = []
				continue

			if section in rawSections:
				rawContent.append(line)
				attributes[self.__rawSectionContentIdentifier] = rawContent
			else:
				# Empty line matching.
				search = re.search(r"^\s*$", line)
				if search:
					continue

				# Attributes matching.
				search = re.search(r"^(?P<attribute>.+?)[{0}](?P<value>.+)$".format("".join(self.__splitters)), line) \
					or re.search(r"^(?P<attribute>.+?)[{0}]\s*$".format("".join(self.__splitters)), line)
				if search:
					attribute = search.group("attribute").strip() if stripWhitespaces else search.group("attribute")
					attribute = foundations.namespace.setNamespace(section, attribute, self.__namespaceSplitter) \
						if namespaces else attribute

					if len(search.groups()) == 2:
						value = search.group("value").strip() if stripWhitespaces else search.group("value")
						attributes[attribute] = value.strip("".join(self.__quotationMarkers)) \
							if stripQuotationMarkers else value
					else:
						attributes[attribute] = None
				else:
					self.__parsingErrors.append(foundations.exceptions.AttributeStructureParsingError(
						"Attribute structure is invalid: {0}".format(line), i + 1))

			self.__sections[section] = attributes

		LOGGER.debug("> Sections: '{0}'.".format(self.__sections))
		LOGGER.debug("> '{0}' file parsing done!".format(self.path))

		if self.__parsingErrors and raiseParsingErrors:
			raise foundations.exceptions.FileStructureParsingError(
				"{0} | '{1}' structure is invalid, parsing exceptions occured!".format(self.__class__.__name__,
																					   self.path))

		return self

	def sectionExists(self, section):
		"""
		Checks if given section exists.

		Usage::

			>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse()
			<foundations.parsers.SectionsFileParser object at 0x845683844>
			>>> sectionsFileParser.sectionExists("Section A")
			True
			>>> sectionsFileParser.sectionExists("Section C")
			False

		:param section: Section to check existence.
		:type section: unicode
		:return: Section existence.
		:rtype: bool
		"""

		if section in self.__sections:
			LOGGER.debug("> '{0}' section exists in '{1}'.".format(section, self))
			return True
		else:
			LOGGER.debug("> '{0}' section doesn't exists in '{1}'.".format(section, self))
			return False

	def attributeExists(self, attribute, section):
		"""
		Checks if given attribute exists.

		Usage::

			>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse()
			<foundations.parsers.SectionsFileParser object at 0x234564563>
			>>> sectionsFileParser.attributeExists("Attribute 1", "Section A")
			True
			>>> sectionsFileParser.attributeExists("Attribute 2", "Section A")
			False

		:param attribute: Attribute to check existence.
		:type attribute: unicode
		:param section: Section to search attribute into.
		:type section: unicode
		:return: Attribute existence.
		:rtype: bool
		"""

		if foundations.namespace.removeNamespace(attribute, rootOnly=True) in self.getAttributes(section,
																								 stripNamespaces=True):
			LOGGER.debug("> '{0}' attribute exists in '{1}' section.".format(attribute, section))
			return True
		else:
			LOGGER.debug("> '{0}' attribute doesn't exists in '{1}' section.".format(attribute, section))
			return False

	def getAttributes(self, section, stripNamespaces=False):
		"""
		Returns given section attributes.

		Usage::

			>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse()
			<foundations.parsers.SectionsFileParser object at 0x125698322>
			>>> sectionsFileParser.getAttributes("Section A")
			OrderedDict([(u'Section A|Attribute 1', u'Value A')])
			>>> sectionsFileParser.preserveOrder=False
			>>> sectionsFileParser.getAttributes("Section A")
			{u'Section A|Attribute 1': u'Value A'}
			>>> sectionsFileParser.preserveOrder=True
			>>> sectionsFileParser.getAttributes("Section A", stripNamespaces=True)
			OrderedDict([(u'Attribute 1', u'Value A')])

		:param section: Section containing the requested attributes.
		:type section: unicode
		:param stripNamespaces: Strip namespaces while retrieving attributes.
		:type stripNamespaces: bool
		:return: Attributes.
		:rtype: OrderedDict or dict
		"""

		LOGGER.debug("> Getting section '{0}' attributes.".format(section))

		attributes = OrderedDict() if self.__preserveOrder else dict()
		if not self.sectionExists(section):
			return attributes

		if stripNamespaces:
			for attribute, value in self.__sections[section].iteritems():
				attributes[foundations.namespace.removeNamespace(attribute, rootOnly=True)] = value
		else:
			attributes.update(self.__sections[section])
		LOGGER.debug("> Attributes: '{0}'.".format(attributes))
		return attributes

	def getAllAttributes(self):
		"""
		Returns all sections attributes.

		Usage::

			>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse()
			<foundations.parsers.SectionsFileParser object at 0x845683844>
			>>> sectionsFileParser.getAllAttributes()
			OrderedDict([(u'Section A|Attribute 1', u'Value A'), (u'Section B|Attribute 2', u'Value B')])
			>>> sectionsFileParser.preserveOrder=False
			>>> sectionsFileParser.getAllAttributes()
			{u'Section B|Attribute 2': u'Value B', u'Section A|Attribute 1': u'Value A'}

		:return: All sections / files attributes.
		:rtype: OrderedDict or dict
		"""

		allAttributes = OrderedDict() if self.__preserveOrder else dict()

		for attributes in self.__sections.itervalues():
			for attribute, value in attributes.iteritems():
				allAttributes[attribute] = value
		return allAttributes

	@foundations.exceptions.handleExceptions(foundations.exceptions.FileStructureParsingError)
	def getValue(self, attribute, section, default=""):
		"""
		Returns requested attribute value.

		Usage::

			>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse()
			<foundations.parsers.SectionsFileParser object at 0x679302423>
			>>> sectionsFileParser.getValue("Attribute 1", "Section A")
			u'Value A'

		:param attribute: Attribute name.
		:type attribute: unicode
		:param section: Section containing the searched attribute.
		:type section: unicode
		:param default: Default return value.
		:type default: object
		:return: Attribute value.
		:rtype: unicode
		"""

		if not self.attributeExists(attribute, section):
			return default

		if attribute in self.__sections[section]:
			value = self.__sections[section][attribute]
		elif foundations.namespace.setNamespace(section, attribute) in self.__sections[section]:
			value = self.__sections[section][foundations.namespace.setNamespace(section, attribute)]
		LOGGER.debug("> Attribute: '{0}', value: '{1}'.".format(attribute, value))
		return value

	def setValue(self, attribute, section, value):
		"""
		Sets requested attribute value.

		Usage::

			>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse()
			<foundations.parsers.SectionsFileParser object at 0x109304209>
			>>> sectionsFileParser.setValue("Attribute 3", "Section C", "Value C")
			True

		:param attribute: Attribute name.
		:type attribute: unicode
		:param section: Section containing the searched attribute.
		:type section: unicode
		:param value: Attribute value.
		:type value: object
		:return: Definition success.
		:rtype: bool
		"""

		if not self.sectionExists(section):
			LOGGER.debug("> Adding '{0}' section.".format(section))
			self.__sections[section] = OrderedDict() if self.__preserveOrder else dict()

		self.__sections[section][attribute] = value

		return True

	def write(self,
			  namespaces=False,
			  splitter="=",
			  commentLimiter=(";"),
			  spacesAroundSplitter=True,
			  spaceAfterCommentLimiter=True):
		"""
		Writes defined file using :obj:`SectionsFileParser.sections` and
			:obj:`SectionsFileParser.comments` class properties content.

		Usage::

			>>> sections = {"Section A": {"Section A|Attribute 1": "Value A"}, \
"Section B": {"Section B|Attribute 2": "Value B"}}
			>>> sectionsFileParser = SectionsFileParser("SectionsFile.rc")
			>>> sectionsFileParser.sections = sections
			>>> sectionsFileParser.write()
			True
			>>> sectionsFileParser.read()
			u'[Section A]\\nAttribute 1 = Value A\\n\\n[Section B]\\nAttribute 2 = Value B\\n'

		:param namespaces: Attributes are namespaced.
		:type namespaces: bool
		:param splitter: Splitter character.
		:type splitter: unicode
		:param commentLimiter: Comment limiter character.
		:type commentLimiter: unicode
		:param spacesAroundSplitter: Spaces around attributes and value splitters.
		:type spacesAroundSplitter: bool
		:param spaceAfterCommentLimiter: Space after comments limiter.
		:type spaceAfterCommentLimiter: bool
		:return: Method success.
		:rtype: bool
		"""

		self.uncache()

		LOGGER.debug("> Setting '{0}' file content.".format(self.path))
		attributeTemplate = "{{0}} {0} {{1}}\n".format(splitter) if spacesAroundSplitter else \
							"{{0}}{0}{{1}}\n".format(splitter)
		attributeTemplate = foundations.strings.replace(attributeTemplate, {"{{" : "{", "}}" : "}"})
		commentTemplate = spaceAfterCommentLimiter and "{0} {{0}}\n".format(commentLimiter) or \
						  "{0}{{0}}\n".format(commentLimiter)
		if self.__defaultsSection in self.__sections:
			LOGGER.debug("> Appending '{0}' default section.".format(self.__defaultsSection))
			if self.__comments:
				for comment, value in self.__comments.iteritems():
					if self.__defaultsSection in comment:
						value = value["content"] or ""
						LOGGER.debug("> Appending '{0}' comment with '{1}' value.".format(comment, value))
						self.content.append(commentTemplate.format(value))
			for attribute, value in self.__sections[self.__defaultsSection].iteritems():
				attribute = namespaces and attribute or foundations.namespace.removeNamespace(attribute,
																							  self.__namespaceSplitter,
																							  rootOnly=True)
				value = value or ""
				LOGGER.debug("> Appending '{0}' attribute with '{1}' value.".format(attribute, value))
				self.content.append(attributeTemplate.format(attribute, value))
			self.content.append("\n")

		for i, section in enumerate(self.__sections):
			LOGGER.debug("> Appending '{0}' section.".format(section))
			self.content.append("[{0}]\n".format(section))
			if self.__comments:
				for comment, value in self.__comments.iteritems():
					if section in comment:
						value = value["content"] or ""
						LOGGER.debug("> Appending '{0}' comment with '{1}' value.".format(comment, value))
						self.content.append(commentTemplate.format(value))
			for attribute, value in self.__sections[section].iteritems():
				if foundations.namespace.removeNamespace(attribute) == self.__rawSectionContentIdentifier:
					LOGGER.debug("> Appending '{0}' raw section content.".format(section))
					for line in value:
						self.content.append(line)
				else:
					LOGGER.debug("> Appending '{0}' section.".format(section))
					attribute = namespaces and attribute or foundations.namespace.removeNamespace(attribute,
																								  self.__namespaceSplitter,
																								  rootOnly=True)
					value = value or ""
					LOGGER.debug("> Appending '{0}' attribute with '{1}' value.".format(attribute, value))
					self.content.append(attributeTemplate.format(attribute, value))
			if i != len(self.__sections) - 1:
				self.content.append("\n")
		foundations.io.File.write(self)
		return True
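# A minimal usage sketch for the SectionsFileParser class above (illustrative
# only: "config.rc" is a hypothetical file and the foundations package is
# assumed to be importable):
#
#   sectionsFileParser = SectionsFileParser("config.rc")
#   sectionsFileParser.parse()
#   value = sectionsFileParser.getValue("Attribute 1", "Section A", default="")
#   sectionsFileParser.setValue("Attribute 2", "Section B", "Value B")
#   sectionsFileParser.write()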
class Autorunner(object):
  def __init__(self, autorun_dir, downloader, concurrent_items, address, port, enable_web_server):
    self.projects_dir = os.path.join(autorun_dir, 'projects')
    self.versioned_dir = os.path.join(autorun_dir, 'versioned_projects')
    self.data_dir = os.path.join(autorun_dir, 'data')
    self.downloader = downloader
    self.concurrent_items = concurrent_items
    self.address = address
    self.port = port
    self.enable_web_server = enable_web_server
    
    # disable the password prompts
    self.gitenv = dict( os.environ.items() + { 'GIT_ASKPASS': '******', 'SSH_ASKPASS': '******' }.items() )
    
    stop_file = os.path.join(autorun_dir, 'STOP')
    self.runner = PrintRunner(concurrent_items=self.concurrent_items, stop_file=stop_file)
    self.runner.on_finish += self.handle_runner_finish
        
    self.current_project_name = None
    self.current_project = None
    
    self.selected_project = None
    
    self.projects = {}
    self.installed_projects = set()
    self.failed_projects = set()
    
    self.on_projects_loaded = Event()
    self.on_project_installing = Event()
    self.on_project_installed = Event()
    self.on_project_installation_failed = Event()
    self.on_project_refresh = Event()
    self.on_project_selected = Event()
    self.on_status = Event()
    
    self.http_client = AsyncHTTPClient()
    
    self.installing = False
    self.shut_down_flag = False
    
    self.project_updater = ioloop.PeriodicCallback(self.update_projects, 10*60*1000)
    self.forced_stop_timeout = None
  
  @gen.engine
  def update_projects(self):
    response = yield gen.Task(self.http_client.fetch, URL, method="GET")
    if response.code == 200:
      data = json.loads(response.body)
      if StrictVersion(seesaw.__version__) < StrictVersion(data["warrior"]["seesaw_version"]):
        # time for an update
        print "There's a new version of Seesaw, you should update."
        self.stop_gracefully()
        
        # schedule a forced reboot after two days
        self.schedule_forced_stop()
        return
      
      projects_list = data["projects"]
      self.projects = OrderedDict([ (project["name"], project) for project in projects_list ])
      for project_data in self.projects.itervalues():
        if "deadline" in project_data:
          project_data["deadline_int"] = time.mktime(time.strptime(project_data["deadline"], "%Y-%m-%dT%H:%M:%SZ"))
      
      # ArchiveTeam's choice
      if "auto_project" in data:
        self.select_project(data["auto_project"])
      else:
        self.select_project(None)
      
      self.on_projects_loaded(self, self.projects)
      
      if self.selected_project and (yield gen.Task(self.check_project_has_update, self.selected_project)):
        # restart project if it needs an update
        self.start_selected_project()
    
    else:
      print "HTTP error %s" % (response.code)
  
  @gen.engine
  def install_project(self, project_name, callback=None):
    self.installed_projects.discard(project_name)
    
    if project_name in self.projects and not self.installing:
      self.installing = project_name
      self.install_output = []
      
      project = self.projects[project_name]
      project_path = os.path.join(self.projects_dir, project_name)
      
      self.on_project_installing(self, project)
      
      if project_name in self.failed_projects:
        if os.path.exists(project_path):
          shutil.rmtree(project_path)
        self.failed_projects.discard(project_name)
        
      if os.path.exists(project_path):
        subprocess.Popen(
            args=[ "git", "config", "remote.origin.url", project["repository"] ],
            cwd=project_path
        ).communicate()
        
        p = AsyncPopen(
            args=[ "git", "pull" ],
            cwd=project_path,
            env=self.gitenv
        )
      else:
        p = AsyncPopen(
            args=[ "git", "clone", project["repository"], project_path ],
            env=self.gitenv
        )
      p.on_output += self.collect_install_output
      p.on_end += yield gen.Callback("gitend")
      p.run()
      result = yield gen.Wait("gitend")
      
      if result != 0:
        self.install_output.append("\ngit returned %d\n" % result)
        self.on_project_installation_failed(self, project, "".join(self.install_output))
        self.installing = None
        self.failed_projects.add(project_name)
        if callback:
          callback(False)
        return
      
      project_install_file = os.path.join(project_path, "warrior-install.sh")
      
      if os.path.exists(project_install_file):
        p = AsyncPopen(
            args=[ project_install_file ],
            cwd=project_path
        )
        p.on_output += self.collect_install_output
        p.on_end += yield gen.Callback("installend")
        p.run()
        result = yield gen.Wait("installend")
        
        if result != 0:
          self.install_output.append("\nCustom installer returned %d\n" % result)
          self.on_project_installation_failed(self, project, "".join(self.install_output))
          self.installing = None
          self.failed_projects.add(project_name)
          if callback:
            callback(False)
          return
      
      data_dir = self.data_dir
      if os.path.exists(data_dir):
        shutil.rmtree(data_dir)
      os.makedirs(data_dir)
      
      project_data_dir = os.path.join(project_path, "data")
      if os.path.islink(project_data_dir):
        os.remove(project_data_dir)
      elif os.path.isdir(project_data_dir):
        shutil.rmtree(project_data_dir)
      os.symlink(data_dir, project_data_dir)
      
      self.installed_projects.add(project_name)
      self.on_project_installed(self, project, "".join(self.install_output))
      
      self.installing = None
      if callback:
        callback(True)
  
  @gen.engine
  def check_project_has_update(self, project_name, callback):
    if project_name in self.projects:
      project = self.projects[project_name]
      project_path = os.path.join(self.projects_dir, project_name)
      
      self.install_output = []
      
      if not os.path.exists(project_path):
        callback(True)
        return
        
      subprocess.Popen(
          args=[ "git", "config", "remote.origin.url", project["repository"] ],
          cwd=project_path
      ).communicate()
      
      p = AsyncPopen(
          args=[ "git", "fetch" ],
          cwd=project_path,
          env=self.gitenv
      )
      p.on_output += self.collect_install_output
      p.on_end += yield gen.Callback("gitend")
      p.run()
      result = yield gen.Wait("gitend")
      
      if result != 0:
        callback(True)
        return
      
      output = subprocess.Popen(
          args=[ "git", "rev-list", "HEAD..FETCH_HEAD" ],
          cwd=project_path,
          stdout=subprocess.PIPE
      ).communicate()[0]
      if output.strip() != "":
        callback(True)
      else:
        callback(False)
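  # The update check above is roughly equivalent to running, in the project
  # directory (illustrative shell form):
  #   git fetch && git rev-list HEAD..FETCH_HEAD
  # a non-empty rev-list output means the remote has commits that the local
  # checkout does not, so the project is considered to need an update.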
  
  def collect_install_output(self, data):
    sys.stdout.write(data)
    data = re.sub("[\x00-\x08\x0b\x0c]", "", data)
    self.install_output.append(data)
  
  @gen.engine
  def select_project(self, project_name):
    if project_name == "auto":
      self.update_projects()
      return
    
    if not project_name in self.projects:
      project_name = None
    
    if project_name != self.selected_project:
      # restart
      self.selected_project = project_name
      self.on_project_selected(self, project_name)
      self.start_selected_project()
  
  def clone_project(self, project_name, project_path):
    version_string = subprocess.Popen(
        args=[ "git", "log", "-1", "--pretty=%h" ],
        cwd=project_path,
        stdout=subprocess.PIPE
    ).communicate()[0].strip()
    
    project_versioned_path = os.path.join(self.versioned_dir, "%s-%s" % (project_name, version_string))
    if not os.path.exists(project_versioned_path):
      if not os.path.exists(self.versioned_dir):
        os.makedirs(self.versioned_dir)
      
      subprocess.Popen(
          args=[ "git", "clone", project_path, project_versioned_path ],
          env=self.gitenv
      ).communicate()
    
    return project_versioned_path
  
  def load_pipeline(self, pipeline_path, context):
    dirname, basename = os.path.split(pipeline_path)
    if dirname == "":
      dirname = "."
    
    with open(pipeline_path) as f:
      pipeline_str = f.read()
    
    local_context = context
    global_context = context
    curdir = os.getcwd()
    try:
      os.chdir(dirname)
      exec pipeline_str in local_context, global_context
    finally:
      os.chdir(curdir)
    
    return ( local_context["project"], local_context["pipeline"] )
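  # Note: load_pipeline exec's the project's pipeline.py inside the given
  # context dict, so that file is expected to bind the names "project" and
  # "pipeline" at its top level; both are read back from the context above.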
  
  @gen.engine
  def start_selected_project(self):
    project_name = self.selected_project
    
    if project_name in self.projects:
      # install or update project if necessary
      if not project_name in self.installed_projects or (yield gen.Task(self.check_project_has_update, project_name)):
        result = yield gen.Task(self.install_project, project_name)
        if not result:
          return
      
      # the path with the project code
      # (this is the most recent code from the repository)
      project_path = os.path.join(self.projects_dir, project_name)
      
      # clone the project code to a versioned directory
      # where the pipeline is actually run
      project_versioned_path = self.clone_project(project_name, project_path)
      
      # load the pipeline from the versioned directory
      pipeline_path = os.path.join(project_versioned_path, "pipeline.py")
      (project, pipeline) = self.load_pipeline(pipeline_path, { "downloader": self.downloader })
      
      print pipeline
      
      # start the pipeline
      if not self.shut_down_flag:
        self.runner.set_current_pipeline(pipeline)
      
      if self.enable_web_server:
        start_runner_server(project, self.runner, bind_address=self.address, port_number=self.port)
      
      self.current_project_name = project_name
      self.current_project = project
      
      self.on_project_refresh(self, self.current_project, self.runner)
      self.fire_status()
      
      if not self.shut_down_flag:
        self.runner.start()
    
    else:
      # project_name not in self.projects,
      # stop the current project (if there is one)
      self.runner.set_current_pipeline(None)
      self.fire_status()
  
  def handle_runner_finish(self, runner):
    self.current_project_name = None
    self.current_project = None
    
    self.on_project_refresh(self, self.current_project, self.runner)
    self.fire_status()
    
    if self.shut_down_flag:
      ioloop.IOLoop.instance().stop()
      sys.exit()
  
  def start(self):
    self.project_updater.start()
    self.update_projects()
    ioloop.IOLoop.instance().start()
  
  def schedule_forced_stop(self):
    if not self.forced_stop_timeout:
      self.forced_stop_timeout = ioloop.IOLoop.instance().add_timeout(datetime.timedelta(days=2), self.forced_stop)
  
  def forced_stop(self):
    sys.exit(1)
  
  def stop_gracefully(self):
    self.shut_down_flag = True
    self.fire_status()
    if self.runner.is_active():
      self.runner.set_current_pipeline(None)
    else:
      ioloop.IOLoop.instance().stop()
      sys.exit()
  
  def keep_running(self):
    self.shut_down_flag = False
    self.start_selected_project()
    self.fire_status()
  
  class Status(object):
    NO_PROJECT = "NO_PROJECT"
    INVALID_SETTINGS = "INVALID_SETTINGS"
    STOPPING_PROJECT = "STOPPING_PROJECT"
    RESTARTING_PROJECT = "RESTARTING_PROJECT"
    RUNNING_PROJECT = "RUNNING_PROJECT"
    SWITCHING_PROJECT = "SWITCHING_PROJECT"
    STARTING_PROJECT = "STARTING_PROJECT"
    SHUTTING_DOWN = "SHUTTING_DOWN"
    REBOOTING = "REBOOTING"
  
  def fire_status(self):
    self.on_status(self, self.warrior_status())
  
  def warrior_status(self):
    if self.shut_down_flag:
      return Autorunner.Status.SHUTTING_DOWN
    elif self.selected_project == None and self.current_project_name == None:
      return Autorunner.Status.NO_PROJECT
    elif self.selected_project:
      if self.selected_project == self.current_project_name:
        return Autorunner.Status.RUNNING_PROJECT
      else:
        return Autorunner.Status.STARTING_PROJECT
    else:
      return Autorunner.Status.STOPPING_PROJECT
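# A hedged sketch of how the Autorunner above might be wired up (illustrative
# only: the argument values are hypothetical and the seesaw runtime that
# provides PrintRunner, ioloop, etc. is assumed to be available):
#
#   autorunner = Autorunner("/data/warrior", downloader="warrior",
#                           concurrent_items=2, address="0.0.0.0", port=8001,
#                           enable_web_server=True)
#   autorunner.start()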
 def apply(self):
     """Transform each
     :class:`~sphinxcontrib.bibtex.nodes.bibliography` node into a
     list of citations.
     """
     env = self.document.settings.env
     for bibnode in self.document.traverse(bibliography):
         # get the information of this bibliography node
         # by looking up its id in the bibliography cache
         id_ = bibnode['ids'][0]
         infos = [info for other_id, info
                  in env.bibtex_cache.bibliographies.iteritems()
                  if other_id == id_ and info.docname == env.docname]
         assert infos, "no bibliography id '%s' in %s" % (
             id_, env.docname)
         assert len(infos) == 1, "duplicate bibliography ids '%s' in %s" % (
             id_, env.docname)
         info = infos[0]
         # generate entries
         entries = OrderedDict()
         for bibfile in info.bibfiles:
             # XXX entries are modified below in an unpicklable way
             # XXX so fetch a deep copy
             data = env.bibtex_cache.bibfiles[bibfile].data
             for entry in data.entries.itervalues():
                 visitor = FilterVisitor(
                     entry=entry,
                     is_cited=env.bibtex_cache.is_cited(entry.key))
                 try:
                     ok = visitor.visit(info.filter_)
                 except ValueError as e:
                     env.app.warn(
                         "syntax error in :filter: expression; %s" %
                         e)
                     # recover by falling back to the default
                     ok = env.bibtex_cache.is_cited(entry.key)
                 if ok:
                     entries[entry.key] = copy.deepcopy(entry)
         # order entries according to which were cited first
         # first, we add all keys that were cited
         # then, we add all remaining keys
         sorted_entries = []
         for key in env.bibtex_cache.get_all_cited_keys():
             try:
                 entry = entries.pop(key)
             except KeyError:
                 pass
             else:
                 sorted_entries.append(entry)
         sorted_entries += entries.itervalues()
         # locate and instantiate style and backend plugins
         style = find_plugin('pybtex.style.formatting', info.style)()
         backend = find_plugin('pybtex.backends', 'docutils')()
         # create citation nodes for all references
         if info.list_ == "enumerated":
             nodes = docutils.nodes.enumerated_list()
             nodes['enumtype'] = info.enumtype
             if info.start >= 1:
                 nodes['start'] = info.start
                 env.bibtex_cache.set_enum_count(env.docname, info.start)
             else:
                 nodes['start'] = env.bibtex_cache.get_enum_count(
                     env.docname)
         elif info.list_ == "bullet":
             nodes = docutils.nodes.bullet_list()
         else:  # "citation"
             nodes = docutils.nodes.paragraph()
         # XXX style.format_entries modifies entries in an unpicklable way
         for entry in style.format_entries(sorted_entries):
             if info.list_ == "enumerated" or info.list_ == "bullet":
                 citation = docutils.nodes.list_item()
                 citation += entry.text.render(backend)
             else:  # "citation"
                 citation = backend.citation(entry, self.document)
                 # backend.citation(...) uses entry.key as citation label
                 # we change it to entry.label later onwards
                 # but we must note the entry.label now;
                 # at this point, we also already prefix the label
                 key = citation[0].astext()
                 info.labels[key] = info.labelprefix + entry.label
             node_text_transform(citation, transform_url_command)
             if info.curly_bracket_strip:
                 node_text_transform(
                     citation,
                     transform_curly_bracket_strip)
             nodes += citation
             if info.list_ == "enumerated":
                 env.bibtex_cache.inc_enum_count(env.docname)
         bibnode.replace_self(nodes)
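# A standalone sketch of the citation-ordering idiom used in apply() above:
# cited keys are popped from the OrderedDict first, in citation order, and the
# remaining entries follow in their original order. The entry keys and values
# below are made up purely for illustration.
from collections import OrderedDict

entries = OrderedDict([("knuth1984", "Knuth"), ("lamport1994", "Lamport"), ("turing1936", "Turing")])
cited_keys = ["turing1936", "knuth1984"]

sorted_entries = []
for key in cited_keys:
    try:
        sorted_entries.append(entries.pop(key))
    except KeyError:
        pass
sorted_entries += entries.values()
print(sorted_entries)  # ['Turing', 'Knuth', 'Lamport']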
Exemple #18
0
class NodeCollection(object):
	"""
	Node collection / document:
		add nodes
		initialize context nodes
		merge node collections
		render template nodes
		build a namespace object
	"""

	_parser = NodeBuilder()
	handlers = {}

	def __init__(self, nodelist=None, init_space=None):
		# avoid shared mutable default arguments
		self.node = {} #OrderedDict()
		self.nodelist = OrderedDict()
		self.space = Space(init_space or {})

		for node in nodelist or []:
			node.globalspace = self.space
			self.nodelist[node.name] = node

	@classmethod
	def parse(cls, source):
		nodelist = cls._parser(source, cls.handlers)
		return cls(nodelist)
	
	@property
	def contexts(self):
		return [node for node in self if node.is_context]
	
	@property
	def templates(self):
		return [node for node in self if node.is_template]

	def fromspace(self, outspace):
		self.space += outspace
	
	def init_context(self, outspace):
		self.fromspace(outspace)

		for node in self.contexts:
			node.globalspace = self.space
			self.node[node.name] = Space(node.activate())
			self.space += self.node[node.name]
	
	init = init_context

	def render(self):
		for node in self.templates:
			node.globalspace = self.space
			node.activate()


	def __ilshift__(self, out_collection):
		out_collection.init(self.space)
		# self.space <<= out_collection.init(self.space)

		nodelist = self.nodelist
		for node in out_collection:
			name = node.name
			if name not in nodelist or not nodelist[name].text.strip():
				self.nodelist[name] = node
			else:
				if node.is_context and nodelist[name].is_context:
					nodelist[name].value <<= node.value
					self.space += nodelist[name].value
		
		return self
	
	def __iter__(self):
		return self.nodelist.itervalues()
	
	def tospace(self):
		return Space((node.name, node.value) for node in self)
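# A hedged usage sketch for the NodeCollection class above (illustrative only:
# "source" and "outspace" are hypothetical inputs, and NodeBuilder / Space are
# assumed to be provided by the surrounding module):
#
#   collection = NodeCollection.parse(source)
#   collection.init_context(outspace)  # evaluate context nodes into the space
#   collection.render()                # render template nodes
#   namespace = collection.tospace()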
Exemple #19
0
class Root(object):
    """Container for all the processed data."""

    #: An OrderedDict mapping entry names to Entry instances.
    entries = None

    #: A list of Gloss instances.
    glosses = None

    #: A dict mapping stems in definitions to lists of Entry instances.
    definition_stems = None

    #: A dict mapping stems in notes to lists of Entry instances.
    note_stems = None

    #: A dict mapping the stem of glosses to lists of Gloss instances.
    gloss_stems = None

    #: A dict mapping grammatical classes to font sizes in ems.
    class_scales = None

    #: A dict mapping grammatical classes to lists
    #: of ``[chapter, section]`` lists.
    cll = None

    #: A dict mapping grammatical classes to terminating grammatical classes.
    terminators = None

    #: A string that changes if the database changes.
    etag = None

    def __init__(self, db):
        cfg = db.app.config

        root_path = db.app.root_path
        jbovlaste = cfg.get('VLASISKU_JBOVLASTE', 'data/jbovlaste.xml')
        class_scales = cfg.get('VLASISKU_CLASS_SCALES',
                               'data/class-scales.yml')
        cll = cfg.get('VLASISKU_CLL', 'data/cll.yml')
        terminators = cfg.get('VLASISKU_TERMINATORS', 'data/terminators.yml')

        self.class_scales = load_yaml(join(root_path, class_scales))
        self.cll = load_yaml(join(root_path, cll))
        self.terminators = load_yaml(join(root_path, terminators))

        with open(join(root_path, jbovlaste)) as f:
            xml = ElementTree.parse(f)
            self._load_entries(xml)
            self._load_glosses(xml)

        self.etag = str(getmtime(join(root_path, jbovlaste)))

    def query(self, query):
        """Query database with query language.

        >>> from vlasisku.extensions import database
        >>> len(database.root.query('class:UI4')['matches'])
        6

        """
        parsed_query = parse_query(query)
        matches = set()
        entry = self.entries.get(query, None)
        if entry:
            matches.add(entry)

        if parsed_query['all']:
            words = []

            glosses = self.matches_gloss(parsed_query['all'], matches)
            matches.update(g.entry for g in glosses)

            affix = self.matches_affix(parsed_query['all'], matches)
            matches.update(affix)

            classes = self.matches_class(parsed_query['all'], matches)
            classes += [e for e in self.entries.itervalues()
                          if e.grammarclass
                          and e not in classes
                          and re.split(r'[0-9*]', e.grammarclass)[0] == query]
            matches.update(classes)

            types = self.matches_type(parsed_query['all'], matches)
            matches.update(types)

            definitions = self.matches_definition(parsed_query['all'], matches)
            matches.update(definitions)

            notes = self.matches_notes(parsed_query['all'], matches)
            matches.update(notes)

        else:
            words = self.matches_word(parsed_query['word'])
            matches.update(words)

            glosses = self.matches_gloss(parsed_query['gloss'], matches)
            matches.update(g.entry for g in glosses)

            affix = self.matches_affix(parsed_query['affix'], matches)
            matches.update(affix)

            classes = self.matches_class(parsed_query['class'], matches)
            matches.update(classes)

            types = self.matches_type(parsed_query['type'], matches)
            matches.update(types)

            definitions = self.matches_definition(parsed_query['definition'], matches)
            matches.update(definitions)

            notes = self.matches_notes(parsed_query['notes'], matches)
            matches.update(notes)

        if parsed_query['word']:
            matches = set(e for e in self.matches_word(parsed_query['word'])
                            if e in matches)
        if parsed_query['gloss']:
            matches = set(g.entry for g in self.matches_gloss(parsed_query['gloss'])
                                  if g.entry in matches)
        if parsed_query['affix']:
            matches = set(e for e in self.matches_affix(parsed_query['affix'])
                            if e in matches)
        if parsed_query['class']:
            matches = set(e for e in self.matches_class(parsed_query['class'])
                            if e in matches)
        if parsed_query['type']:
            matches = set(e for e in self.matches_type(parsed_query['type'])
                            if e in matches)
        if parsed_query['definition']:
            matches = set(e for e
                            in self.matches_definition(parsed_query['definition'])
                            if e in matches)
        if parsed_query['notes']:
            matches = set(e for e in self.matches_notes(parsed_query['notes'])
                            if e in matches)

        words = [e for e in words if e in matches]
        glosses = [g for g in glosses if g.entry in matches]
        affix = [e for e in affix if e in matches]
        classes = [e for e in classes if e in matches]
        types = [e for e in types if e in matches]
        definitions = [e for e in definitions if e in matches]
        notes = [e for e in notes if e in matches]

        results = dict(locals())
        del results['self']
        return results

    def suggest(self, prefix):
        suggestions = []
        types = []
        entries = (e for e in self.entries.iterkeys()
                     if e.startswith(prefix))
        glosses = (g.gloss for g in self.glosses
                           if g.gloss.startswith(prefix))
        classes = set(e.grammarclass for e in self.entries.itervalues()
                                     if e.grammarclass
                                     and e.grammarclass.startswith(prefix))
        for x in xrange(5):
            with ignore(StopIteration):
                suggestions.append(entries.next())
                types.append(self.entries[suggestions[-1]].type)
            with ignore(StopIteration):
                gloss = glosses.next()
                if ' ' in gloss:
                    suggestions.append('"%s"' % gloss)
                else:
                    suggestions.append(gloss)
                types.append('gloss')
            with ignore(KeyError):
                suggestions.append(classes.pop())
                types.append('class')
        return [prefix, suggestions, types]

    @selector
    def matches_word(self, queries, exclude):
        return (e for q in queries
                  for e in self.entries.itervalues()
                  if fnmatch(e.word, q))

    @selector
    def matches_gloss(self, queries, exclude):
        queries = (stem(q.lower()) for q in queries)
        return (g for q in queries
                  for g in self.gloss_stems.get(q, [])
                  if all(g in self.gloss_stems.get(q, []) for q in queries)
                  if g.entry not in exclude)

    @selector
    def matches_affix(self, queries, exclude):
        return (e for e in self.entries.itervalues()
                  if e not in exclude
                  for q in queries
                  if any(fnmatch(a, q) for a in e.searchaffixes))

    @selector
    def matches_class(self, queries, exclude):
        return (e for q in queries
                  for e in self.entries.itervalues()
                  if e not in exclude
                  if q == e.grammarclass)

    @selector
    def matches_type(self, queries, exclude):
        return (e for q in queries
                  for e in self.entries.itervalues()
                  if e not in exclude
                  if fnmatch(e.type, q))

    @selector
    def matches_definition(self, queries, exclude):
        queries = (stem(q.lower()) for q in queries)
        return (e for q in queries
                  for e in self.definition_stems.get(q, [])
                  if all(e in self.definition_stems.get(q, [])
                         for q in queries)
                  if e not in exclude)

    @selector
    def matches_notes(self, queries, exclude):
        queries = (stem(q.lower()) for q in queries)
        return (e for q in queries
                  for e in self.note_stems.get(q, [])
                  if all(e in self.note_stems.get(q, []) for q in queries)
                  if e not in exclude)

    def _load_entries(self, xml):
        processors = {'rafsi': self._process_rafsi,
                      'selmaho': self._process_selmaho,
                      'definition': self._process_definition,
                      'notes': self._process_notes}

        self.entries = OrderedDict()
        self.definition_stems = {}
        self.note_stems = {}

        for type, _ in TYPES:
            for valsi in xml.findall('.//valsi'):
                if valsi.get('type') == type:
                    entry = Entry(self)
                    entry.type = type
                    entry.word = valsi.get('word')

                    if type in ('gismu', 'experimental gismu'):
                        entry.searchaffixes.append(entry.word)
                        entry.searchaffixes.append(entry.word[0:4])

                    for child in valsi.getchildren():
                        tag, text = child.tag, child.text
                        processors.get(tag, lambda a,b: None)(entry, text)

                    self.entries[entry.word] = entry

        for entry in self.entries.itervalues():
            if entry.notes:
                entry.notes = braces2links(entry.notes, self.entries)


    def _process_rafsi(self, entry, text):
        entry.affixes.append(text)
        entry.searchaffixes.append(text)

    def _process_selmaho(self, entry, text):
        entry.grammarclass = text
        for grammarclass, terminator in self.terminators.iteritems():
            if text == grammarclass:
                entry.terminator = terminator
            if text == terminator:
                entry.terminates.append(grammarclass)
        if text in self.cll:
            for path in self.cll[text]:
                section = '%s.%s' % tuple(path)
                link = 'http://dag.github.io/cll/%s/%s/'
                entry.cll.append((section, link % tuple(path)))

    def _process_definition(self, entry, text):
        if text is None:
            text = ""
        entry.definition = tex2html(text)
        entry.textdefinition = strip_html(entry.definition)
        tokens = re.findall(r"[\w']+", text, re.UNICODE)
        for token in set(tokens):
            add_stems(token, self.definition_stems, entry)

    def _process_notes(self, entry, text):
        entry.notes = tex2html(text)
        entry.textnotes = strip_html(entry.notes)
        tokens = re.findall(r"[\w']+", text, re.UNICODE)
        for token in set(tokens):
            add_stems(token, self.note_stems, entry)


    def _load_glosses(self, xml):
        self.glosses = []
        self.gloss_stems = {}
        for type, _ in TYPES:
            for word in xml.findall('.//nlword'):
                entry = self.entries[word.get('valsi')]
                if entry.type == type:
                    gloss = Gloss()
                    gloss.gloss = word.get('word')
                    gloss.entry = entry
                    gloss.sense = word.get('sense')
                    gloss.place = word.get('place')
                    self.glosses.append(gloss)
                    add_stems(gloss.gloss, self.gloss_stems, gloss)
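# A standalone sketch of the stem-index pattern used by matches_definition and
# matches_notes above: every token of a text is stemmed into a dict mapping
# stems to the entries containing them, and a multi-word query is answered by
# intersecting the per-stem lists. naive_stem is an illustrative stand-in for
# the real stem() / add_stems() helpers, and the sample entries are made up.
def naive_stem(word):
    return word.lower().rstrip("s")

definition_stems = {}
sample_entries = [("gismu", "root words of Lojban"), ("cmavo", "structure words")]
for name, definition in sample_entries:
    for token in definition.split():
        definition_stems.setdefault(naive_stem(token), []).append(name)

query = ["word"]
matches = [name for name in definition_stems.get(naive_stem(query[0]), [])
           if all(name in definition_stems.get(naive_stem(q), []) for q in query)]
print(matches)  # ['gismu', 'cmavo']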
    class HybridView(newcls, JSONResponseMixin):
        """Middleware class which add the JSONResponseMixin in the view to
        handle ajax requests"""

        def __init__(self, *args, **kwargs):
            """Our newcls can be an instance of several mixins working with the
            get and post functions.
            e.g : CreateView is instance of BaseCreateView and ProcessFormView
            Let's add our custom mixins that implement get and post without
            returning a render_to_response, and call """

            newcls.__init__(self, **kwargs)
            # The order matters for the get/post calls.
            self.mixins = OrderedDict()
            self.mixins[BaseListView] = BaseListViewMixin
            self.mixins[BaseCreateView] = BaseCreateViewMixin
            self.mixins[BaseUpdateView] = BaseUpdateViewMixin
            self.mixins[BaseDetailView] = BaseDetailViewMixin
            self.mixins[BaseDeleteView] = BaseDeleteViewMixin
            self.mixins[ProcessFormView] = ProcessFormViewMixin
            # iterate over a copy: popping while iterating the dict directly
            # would raise a RuntimeError
            for baseView in list(self.mixins):
                if not isinstance(self, baseView):
                    self.mixins.pop(baseView)

        @property
        def is_ajax(self):
            """Check the META HTTP_X_REQUESTED_WITH and CONTENT_TYPE"""
            meta = self.request.META
            return meta.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'\
                or "json" in meta.get("CONTENT_TYPE", "")

        def render_to_response(self, context):
            cls = type(self).__bases__[self.is_ajax]
            return cls.render_to_response(self, context)

        def get(self, request, **kwargs):
            if not self.is_ajax:
                # If it's not ajax, return the inherited get
                return super(HybridView, self).get(request, **kwargs)

            for mixin in self.mixins.itervalues():
                self, kwargs = mixin().get(self, **kwargs)

            context = kwargs
            context.update(self.get_json_context(**kwargs))
            context.pop("form", None)
            context.pop("object", None)
            context.pop("object_list", None)
            return self.render_to_response(context)

        def post(self, request, **kwargs):
            """Hybrid post to handle all parents post actions"""
            if not self.is_ajax:
                # If it's not ajax, return the inherited post
                return super(HybridView, self).post(request, **kwargs)

            for mixin in self.mixins.itervalues():
                try:
                    self, kwargs = mixin().post(self, **kwargs)
                except AttributeError:
                    """The inherited view doesn't handle post"""
                    pass

            context = kwargs
            context.update(self.get_json_context(**kwargs))
            context.pop("form", None)
            return self.render_to_response(context)
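The HybridView above relies on a JSONResponseMixin and several Base*ViewMixin helpers that are not shown in this excerpt. As a rough, self-contained sketch of the underlying idea, serving JSON to ajax clients and HTML to everyone else from a single Django view, something like the following would do (AjaxAwareView, template_name and the context values are illustrative, not the project's API):

# Minimal, illustrative content negotiation sketch; all names are assumptions.
from django.http import JsonResponse
from django.shortcuts import render
from django.views.generic import View

class AjaxAwareView(View):
    template_name = "example.html"  # hypothetical template

    def _is_ajax(self, request):
        meta = request.META
        return (meta.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'
                or "json" in meta.get("CONTENT_TYPE", ""))

    def get(self, request, **kwargs):
        context = {"message": "hello"}
        if self._is_ajax(request):
            return JsonResponse(context)  # ajax clients get JSON
        return render(request, self.template_name, context)  # others get HTML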
def getSphinxDocumentationApi(packages, cloneDirectory, outputDirectory, apiFile):
	"""
	This definition builds the Sphinx documentation API files.

	:param packages: Packages. ( String )
	:param cloneDirectory: Source clone directory. ( String )
	:param outputDirectory: Content directory. ( String )
	:param apiFile: API file. ( String )
	"""

	LOGGER.info("{0} | Building Sphinx documentation API!".format(getSphinxDocumentationApi.__name__))

	if os.path.exists(cloneDirectory):
		shutil.rmtree(cloneDirectory)
	os.makedirs(cloneDirectory)

	packagesModules = {"apiModules" : [],
					"testsModules" : []}
	for package in packages.split(","):
		package = __import__(package)
		path = foundations.common.getFirstItem(package.__path__)
		sourceDirectory = os.path.dirname(path)

		for file in sorted(list(foundations.walkers.filesWalker(sourceDirectory, filtersIn=("{0}.*\.ui$".format(path),)))):
			LOGGER.info("{0} | Ui file: '{1}'".format(getSphinxDocumentationApi.__name__, file))
			targetDirectory = os.path.dirname(file).replace(sourceDirectory, "")
			directory = "{0}{1}".format(cloneDirectory, targetDirectory)
			if not foundations.common.pathExists(directory):
				os.makedirs(directory)
			source = os.path.join(directory, os.path.basename(file))
			shutil.copyfile(file, source)

		modules = []
		for file in sorted(list(foundations.walkers.filesWalker(sourceDirectory, filtersIn=("{0}.*\.py$".format(path),),
		filtersOut=EXCLUDED_PYTHON_MODULES))):
			LOGGER.info("{0} | Python file: '{1}'".format(getSphinxDocumentationApi.__name__, file))
			module = "{0}.{1}" .format((".".join(os.path.dirname(file).replace(sourceDirectory, "").split("/"))),
												foundations.strings.getSplitextBasename(file)).strip(".")
			LOGGER.info("{0} | Module name: '{1}'".format(getSphinxDocumentationApi.__name__, module))
			directory = os.path.dirname(os.path.join(cloneDirectory, module.replace(".", "/")))
			if not foundations.common.pathExists(directory):
				os.makedirs(directory)
			source = os.path.join(directory, os.path.basename(file))
			shutil.copyfile(file, source)

			sourceFile = File(source)
			sourceFile.cache()
			trimFromIndex = trimEndIndex = None
			inMultilineString = inDecorator = False
			for i, line in enumerate(sourceFile.content):
				# Remember where the '__main__' block starts so it can be trimmed later.
				if re.search(r"__name__ +\=\= +\"__main__\"", line):
					trimFromIndex = i
				# Apply the content substitutions line by line.
				for pattern, value in CONTENT_SUBSTITUTIONS.iteritems():
					if re.search(pattern, line):
						sourceFile.content[i] = re.sub(pattern, value, line)

				strippedLine = line.strip()
				if re.search(r"^\"\"\"", strippedLine):
					inMultilineString = not inMultilineString

				if inMultilineString:
					continue

				if re.search(r"^@\w+", strippedLine) and \
				not re.search(r"@property", strippedLine) and \
				not re.search(r"^@\w+\.setter", strippedLine) and \
				not re.search(r"^@\w+\.deleter", strippedLine):
					inDecorator = True
					indent = re.search(r"^([ \t]*)", line)

				if re.search(r"^[ \t]*def \w+", sourceFile.content[i]) or \
					re.search(r"^[ \t]*class \w+", sourceFile.content[i]):
					inDecorator = False

				if not inDecorator:
					continue

				sourceFile.content[i] = "{0}{1} {2}".format(indent.groups()[0], DECORATORS_COMMENT_MESSAGE, line)

			if trimFromIndex is not None:
				LOGGER.info("{0} | Trimming '__main__' statements!".format(getSphinxDocumentationApi.__name__))
				content = [sourceFile.content[i] for i in range(trimFromIndex)]
				content.append("{0}\n".format(STATEMENTS_UPDATE_MESSAGGE))
				sourceFile.content = content
			sourceFile.write()

			if "__init__.py" in file:
				continue

			rstFilePath = "{0}{1}".format(module, FILES_EXTENSION)
			LOGGER.info("{0} | Building API file: '{1}'".format(getSphinxDocumentationApi.__name__, rstFilePath))
			rstFile = File(os.path.join(outputDirectory, rstFilePath))
			header = ["_`{0}`\n".format(module),
					"==={0}\n".format("="*len(module)),
					"\n",
					".. automodule:: {0}\n".format(module),
					"\n"]
			rstFile.content.extend(header)

			functions = OrderedDict()
			classes = OrderedDict()
			moduleAttributes = OrderedDict()
			for member, object in moduleBrowser._readmodule(module, [source, ]).iteritems():
				if object.__class__ == moduleBrowser.Function:
					if not member.startswith("_"):
						functions[member] = [".. autofunction:: {0}\n".format(member)]
				elif object.__class__ == moduleBrowser.Class:
					classes[member] = [".. autoclass:: {0}\n".format(member),
										"	:show-inheritance:\n",
										"	:members:\n"]
				elif object.__class__ == moduleBrowser.Global:
					if not member.startswith("_"):
						moduleAttributes[member] = [".. attribute:: {0}.{1}\n".format(module, member)]

			moduleAttributes and rstFile.content.append("Module Attributes\n-----------------\n\n")
			for moduleAttribute in moduleAttributes.itervalues():
				rstFile.content.extend(moduleAttribute)
				rstFile.content.append("\n")

			functions and rstFile.content.append("Functions\n---------\n\n")
			for function in functions.itervalues():
				rstFile.content.extend(function)
				rstFile.content.append("\n")

			classes and rstFile.content.append("Classes\n-------\n\n")
			for class_ in classes.itervalues():
				rstFile.content.extend(class_)
				rstFile.content.append("\n")

			rstFile.write()
			modules.append(module)

		packagesModules["apiModules"].extend([module for module in modules if not "tests" in module])
		packagesModules["testsModules"].extend([module for module in modules if "tests" in module])

	apiFile = File(apiFile)
	apiFile.content.extend(TOCTREE_TEMPLATE_BEGIN)
	for module in packagesModules["apiModules"]:
		apiFile.content.append("   {0} <{1}>\n".format(module, "api/{0}".format(module)))
	for module in packagesModules["testsModules"]:
		apiFile.content.append("   {0} <{1}>\n".format(module, "api/{0}".format(module)))
	apiFile.content.extend(TOCTREE_TEMPLATE_END)
	apiFile.write()
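For a single module, the loop above produces a reStructuredText file roughly like the one assembled below; foundations.strings and its members are placeholder names used only to show the shape of the output:

# Rough illustration of the .rst skeleton built per module; names are examples.
module = "foundations.strings"
content = ["_`{0}`\n".format(module),
           "==={0}\n".format("=" * len(module)),
           "\n",
           ".. automodule:: {0}\n".format(module),
           "\n",
           "Functions\n---------\n\n",
           ".. autofunction:: getSplitextBasename\n",
           "\n",
           "Classes\n-------\n\n",
           ".. autoclass:: Strings\n",
           "\t:show-inheritance:\n",
           "\t:members:\n",
           "\n"]
print("".join(content))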
Exemple #22
0
def buildApi(packages, input, output, sanitizer, excludedModules=None):
    """
	Builds the Sphinx documentation API.

	:param packages: Packages to include in the API.
	:type packages: list
	:param input: Input modules directory.
	:type input: unicode
	:param output: Output reStructuredText files directory.
	:type output: unicode
	:param sanitizer: Sanitizer python module.
	:type sanitizer: unicode
	:param excludedModules: Excluded modules.
	:type excludedModules: list
	:return: Definition success.
	:rtype: bool
	"""

    LOGGER.info("{0} | Building Sphinx documentation API!".format(
        buildApi.__name__))

    sanitizer = importSanitizer(sanitizer)

    if os.path.exists(input):
        shutil.rmtree(input)
    os.makedirs(input)

    excludedModules = [] if excludedModules is None else excludedModules

    packagesModules = {"apiModules": [], "testsModules": []}
    for package in packages:
        package = __import__(package)
        path = foundations.common.getFirstItem(package.__path__)
        packageDirectory = os.path.dirname(path)

        for file in sorted(
                list(
                    foundations.walkers.filesWalker(
                        packageDirectory,
                        filtersIn=("{0}.*\.ui$".format(path), )))):
            LOGGER.info("{0} | Ui file: '{1}'".format(buildApi.__name__, file))
            targetDirectory = os.path.dirname(file).replace(
                packageDirectory, "")
            directory = "{0}{1}".format(input, targetDirectory)
            if not foundations.common.pathExists(directory):
                os.makedirs(directory)
            source = os.path.join(directory, os.path.basename(file))
            shutil.copyfile(file, source)

        modules = []
        for file in sorted(
                list(
                    foundations.walkers.filesWalker(
                        packageDirectory,
                        filtersIn=("{0}.*\.py$".format(path), ),
                        filtersOut=excludedModules))):
            LOGGER.info("{0} | Python file: '{1}'".format(
                buildApi.__name__, file))
            module = "{0}.{1}".format(
                (".".join(
                    os.path.dirname(file).replace(packageDirectory,
                                                  "").split("/"))),
                foundations.strings.getSplitextBasename(file)).strip(".")
            LOGGER.info("{0} | Module name: '{1}'".format(
                buildApi.__name__, module))
            directory = os.path.dirname(
                os.path.join(input, module.replace(".", "/")))
            if not foundations.common.pathExists(directory):
                os.makedirs(directory)
            source = os.path.join(directory, os.path.basename(file))
            shutil.copyfile(file, source)

            sanitizer.bleach(source)

            if "__init__.py" in file:
                continue

            rstFilePath = "{0}{1}".format(module, FILES_EXTENSION)
            LOGGER.info("{0} | Building API file: '{1}'".format(
                buildApi.__name__, rstFilePath))
            rstFile = File(os.path.join(output, rstFilePath))
            header = [
                "_`{0}`\n".format(module),
                "==={0}\n".format("=" * len(module)), "\n",
                ".. automodule:: {0}\n".format(module), "\n"
            ]
            rstFile.content.extend(header)

            functions = OrderedDict()
            classes = OrderedDict()
            moduleAttributes = OrderedDict()
            for member, object in moduleBrowser._readmodule(
                    module, [
                        source,
                    ]).iteritems():
                if object.__class__ == moduleBrowser.Function:
                    if not member.startswith("_"):
                        functions[member] = [
                            ".. autofunction:: {0}\n".format(member)
                        ]
                elif object.__class__ == moduleBrowser.Class:
                    classes[member] = [
                        ".. autoclass:: {0}\n".format(member),
                        "	:show-inheritance:\n", "	:members:\n"
                    ]
                elif object.__class__ == moduleBrowser.Global:
                    if not member.startswith("_"):
                        moduleAttributes[member] = [
                            ".. attribute:: {0}.{1}\n".format(module, member)
                        ]

            moduleAttributes and rstFile.content.append(
                "Module Attributes\n-----------------\n\n")
            for moduleAttribute in moduleAttributes.itervalues():
                rstFile.content.extend(moduleAttribute)
                rstFile.content.append("\n")

            functions and rstFile.content.append("Functions\n---------\n\n")
            for function in functions.itervalues():
                rstFile.content.extend(function)
                rstFile.content.append("\n")

            classes and rstFile.content.append("Classes\n-------\n\n")
            for class_ in classes.itervalues():
                rstFile.content.extend(class_)
                rstFile.content.append("\n")

            rstFile.write()
            modules.append(module)

        packagesModules["apiModules"].extend(
            [module for module in modules if not "tests" in module])
        packagesModules["testsModules"].extend(
            [module for module in modules if "tests" in module])

    apiFile = File("{0}{1}".format(output, FILES_EXTENSION))
    apiFile.content.extend(TOCTREE_TEMPLATE_BEGIN)
    for module in packagesModules["apiModules"]:
        apiFile.content.append("   {0} <{1}>\n".format(
            module, "api/{0}".format(module)))
    for module in packagesModules["testsModules"]:
        apiFile.content.append("   {0} <{1}>\n".format(
            module, "api/{0}".format(module)))
    apiFile.content.extend(TOCTREE_TEMPLATE_END)
    apiFile.write()

    return True
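A call to buildApi might look like the sketch below; the package, directories, sanitizer path and exclusion pattern are placeholders, with excludedModules following the same regex convention as the filtersOut argument of filesWalker used above:

# Hypothetical invocation; every path and name below is a placeholder.
if buildApi(packages=["foundations"],
            input="docs/_build/modules",
            output="docs/_build/api",
            sanitizer="utilities/sanitizer.py",
            excludedModules=["\w*tests\w*"]):
    LOGGER.info("Sphinx documentation API built!")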