Example #1
    def begetDataBody(self, *pa, **kwa):
        """ Returns duple (data,body) for request from arguments
            positional arguments may be either strings, sequences of key,value
            duples, or dictionaries.
            
            Body is string concated from all string positional arguments.
            Non string positional arguments and keyword arguments are ignored.
            
            Data is an ordereddict where the items are extracted from
            self.preloads, any sequenced duples and/or dicts in the positonal
            arguments, and from the keyword arguments.
            String positional arguments are ignored.
        """
        body = ''
        data = ODict(self.preloads) #load defaults

        for a in pa: #extract key/value pairs from positional parameters
            if isinstance(a, basestring): # string
                body = '%s%s' % (body, a) # concat
            else: # dict or sequence of duples
                data.update(a)
    
        data.update(kwa) # now insert the keyword arguments
        
        if isinstance(body, unicode):
            body = body.encode('utf-8')
            if 'utf-8' not in self.headers['content-type']:
                self.headers['content-type'] += '; charset=utf-8'        
        
        return (data, body)
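For context, a self-contained re-sketch of the same pattern (hypothetical, with the preloads passed in explicitly instead of read from self): string positionals are concatenated into the body, while duple sequences, dicts and keyword arguments are merged into the data dict.

from collections import OrderedDict

def beget_data_body(preloads, *pa, **kwa):
    # Illustrative stand-in for the method above, outside of any class.
    body = ''
    data = OrderedDict(preloads)      # load defaults
    for a in pa:
        if isinstance(a, str):        # strings are concatenated into the body
            body += a
        else:                         # dicts or sequences of (key, value) duples
            data.update(a)
    data.update(kwa)                  # keyword arguments are merged in last
    return data, body

data, body = beget_data_body({'token': 'abc'}, 'hello ', 'world', [('a', 1)], b=2)
# data -> OrderedDict([('token', 'abc'), ('a', 1), ('b', 2)]), body -> 'hello world'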
Example #2
    def get(self, row, columns=None, column_start=None, super_column=None, column_finish=None, column_count=100):
        try:
            if super_column is None:
                data_columns = self.data[row]
            else:
                data_columns = self.data[row][super_column]
            results = OrderedDict()
            count = 0
            if columns is not None:
                for c in columns:
                    results[c] = data_columns[c]
            else:
                for c in sorted(data_columns.keys()):
                    if count >= column_count:
                        break
                    if column_start and cmp(c,column_start) < 0:
                        continue
                    if column_finish and cmp(c, column_finish) > 0:
                        break

                    results[c] = data_columns[c]
                    count += 1
            if not len(results):
                raise NotFoundException
            for key, value in results.items():
                if isinstance(value, dict) and len(value) == 0:
                    del(results[key])
                if value is None:
                    del(results[key])
            return results
        except KeyError:
            raise NotFoundException
class EmailResponder(plugins.Responder):
    def __init__(self, optionMap, *args):
        plugins.Responder.__init__(self)
        self.runId = optionMap.get("name", calculateBatchDate()) # use the command-line name if given, else the date
        self.batchAppData = OrderedDict()
        self.allApps = OrderedDict()

    def notifyComplete(self, test):
        if test.app.emailEnabled():
            if not self.batchAppData.has_key(test.app):
                self.addApplication(test)
            self.batchAppData[test.app].storeCategory(test)

    def getRootSuite(self, test):
        if test.parent:
            return self.getRootSuite(test.parent)
        else:
            return test
       
    def addApplication(self, test):
        rootSuite = self.getRootSuite(test)
        app = test.app
        self.batchAppData[app] = BatchApplicationData(rootSuite)
        self.allApps.setdefault(app.name, []).append(app)
        
    def notifyAllComplete(self):
        mailSender = MailSender(self.runId)
        for appList in self.allApps.values():
            batchDataList = map(self.batchAppData.get, appList)
            mailSender.send(batchDataList)
Example #4
def all_plugins():
    """Return a dict of plugin name -> Plugin for all plugins, including core.

    Plugins are registered via the ``dxr.plugins`` setuptools entry point,
    which may point to either a module (in which case a Plugin will be
    constructed based on the contents of the module namespace) or a Plugin
    object (which will be returned directly). The entry point name is what the
    user types into the config file under ``enabled_plugins``.

    The core plugin, which provides many of DXR's cross-language, built-in
    features, is always the first plugin when iterating over the returned
    dict. This lets other plugins override bits of its elasticsearch mappings
    and analyzers when we're building up the schema.

    """
    global _plugin_cache

    def name_and_plugin(entry_point):
        """Return the name of an entry point and the Plugin it points to."""
        object = entry_point.load()
        plugin = (object if isinstance(object, Plugin) else
                  Plugin.from_namespace(object.__dict__))
        plugin.name = entry_point.name
        return entry_point.name, plugin

    if _plugin_cache is None:
        # Iterating over entrypoints could be kind of expensive, with the FS
        # reads and all.
        _plugin_cache = OrderedDict([('core', core_plugin())])
        _plugin_cache.update(name_and_plugin(point) for point in
                             iter_entry_points('dxr.plugins'))

    return _plugin_cache
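A hedged, self-contained sketch of the same caching idiom (the names below are illustrative, not part of dxr): build the OrderedDict once, seed it with a fixed 'core' entry so that entry always iterates first, and reuse it on later calls.

from collections import OrderedDict

_cache = None

def discover(extra_plugins):
    # Illustrative stand-in for all_plugins(): cache an OrderedDict whose
    # first key is always 'core', then merge in whatever else is discovered.
    global _cache
    if _cache is None:
        _cache = OrderedDict([('core', 'core-plugin-object')])
        _cache.update(extra_plugins)
    return _cache

print(list(discover([('clang', object()), ('python', object())])))
# ['core', 'clang', 'python'] -- 'core' always comes first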
Example #5
 def get_field_names_for_question_set(questions, respondent_set):
     fields = OrderedDict()
     for qu in questions:
         field_names = CsvFieldGenerator.get_field_names_for_question(qu, respondent_set)
         if field_names is not None:
             fields = OrderedDict(fields.items() + field_names.items())
     return fields
class JUnitResponder(plugins.Responder):
    """Respond to test results and write out results in format suitable for JUnit
    report writer. Only does anything if the app has batch_junit_format:true in its config file """
    
    def __init__(self, optionMap, *args):
        plugins.Responder.__init__(self)
        self.runId = optionMap.get("name", calculateBatchDate()) # use the command-line name if given, else the date
        self.allApps = OrderedDict()
        self.appData = OrderedDict()

    def useJUnitFormat(self, app):
        return app.getBatchConfigValue("batch_junit_format") == "true"
    
    def notifyComplete(self, test):
        if not self.useJUnitFormat(test.app):
            return
        if not self.appData.has_key(test.app):
            self._addApplication(test)
        self.appData[test.app].storeResult(test)
        
    def notifyAllComplete(self):
        # allApps is {appname : [app]}
        for appList in self.allApps.values():
            # appData is {app : data}
            for app in appList:
                if self.useJUnitFormat(app):
                    data = self.appData[app]
                    ReportWriter(self.runId).writeResults(app, data)
      
    def _addApplication(self, test):
        app = test.app
        self.appData[app] = JUnitApplicationData()
        self.allApps.setdefault(app.name, []).append(app)
Example #7
def kmeans_get_cluster_values(data, clusters):
    values = OrderedDict()
    for dataKey, cluster in clusters.items():
        if not values.has_key(cluster):
            values[cluster] = []
        values[cluster].append(data[dataKey])
    return values
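A hypothetical usage sketch: clusters maps each data key to a cluster id, and the helper groups the corresponding data values per cluster (as written it relies on dict.has_key, so it targets Python 2; cluster order follows the iteration order of clusters.items()).

data = {'p1': [1.0, 2.0], 'p2': [1.1, 1.9], 'p3': [8.0, 8.2]}
clusters = {'p1': 0, 'p2': 0, 'p3': 1}
values = kmeans_get_cluster_values(data, clusters)
# values groups the points by cluster id, e.g.
# OrderedDict([(0, [[1.0, 2.0], [1.1, 1.9]]), (1, [[8.0, 8.2]])])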
class TestIteratorMap:
    def __init__(self, dynamic, allApps):
        self.dict = OrderedDict()
        self.dynamic = dynamic
        self.parentApps = {}
        for app in allApps:
            for extra in [ app ] + app.extras:
                self.parentApps[extra] = app
    def getKey(self, test):
        if self.dynamic:
            return test
        elif test is not None:
            return self.parentApps.get(test.app, test.app), test.getRelPath()
    def store(self, test, iter):
        self.dict[self.getKey(test)] = iter
    def updateIterator(self, test, oldRelPath):
        # relative path of test has changed
        key = self.parentApps.get(test.app, test.app), oldRelPath
        iter = self.dict.get(key)
        if iter is not None:
            self.store(test, iter)
            del self.dict[key]
            return iter
        else:
            return self.getIterator(test)

    def getIterator(self, test):
        return self.dict.get(self.getKey(test))

    def remove(self, test):
        key = self.getKey(test)
        if self.dict.has_key(key):
            del self.dict[key]
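updateIterator above re-keys an entry when a test's relative path changes; here is a self-contained sketch of that re-keying pattern on a plain OrderedDict (the keys below are made up for illustration).

from collections import OrderedDict

d = OrderedDict([(('app', 'old/relpath'), 'iterator-1')])
old_key, new_key = ('app', 'old/relpath'), ('app', 'new/relpath')
value = d.get(old_key)
if value is not None:
    d[new_key] = value    # store under the new key first...
    del d[old_key]        # ...then drop the stale entry
print(d)                  # OrderedDict([(('app', 'new/relpath'), 'iterator-1')])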
Example #9
def parse(tokens):
    if tokens[0] == "{":
        ret = OrderedDict()
        tokens = tokens[1:]
        while tokens[0] != "}":
            key = tokens[0]
            tokens = tokens[1:]

            tokens = tokens[1:]  # :

            value, tokens = parse(tokens)

            if tokens[0] == ",":
                tokens = tokens[1:]

            ret[key] = value
        tokens = tokens[1:]
        return ret, tokens
    elif tokens[0] == "[":
        ret = []
        tokens = tokens[1:]
        while tokens[0] != "]":
            value, tokens = parse(tokens)
            if tokens[0] == ",":
                tokens = tokens[1:]
            ret.append(value)
        tokens = tokens[1:]
        return ret, tokens
    else:
        return tokens[0], tokens[1:]
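A hypothetical usage sketch for the parser above: it expects an already-tokenised list in which every brace, bracket, colon, comma, key and scalar value is a separate token, and it returns the parsed value together with the unconsumed tokens.

tokens = ["{", "a", ":", "1", ",", "b", ":", "[", "2", ",", "3", "]", "}"]
value, rest = parse(tokens)
# value -> OrderedDict([('a', '1'), ('b', ['2', '3'])]), rest -> []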
    def _parse(self, menu):
        di = OrderedDict()
        for value in menu:
            id = value.get('id', value.get('title'))
            extra = value.get('extra', None)
            separator = False
            if extra:
                separator = extra.get('separator', None)

            title = value.get('title')
            if isinstance(title, str):
                translated_title = translate(_p(title), context=self.request)
            else:
                translated_title = translate(value.get('title'), domain=_p, context=self.request)
            su = OrderedDict(label=translated_title,
                       action=self._action(value.get('action', '')),
                       icon=value.get('icon', ''),
                       _class=value.get('class', ''),
                       separator_before=separator,
                       _disabled=(not value.get('action', False) and separator) and True or False,
                      )

            if value.get('submenu', None):
                su.update(OrderedDict(submenu=self._parse(value.get('submenu'))))
            di[id] = su
        return di
def gen_mapping(infile):
    """Generate a mapping file based on infile csv
    """
    fh = open(infile)
    cont = fh.read()
    fh.close()
    row = cont.split(LINE_END)[0]

    from ordereddict import OrderedDict
    out = OrderedDict()
    for x in row.split(","):
        #y = space_word(x)
        out.update({ x : x})

    ret = """# coding: utf-8
'''
mapping from Google Contact standard field names to
our customized field names.
Edit it to make it fit your fields.

About Mapping:
-----------------
MAPPING = {
    "Goolge_Field" : "Our Field",
    ...
}

So we need to check and edit the left side of the dict (i.e. the Key)
to make the right side (i.e. the Value) match the left as closely as possible.
'''
"""
    from django.utils import simplejson
    ret += "MAPPING = " + simplejson.dumps(out, indent=4)
    outfile = open(OUT_FILE_NAME, "w")
    outfile.write(ret)
    outfile.close()
Example #12
    def __init__(self, file, mode='file'):
        """Start dividing 'file' in chunks.

        'file' is the file to chunk up. By default it is the name of a file
        on the filesystem, but with 'mode' set to 'string', 'file' is
        passed as a string.
        """

        log.debug("Initialising module parser for file '%s'.",
                  file)
        # Dictionary inits
        self.classes = OrderedDict()
        self.functions = OrderedDict()
        self.protectedSections = OrderedDict()
        self.protectionDeclarations = []
        # Read and mangle the file
        self.filebuf = self.readFile(file, mode)
        self.splitSource()
        # Note: ast = abstract syntax tree (python internal thingy),
        # generated by the imported 'parser'.
        self.filebuf = self.filebuf.encode(self.encoding)
        self.ast = parser.suite(self.filebuf)
            
        self.code = self.ast.compile()
        # The next two filter out the classes and the top-level
        # functions in the sourcefile and the protected
        # sections. Beware that the rest is left out!
        self.findClassesAndFunctions()
        self.findProtectedSections()
        self.findProtectionDeclarations()
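The constructor above leans on the standard library parser module to turn the mangled source buffer into a syntax tree and a compiled code object; here is a minimal self-contained sketch of just that step (the parser module is Python 2-era stdlib, deprecated and removed in Python 3.10).

import parser

source = "def f():\n    return 1\n"
ast = parser.suite(source)    # concrete syntax tree for the module source
code = ast.compile()          # compile the tree to a code object
exec(code)                    # defines f() in the current namespace
print(f())                    # 1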
 def test_delitem(self):
     pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
     od = OrderedDict(pairs)
     del od['a']
     self.assert_('a' not in od)
     self.assertRaises(KeyError, od.__delitem__, 'a')
     self.assertEqual(list(od.items()), pairs[:2] + pairs[3:])
 def test_clear(self):
     pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
     shuffle(pairs)
     od = OrderedDict(pairs)
     self.assertEqual(len(od), len(pairs))
     od.clear()
     self.assertEqual(len(od), 0)
Example #15
 def read_all(self):
     """read all available sheets"""
     result = OrderedDict()
     for sheet in self.native_book.sheets:
         data_dict = self.read_sheet(sheet)
         result.update(data_dict)
     return result
class ActionRunner(BaseActionRunner):
    def __init__(self, optionMap, *args):
        BaseActionRunner.__init__(self, optionMap, logging.getLogger("Action Runner"))
        self.currentTestRunner = None
        self.previousTestRunner = None
        self.appRunners = OrderedDict()

    def addSuite(self, suite):
        plugins.log.info("Using " + suite.app.description(includeCheckout=True))
        appRunner = ApplicationRunner(suite, self.diag)
        self.appRunners[suite.app] = appRunner

    def notifyAllReadAndNotified(self):
        # kicks off processing. Don't use notifyAllRead as we end up running all the tests before
        # everyone's been notified of the reading.
        self.runAllTests() 

    def notifyRerun(self, test):
        if self.currentTestRunner and self.currentTestRunner.test is test:
            self.diag.info("Got rerun notification for " + repr(test) + ", resetting actions")
            self.currentTestRunner.resetActionSequence()

    def runTest(self, test):
        # We have the lock coming in to here...
        appRunner = self.appRunners.get(test.app)
        if appRunner:
            self.lock.acquire()
            self.currentTestRunner = TestRunner(test, appRunner, self.diag, self.exited, self.killSignal)
            self.lock.release()

            self.currentTestRunner.performActions(self.previousTestRunner)
            self.previousTestRunner = self.currentTestRunner

            self.lock.acquire()
            self.currentTestRunner = None
            self.notifyComplete(test)
            self.lock.release()

    def killTests(self):
        if self.currentTestRunner:
            self.currentTestRunner.kill(self.killSignal)

    def killOrCancel(self, test):
        if self.currentTestRunner and self.currentTestRunner.test is test:
            self.currentTestRunner.kill()
        else:
            self.cancel(test)

    def getAllActionClasses(self):
        classes = set()
        for appRunner in self.appRunners.values():
            for action in appRunner.actionSequence:
                classes.add(action.__class__)
        return classes
            
    def cleanup(self):
        for actionClass in self.getAllActionClasses():
            actionClass.finalise()
        for appRunner in self.appRunners.values():
            appRunner.cleanActions()
 def addContents(self):
     dirToProperties = OrderedDict()
     props = self.getAllProperties()
     for prop in props:
         dirToProperties.setdefault(prop.dir, []).append(prop)
     vbox = self.createVBox(dirToProperties)
     self.dialog.vbox.pack_start(vbox, expand=True, fill=True)
Example #18
    def update_metadata(self, new_metadata):
        if not new_metadata:
            return
        print "Update metadata?"
        has_changes = False
        for key, val in new_metadata.items():
            if self.metadata.get(key) != val:
                has_changes = True
        if not has_changes:
            return
        print "Has changes"
        data = deepcopy(self.metadata)
        data.update(new_metadata)
        data = OrderedDict(sorted(data.items(), key=lambda t: t[0]))        

        new_json = json.dumps(data, indent=4)
        
        f = codecs.open(self.full_local_path, 'r', 'utf-8')
        org_content = f.read()
        f.close()
        def replacer(m):
            parts = m.group(0).split('\n')
            return parts[0].strip() + '\n' + new_json + '\n' + parts[-1].strip()

        new_content = self._json_comment_re.sub(replacer, org_content)
        print new_content
        if new_content == org_content:
            return
        print "Write new content"
        f = codecs.open(self.full_local_path, 'w', 'utf-8')
        f.write(new_content)
        f.close()
Example #19
def test_form(req):
    # get_var_info = {'len': len(args)}
    var_info = OrderedDict((
        ('form_keys', req.form.keys()),
        ('form_values', ', '.join(tuple(uni(req.form.getvalue(key))
                                  for key in req.form.keys()))),
        ('form_getfirst', '%s,%s' % (req.form.getfirst('pname'),
                                     req.form.getfirst('px'))),
        ('form_getlist', '%s,%s' % (req.form.getlist('pname'),
                                    req.form.getlist('px'))),
        ('', ''),
        ('args_keys', req.args.keys()),
        ('args_values', ', '.join(tuple(uni(req.args[key])
                                        for key in req.args.keys()))),
        ('args_getfirst', '%s,%s' % (req.args.getfirst('gname'),
                                     req.args.getfirst('gx'))),
        ('args_getlist', '%s,%s' % (req.args.getlist('gname'),
                                    req.args.getlist('gx'))),
        ))

    buff = get_header("HTTP Form args test") + \
        (get_crumbnav(req),
         "<h2>Get Form</h2>",
         '<form method="get">',
         '<input type="text" name="gname" value="Ondřej"><br/>',
         '<input type="text" name="gsurname" value="Tůma"><br/>',
         '<input type="text" name="gx" value="1">'
         '<input type="text" name="gx" value="2">'
         '<input type="text" name="gx" value="3"><br/>',
         '<input type="submit" name="btn" value="Send">'
         '</form>',
         "<h2>Post Form</h2>",
         '<form method="post">',
         '<input type="text" name="pname" value="Ondřej"><br/>',
         '<input type="text" name="psurname" value="Tůma"><br/>',
         '<input type="text" name="px" value="8">'
         '<input type="text" name="px" value="7">'
         '<input type="text" name="px" value="6"><br/>',
         '<input type="submit" name="btn" value="Send">'
         '</form>',
         "<h2>Variables</h2>",
         "<table>") + \
        tuple("<tr><td>%s:</td><td>%s</td></tr>" %
              (key, html(val)) for key, val in var_info.items()) + \
        ("</table>",
         "<h2>Browser Headers</h2>",
         "<table>") + \
        tuple("<tr><td>%s:</td><td>%s</td></tr>" %
              (key, val) for key, val in req.headers_in.items()) + \
        ("</table>",
         "<h2>Request Variables </h2>",
         "<table>") + \
        tuple("<tr><td>%s:</td><td>%s</td></tr>" %
              (key, val) for key, val in get_variables(req)) + \
        ("</table>",) + \
        get_footer()

    for line in buff:
        req.write(line + '\n')
    return state.OK
Example #20
    def __init__(self):
        if not hasattr(self.__class__, 'FILTERS'):
            self.__class__.FILTERS = dexy.introspect.filters(Constants.NULL_LOGGER)
        if not hasattr(self.__class__, 'SOURCE_CODE'):
            artifact_class_source = inspect.getsource(self.__class__)
            artifact_py_source = inspect.getsource(Artifact)
            self.__class__.SOURCE_CODE = hashlib.md5(artifact_class_source + artifact_py_source).hexdigest()

        self._inputs = {}
        self.additional = None
        self.db = [] # accepts 'append'
        self.args = {}
        self.args['globals'] = {}

        self.is_last = False
        self.artifact_class_source = self.__class__.SOURCE_CODE
        self.artifacts_dir = 'artifacts' # TODO don't hard code
        self.batch_id = None
        self.binary_input = None
        self.binary_output = None
        self.ctime = None
        self.data_dict = OrderedDict()
        self.dexy_version = Version.VERSION
        self.dirty = False
        self.document_key = None
        self.elapsed = 0
        self.final = None
        self.initial = None
        self.inode = None
        self.input_data_dict = OrderedDict()
        self.key = None
        self.log = logging.getLogger()
        self.mtime = None
        self.state = 'new'
    def update_metadata(self, new_metadata):
        if not new_metadata:
            return
        has_changes = False
        for key, val in new_metadata.items():
            if self.metadata.get(key) != val:
                has_changes = True
        if not has_changes:
            return
        data = deepcopy(self.metadata)
        data.update(new_metadata)
        data = OrderedDict(sorted(data.items(), key=lambda t: t[0]))        

        new_json = json.dumps(data, indent=4)
        

        org_content = _read_unicode_file_dammit(self.full_local_path)

        def replacer(m):
            parts = m.group(0).split('\n')
            return parts[0].strip() + '\n' + new_json + '\n' + parts[-1].strip()

        new_content = self._json_comment_re.sub(replacer, org_content)
        if new_content == org_content:
            return
        logger.debug("Writing new metadata")
        f = codecs.open(self.full_local_path, 'w', 'utf-8')
        f.write(new_content)
        f.close()
Example #22
 def __init__(self, filePath=None, key=None):
     OrderedDict.__init__(self)
     if filePath != None and os.path.isfile(filePath):
         self.read(filePath, key)
     else:
         self._filename = filePath
     self.curStanza = None
    def edge_list(self):
        """ Return the list of edges for the derivatives of this workflow. """

        self._edges = super(CyclicWorkflow, self).edge_list()

        # TODO: Shouldn't have to do this every time.
        if len(self._mapped_severed_edges) > 0:

            cyclic_edges = OrderedDict()
            for edge in self._mapped_severed_edges:
                cyclic_edges[edge[0]] = edge[1]

            # Finally, modify our edge list to include the severed edges, and exclude
            # the boundary edges.
            for src, targets in self._edges.iteritems():
                if '@in' not in src or \
                   not any(edge in cyclic_edges.values() for edge in targets):
                    if isinstance(targets, str):
                        targets = [targets]

                    newtargets = []
                    for target in targets:
                        if '@out' not in target or \
                           src not in cyclic_edges:
                            newtargets.append(target)

                    if len(newtargets) > 0:
                        cyclic_edges[src] = newtargets

            self._edges = cyclic_edges

        return self._edges
Example #24
 def __init__(self, name, g):
     verifyCollectionDecls("item", g.itemColls)
     steps = [ x.step for x in g.stepRels ]
     verifyCollectionDecls("step", steps)
     self.name = name
     # items
     self.itemDeclarations = OrderedDict((i.collName, makeItemDecl(i)) for i in g.itemColls)
     self.concreteItems = [ i for i in self.itemDeclarations.values() if not i.isVirtual ]
     # item virtual mappings
     self.vms = [ i for i in self.itemDeclarations.values() if i.isVirtual ]
     self.inlineVms = [ i for i in self.vms if i.isInline ]
     self.externVms = [ i for i in self.vms if not i.isInline ]
     # steps / pseudo-steps
     self.stepFunctions = OrderedDict((x.step.collName, StepFunction(x)) for x in g.stepRels)
     verifyEnv(self.stepFunctions)
     self.initFunction = self.stepFunctions.pop(initNameRaw)
     self.initFunction.collName = "cncInitialize"
     self.finalizeFunction = self.stepFunctions.pop(finalizeNameRaw)
     self.finalizeFunction.collName = "cncFinalize"
     self.finalAndSteps = [self.finalizeFunction] + self.stepFunctions.values()
     # set up step attribute lookup dict
     self.stepLikes = OrderedDict(self.stepFunctions)
     self.stepLikes[self.initFunction.collName] = self.initFunction
     self.stepLikes[self.finalizeFunction.collName] = self.finalizeFunction
     # attribute tracking
     self.allAttrNames = set()
     # context
     self.ctxParams = filter(bool, map(strip, g.ctx.splitlines())) if g.ctx else []
def scan_file(file_name, subfile):
    # start analysis with basic info
    pe = pefile.PE(file_name)
    machine = pe.FILE_HEADER.Machine
    all_results = OrderedDict( [
        ('MD5',             get_hash(file_name, 'md5')),
        ('SHA1',            get_hash(file_name, 'sha1')),
        ('SHA256',          get_hash(file_name, 'sha256')),
        ('Type',            commands.getoutput('file %s' % file_name).split(file_name + ': ')[1]),
        ('Size',            (os.path.getsize(file_name))/1000),
        ('SSDeep',          get_ssdeep(file_name)),
        #('ImpHash',        pe.get_imphash()),
        ('Arch',            pefile.MACHINE_TYPE[machine]),
        ('Entry Point',     hex(pe.OPTIONAL_HEADER.AddressOfEntryPoint)),
        ('Compiled',        datetime.datetime.fromtimestamp(pe.FILE_HEADER.TimeDateStamp)),
        ('Start Address',   grep_saddress(file_name))
    ] )

    print '\n[ %s ]' % file_name
    for key, value in all_results.iteritems():
        if key == 'Compiled' or key == 'Entry Point' or key == 'Start Address':
            print '%s:\t\t%s' % (key, value)
        else:
            print '%s:\t\t\t%s' % (key, value)

    if subfile:
        print '\n'
        check_subfile(file_name)
 def __init__(self, optionMap, allApps):
     BaseActionRunner.__init__(self, optionMap, logging.getLogger("Queue System Submit"))
     # queue for putting tests when we couldn't reuse the originals
     self.reuseFailureQueue = Queue()
     self.testCount = 0
     self.testsSubmitted = 0
     self.maxCapacity = 100000 # infinity, sort of
     self.allApps = allApps
     for app in allApps:
         currCap = app.getConfigValue("queue_system_max_capacity")
         if currCap is not None and currCap < self.maxCapacity:
             self.maxCapacity = currCap
         
     self.jobs = OrderedDict()
     self.submissionRules = {}
     self.killedJobs = {}
     self.queueSystems = {}
     self.reuseOnly = False
     self.submitAddress = None
     self.slaveLogDirs = set()
     self.delayedTestsForAdd = []
     self.remainingForApp = OrderedDict()
     capacityPerSuite = self.maxCapacity / len(allApps)
     for app in allApps:
         self.remainingForApp[app.name] = capacityPerSuite
         self.getQueueSystem(app) # populate cache
     QueueSystemServer.instance = self
Example #27
def parse(tokens):
    if tokens[0] == '{':
        ret = OrderedDict()
        tokens = tokens[1:]
        while tokens[0] != '}':
            key = tokens[0]
            tokens = tokens[1:]

            tokens = tokens[1:] # :

            value, tokens = parse(tokens)

            if tokens[0] == ',':
                tokens = tokens[1:]

            ret[key] = value
        tokens = tokens[1:]
        return ret, tokens
    elif tokens[0] == '[':
        ret = []
        tokens = tokens[1:]
        while tokens[0] != ']':
            value, tokens = parse(tokens)
            if tokens[0] == ',':
                tokens = tokens[1:]
            ret.append(value)
        tokens = tokens[1:]
        return ret, tokens
    else:
        return tokens[0], tokens[1:]
Example #28
 def read_all(self):
     result = OrderedDict()
     self.native_book = self._get_book()
     for sheet in self.native_book.sheets():
         data_dict = self.read_sheet(sheet)
         result.update(data_dict)
     return result
    def collectFilesForApp(self, app):
        fileBodies = []
        totalValues = OrderedDict()
        rootDir = app.getPreviousWriteDirInfo()
        if not os.path.isdir(rootDir):
            sys.stderr.write("No temporary directory found at " + rootDir + " - not collecting batch reports.\n")
            return
        dirlist = os.listdir(rootDir)
        dirlist.sort()
        compulsoryVersions = set(app.getBatchConfigValue("batch_collect_compulsory_version"))
        versionsFound = set()
        for dir in dirlist:
            fullDir = os.path.join(rootDir, dir)
            if os.path.isdir(fullDir) and self.matchesApp(dir, app):
                currBodies, currVersions = self.parseDirectory(fullDir, app, totalValues)
                fileBodies += currBodies
                versionsFound.update(currVersions)
        if len(fileBodies) == 0:
            self.diag.info("No information found in " + rootDir)
            return

        missingVersions = compulsoryVersions.difference(versionsFound)

        mailTitle = self.getTitle(app, totalValues)
        mailContents = self.mailSender.createMailHeaderForSend(self.runId, mailTitle, app)
        mailContents += self.getMailBody(app, fileBodies, missingVersions)
        allCats = set(totalValues.keys())
        noMailCats = set([ "succeeded", "known bugs" ])
        allSuccess = allCats.issubset(noMailCats)
        self.mailSender.sendOrStoreMail(app, mailContents, isAllSuccess=allSuccess)
Example #30
def _get_csv_data(user_profile):
    data = OrderedDict()
    data['application_status_ID'] = user_profile.applicationstatus.pk
    data['profile'] = _get_profile_data(user_profile)
    data.update(_get_csv_form_data(user_profile))
    # data ['optional_subjects'] = _get_other_subjects_data( user_profile )
    return data