class ActionRunner(BaseActionRunner):
    def __init__(self, optionMap, *args):
        BaseActionRunner.__init__(self, optionMap, logging.getLogger("Action Runner"))
        self.currentTestRunner = None
        self.previousTestRunner = None
        self.appRunners = OrderedDict()

    def addSuite(self, suite):
        plugins.log.info("Using " + suite.app.description(includeCheckout=True))
        appRunner = ApplicationRunner(suite, self.diag)
        self.appRunners[suite.app] = appRunner

    def notifyAllReadAndNotified(self):
        # kicks off processing. Don't use notifyAllRead as we end up running all the tests before
        # everyone's been notified of the reading.
        self.runAllTests() 

    def notifyRerun(self, test):
        if self.currentTestRunner and self.currentTestRunner.test is test:
            self.diag.info("Got rerun notification for " + repr(test) + ", resetting actions")
            self.currentTestRunner.resetActionSequence()

    def runTest(self, test):
        # We have the lock coming in to here...
        appRunner = self.appRunners.get(test.app)
        if appRunner:
            self.lock.acquire()
            self.currentTestRunner = TestRunner(test, appRunner, self.diag, self.exited, self.killSignal)
            self.lock.release()

            self.currentTestRunner.performActions(self.previousTestRunner)
            self.previousTestRunner = self.currentTestRunner

            self.lock.acquire()
            self.currentTestRunner = None
            self.notifyComplete(test)
            self.lock.release()

    def killTests(self):
        if self.currentTestRunner:
            self.currentTestRunner.kill(self.killSignal)

    def killOrCancel(self, test):
        if self.currentTestRunner and self.currentTestRunner.test is test:
            self.currentTestRunner.kill()
        else:
            self.cancel(test)

    def getAllActionClasses(self):
        classes = set()
        for appRunner in self.appRunners.values():
            for action in appRunner.actionSequence:
                classes.add(action.__class__)
        return classes
            
    def cleanup(self):
        for actionClass in self.getAllActionClasses():
            actionClass.finalise()
        for appRunner in self.appRunners.values():
            appRunner.cleanActions()
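
A note on the locking in runTest above: currentTestRunner is only assigned or cleared while holding the lock, but the long-running performActions call happens outside it. A minimal self-contained sketch of that discipline (generic code, not TextTest's actual classes):

import threading

class Runner(object):
    # Minimal sketch: publish/clear shared state under a lock, do the work outside it.
    def __init__(self):
        self.lock = threading.Lock()
        self.current = None

    def run(self, job):
        with self.lock:
            self.current = job      # publish under the lock
        job()                       # long-running work happens unlocked
        with self.lock:
            self.current = None     # clear under the lock again

Runner().run(lambda: None)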
Example #2
def sliceReStructuredText(input, output):
	"""
	Slices the given reStructuredText file.

	:param input: ReStructuredText file to slice.
	:type input: unicode
	:param output: Directory to output sliced reStructuredText files.
	:type output: unicode
	:return: Definition success.
	:rtype: bool
	"""

	LOGGER.info("{0} | Slicing '{1}' file!".format(sliceReStructuredText.__name__, input))
	file = File(input)
	file.cache()

	slices = OrderedDict()
	for i, line in enumerate(file.content):
		search = re.search(r"^\.\. \.(\w+)", line)
		if search:
			slices[search.groups()[0]] = i + SLICE_ATTRIBUTE_INDENT

	index = 0
	for slice, sliceStart in slices.iteritems():
		sliceFile = File(os.path.join(output, "{0}.{1}".format(slice, OUTPUT_FILES_EXTENSION)))
		LOGGER.info("{0} | Outputing '{1}' file!".format(sliceReStructuredText.__name__, sliceFile.path))
		sliceEnd = index < (len(slices.values()) - 1) and slices.values()[index + 1] - SLICE_ATTRIBUTE_INDENT or \
				   len(file.content)

		for i in range(sliceStart, sliceEnd):
			skipLine = False
			for item in CONTENT_DELETION:
				if re.search(item, file.content[i]):
					LOGGER.info("{0} | Skipping Line '{1}' with '{2}' content!".format(sliceReStructuredText.__name__,
																					   i,
																					   item))
					skipLine = True
					break

			if skipLine:
				continue

			line = file.content[i]
			for pattern, value in STATEMENT_SUBSTITUTE.iteritems():
				line = re.sub(pattern, value, line)

			search = re.search(r"-  `[\w ]+`_ \(([\w\.]+)\)", line)
			if search:
				LOGGER.info("{0} | Updating Line '{1}' link: '{2}'!".format(sliceReStructuredText.__name__,
																			i,
																			search.groups()[0]))
				line = "-  :ref:`{0}`\n".format(search.groups()[0])
			sliceFile.content.append(line)

		sliceFile.write()
		index += 1

	return True
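
The sliceEnd assignment above uses the pre-Python-2.5 `cond and X or Y` idiom, safe here only because the `and` branch (a positive line index) can never be falsy. A small self-contained illustration of the equivalence:

values = [10, 20, 30]
index = 1
# Pre-2.5 idiom: when the "and" branch is falsy it silently falls through
# to the "or" branch, so X must be guaranteed truthy (a positive index here).
end = index < (len(values) - 1) and values[index + 1] or len(values)
# Modern, truthiness-safe equivalent:
end_modern = values[index + 1] if index < (len(values) - 1) else len(values)
assert end == end_modern == 30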
Example #3
def sliceDocumentation(fileIn, outputDirectory):
    """
	This definition slices the given documentation file.

	:param fileIn: File to convert. ( String )
	:param outputDirectory: Output directory. ( String )
	"""

    LOGGER.info("{0} | Slicing '{1}' file!".format(sliceDocumentation.__name__,
                                                   fileIn))
    file = File(fileIn)
    file.cache()

    slices = OrderedDict()
    for i, line in enumerate(file.content):
        search = re.search(r"^\.\. \.(\w+)", line)
        if search:
            slices[search.groups()[0]] = i + SLICE_ATTRIBUTE_INDENT

    index = 0
    for slice, sliceStart in slices.iteritems():
        sliceFile = File(
            os.path.join(outputDirectory,
                         "{0}.{1}".format(slice, OUTPUT_FILES_EXTENSION)))
        LOGGER.info("{0} | Outputing '{1}' file!".format(
            sliceDocumentation.__name__, sliceFile.path))
        sliceEnd = index < (len(slices.values()) - 1) and slices.values()[index + 1] - SLICE_ATTRIBUTE_INDENT or \
        len(file.content)

        for i in range(sliceStart, sliceEnd):
            skipLine = False
            for item in CONTENT_DELETION:
                if re.search(item, file.content[i]):
                    LOGGER.info(
                        "{0} | Skipping Line '{1}' with '{2}' content!".format(
                            sliceDocumentation.__name__, i, item))
                    skipLine = True
                    break

            if skipLine:
                continue

            line = file.content[i]
            for pattern, value in CONTENT_SUBSTITUTIONS.iteritems():
                line = re.sub(pattern, value, line)

            search = re.search(r"-  `[\w ]+`_ \(([\w\.]+)\)", line)
            if search:
                LOGGER.info("{0} | Updating Line '{1}' link: '{2}'!".format(
                    sliceDocumentation.__name__, i,
                    search.groups()[0]))
                line = "-  :ref:`{0}`\n".format(search.groups()[0])
            sliceFile.content.append(line)

        sliceFile.write()
        index += 1
Example #5
class ElementWithSubSections(Element):
    """ This class extends the default class Element. It offers you the power
    to add sections (SubSection) inside a element.
    The simple case is one section containing some elements.
    But in some situation you may represent your data like a tree.
    Section :
        Element1 : name = port, value = 389
        Element2 : name = address, value = 127.0.0.1
        ElementWithSubSections
            SubSection
                Element1
                Element2
    """

    def __init__(self, *args, **kwargs):
        super(ElementWithSubSections, self).__init__(*args, **kwargs)
        self.e_type = str
        self.sections = OrderedDict()

    def get_representation(self, prefix="", suffix="\n"):
        res = ['\n']
        temp_line = prefix + " - " + str(self._name) + " : "
        if self.hidden:
            temp_line += "xxxxxxxx" + suffix
        else:
            temp_line += str(self.value) + suffix
        res.append(temp_line)

        if len(self.sections) > 0:
            for elt in self.sections.values():
                res.append("".join(elt.get_representation(prefix + "\t")))
        return res

    def add_section(self, section):
        """You can add section inside a Element, the section must be a
        subclass of SubSection. You can use this class to represent a tree.
        """

        if not issubclass(section.__class__, SubSection):
            raise TypeError("Argument should be a subclass of SubSection, \
                             not :" + str(section.__class__))
        self.sections[section.name] = section
        return section

    def load(self, file_parser, section_name):
        self._load(file_parser, section_name)
        if len(self.sections) > 0:
            for sec in self.sections.values():
                sec.name = self.value
                sec.load(file_parser)
        self.post_load()
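
A self-contained sketch of the tree shape the docstring describes, using a simplified stand-in for the Element/SubSection classes (the real constructors are assumed to differ):

from collections import OrderedDict

class Node(object):
    # Simplified stand-in for ElementWithSubSections: a value plus named children.
    def __init__(self, name, value=None):
        self.name, self.value, self.sections = name, value, OrderedDict()

    def add(self, child):
        self.sections[child.name] = child
        return child

    def render(self, prefix=""):
        lines = ["%s - %s : %s" % (prefix, self.name, self.value)]
        for child in self.sections.values():
            lines.extend(child.render(prefix + "\t"))
        return lines

ldap = Node("ldap", "main")
ldap.add(Node("port", 389))
ldap.add(Node("address", "127.0.0.1"))
print("\n".join(ldap.render()))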
Example #6
def printClusters(tf_idf_values, dom_values, ind, centroids):
    xy_dict = dict(zip(tf_idf_values, dom_values))
    xy_dict_ordered = OrderedDict(
        sorted(xy_dict.items(), key=lambda item: item[0]))
    colors = ['r', 'y', 'm', 'c', 'b', 'g', 'r', 'y', 'm', 'c']
    ax.hold(True)
    ax.set_ylim(0, 1.1)
    ax.set_xlim(0, 1)
    #plt.grid(True, linestyle='-', color='0.75')
    ax.scatter(xy_dict_ordered.keys(),
               xy_dict_ordered.values(),
               s=30,
               c=colors[ind],
               marker='o',
               alpha=0.75,
               linewidths=.1)
    ax.plot(xy_dict_ordered.keys(),
            xy_dict_ordered.values(),
            linestyle='-',
            c=colors[ind],
            alpha=.40)
    # centroids
    ax.scatter(centroids[ind],
               1,
               marker='o',
               s=300,
               linewidths=1,
               c='w',
               alpha=0.60)
    ax.scatter(centroids[ind],
               1,
               marker='x',
               s=300,
               linewidths=1,
               c='k',
               alpha=0.60)
    ax.vlines(x=centroids[ind],
              ymin=0,
              ymax=1,
              color='k',
              linestyles='solid',
              alpha=0.40)

    #avg_dom = sum(dom_values)/len(dom_values)
    #min_val = min([tf_idf for index, tf_idf in enumerate(tf_idf_values) if dom_values[index] > .5])
    #max_val = max([tf_idf for index, tf_idf in enumerate(tf_idf_values) if dom_values[index] > .5])
    #ax.plot([min_val, centroids[ind], max_val], [0, 1, 0], linewidth=0.3, color='black')
    #print min_val, max_val
    canvas.print_figure('arun_plot.pdf', dpi=700)
Example #7
class SubSection(_Section):
    """ TODO """

    def get_representation(self, prefix="", suffix="\n"):
        res = []
        if self.count() > 0:
            res.append(prefix + "SubSection : "
                       + self.get_section_name().upper() + suffix)
            for elt in self.elements.values():
                res.append("".join(elt.get_representation(prefix)))
        return res

    def __copy__(self):
        newone = type(self)()
        newone.__dict__.update(self.__dict__)
        # Give the copy its own container; the original code assigned to
        # self.elements, which emptied the source object instead of the copy.
        newone.elements = OrderedDict()
        return newone

    # pylint: disable-msg=W0613
    def __deepcopy__(self, *args):
        newone = type(self)()
        newone.__dict__.update(self.__dict__)
        newone.elements = OrderedDict()
        for e in self.elements.values():
            newone.add_element(copy.deepcopy(e))
        return newone
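
For contrast, the default behaviour these two overrides manage by hand: `copy.copy` shares mutable members, while `copy.deepcopy` duplicates them. A self-contained demonstration:

import copy
from collections import OrderedDict

class Box(object):
    def __init__(self):
        self.elements = OrderedDict(a=1)

b = Box()
shallow = copy.copy(b)      # default shallow copy: same OrderedDict object
deep = copy.deepcopy(b)     # recursive copy: an independent OrderedDict
shallow.elements["b"] = 2
assert "b" in b.elements            # mutation through the shallow copy is visible
assert "b" not in deep.elements     # the deep copy is unaffected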
Example #8
class JUnitResponder(plugins.Responder):
    """Respond to test results and write out results in format suitable for JUnit
    report writer. Only does anything if the app has batch_junit_format:true in its config file """
    def __init__(self, optionMap, *args):
        plugins.Responder.__init__(self)
        self.runId = getBatchRunName(optionMap)
        self.allApps = OrderedDict()
        self.appData = OrderedDict()

    def useJUnitFormat(self, app):
        return app.getBatchConfigValue("batch_junit_format") == "true"

    def notifyComplete(self, test):
        if not self.useJUnitFormat(test.app):
            return
        if not self.appData.has_key(test.app):
            self._addApplication(test)
        self.appData[test.app].storeResult(test)

    def notifyAllComplete(self):
        # allApps is {appname : [app]}
        for appList in self.allApps.values():
            # appData is {app : data}
            for app in appList:
                if self.useJUnitFormat(app):
                    data = self.appData[app]
                    ReportWriter(self.runId).writeResults(app, data)

    def _addApplication(self, test):
        app = test.app
        self.appData[app] = JUnitApplicationData()
        self.allApps.setdefault(app.name, []).append(app)
class EmailResponder(plugins.Responder):
    def __init__(self, optionMap, *args):
        plugins.Responder.__init__(self)
        self.runId = optionMap.get("name", calculateBatchDate()) # use the command-line name if given, else the date
        self.batchAppData = OrderedDict()
        self.allApps = OrderedDict()

    def notifyComplete(self, test):
        if test.app.emailEnabled():
            if not self.batchAppData.has_key(test.app):
                self.addApplication(test)
            self.batchAppData[test.app].storeCategory(test)

    def getRootSuite(self, test):
        if test.parent:
            return self.getRootSuite(test.parent)
        else:
            return test
       
    def addApplication(self, test):
        rootSuite = self.getRootSuite(test)
        app = test.app
        self.batchAppData[app] = BatchApplicationData(rootSuite)
        self.allApps.setdefault(app.name, []).append(app)
        
    def notifyAllComplete(self):
        mailSender = MailSender(self.runId)
        for appList in self.allApps.values():
            batchDataList = map(self.batchAppData.get, appList)
            mailSender.send(batchDataList)
class ProcessTerminationMonitor(plugins.Observable):
    def __init__(self):
        plugins.Observable.__init__(self)
        self.processesForKill = OrderedDict()
        self.exitHandlers = OrderedDict()

    def listRunningProcesses(self):
        processesToCheck = guiConfig.getCompositeValue("query_kill_processes", "", modeDependent=True)
        if "all" in processesToCheck:
            processesToCheck = [".*"]
        if len(processesToCheck) == 0:
            return []

        running = []
        triggerGroup = plugins.TextTriggerGroup(processesToCheck)
        for process, description in self.processesForKill.values():
            if triggerGroup.stringContainsText(description):
                running.append("PID " + str(process.pid) + " : " + description)

        return running

    def getProcessIdentifier(self, process):
        # Unfortunately the child_watch_add method needs different ways to
        # identify the process on different platforms...
        if os.name == "posix":
            return process.pid
        else:
            return process._handle

    def startProcess(
        self, cmdArgs, description="", killOnTermination=True, exitHandler=None, exitHandlerArgs=(), **kwargs
    ):
        process = subprocess.Popen(cmdArgs, stdin=open(os.devnull), **kwargs)
        pidOrHandle = self.getProcessIdentifier(process)
        self.exitHandlers[int(pidOrHandle)] = (exitHandler, exitHandlerArgs)
        if killOnTermination:
            self.processesForKill[int(pidOrHandle)] = (process, description)
        gobject.child_watch_add(pidOrHandle, self.processExited)

    def processExited(self, pid, *args):
        if self.processesForKill.has_key(pid):
            del self.processesForKill[pid]

        if self.exitHandlers.has_key(pid):
            exitHandler, exitHandlerArgs = self.exitHandlers.pop(pid)
            if exitHandler:
                exitHandler(*exitHandlerArgs)

    def notifyKillProcesses(self, sig=None):
        # Don't leak processes
        if len(self.processesForKill) == 0:
            return
        diag = logging.getLogger("kill processes")
        self.notify("Status", "Terminating all external viewers ...")
        for pid, (process, description) in self.processesForKill.items():
            if self.exitHandlers.has_key(pid):
                self.exitHandlers.pop(pid)  # don't call exit handlers in this case, we're terminating
            self.notify("ActionProgress")
            diag.info("Killing '" + description + "' interactive process")
            killSubProcessAndChildren(process, sig)
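
A self-contained sketch of the bookkeeping pattern above, with processes and exit handlers keyed by pid so one exit callback can pop both; the gobject child-watch loop is replaced by an explicit wait purely for illustration:

import subprocess
import sys
from collections import OrderedDict

processes = OrderedDict()      # pid -> (process, description)
exit_handlers = OrderedDict()  # pid -> callback

def start(cmd_args, description="", exit_handler=None):
    process = subprocess.Popen(cmd_args)
    processes[process.pid] = (process, description)
    if exit_handler:
        exit_handlers[process.pid] = exit_handler
    return process

def on_exit(pid, *args):
    # Mirrors processExited: drop the kill entry, then fire the handler once.
    processes.pop(pid, None)
    handler = exit_handlers.pop(pid, None)
    if handler:
        handler()

def report():
    print("child finished")

child = start([sys.executable, "-c", "pass"], "no-op child", exit_handler=report)
child.wait()          # stand-in for gobject.child_watch_add's notification
on_exit(child.pid)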
Example #11
 def load(self, json_obj, filters=None, formatters=None,
          ignore_exceptions={}):
     """TODO"""
     for json_row in json_obj:
         data = OrderedDict()
         for key in self.keys:
             try:
                 data[key] = json_row[key]
             except KeyError as ex:
                 data[key] = None
                 if not ignore_exceptions.get(key, None):
                     ignore_exceptions[key] = True
                     print "WARN: KeyError: " + str(ex)
         if self.filters(data, filters):
             if not self.raw:
                 self.formatters(data, formatters)
             self.add_row(data.values())
     if self._pref_start > 0:
         self.start = self._pref_start
         limit = self._pref_limit
         if limit > 0:
             self.end = self.start + limit
     elif self._pref_end > 0:
         self.start = len(self._rows) - self._pref_end
         limit = self._pref_limit
         if limit > 0:
             self.end = self.start + limit
Example #13
 def values(self):
     # Fix for python 2.7... which calls __getitem__ internally
     origFile = self.readingFile
     self.readingFile = None
     ret = OrderedDict.values(self)
     self.readingFile = origFile
     return ret
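
Context for the fix above: Python 2.7's pure-Python OrderedDict implements values() in terms of __getitem__, so a subclass whose __getitem__ has side effects (lazy file reading here) must switch them off for the duration of the call. A self-contained sketch of the same guard:

from collections import OrderedDict

class LazyDict(OrderedDict):
    readingFile = None

    def __getitem__(self, key):
        if self.readingFile is not None:
            print("side effect while reading %s" % self.readingFile)
        return OrderedDict.__getitem__(self, key)

    def values(self):
        # Same shape as the fix above: silence the side effect,
        # delegate to the base class, then restore the state.
        origFile, self.readingFile = self.readingFile, None
        try:
            return OrderedDict.values(self)
        finally:
            self.readingFile = origFile

d = LazyDict([("a", 1), ("b", 2)])
d.readingFile = "config.txt"
d.values()    # no side-effect output thanks to the guard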
Example #14
    def edge_list(self):
        """ Return the list of edges for the derivatives of this workflow. """

        self._edges = super(CyclicWorkflow, self).edge_list()

        # TODO: Shouldn't have to do this every time.
        if len(self._mapped_severed_edges) > 0:

            cyclic_edges = OrderedDict()
            for edge in self._mapped_severed_edges:
                cyclic_edges[edge[0]] = edge[1]

            # Finally, modify our edge list to include the severed edges, and
            # exclude the boundary edges.
            for src, targets in self._edges.iteritems():
                if '@in' not in src or \
                   not any(edge in cyclic_edges.values() for edge in targets):
                    if isinstance(targets, str):
                        targets = [targets]

                    newtargets = []
                    for target in targets:
                        if '@out' not in target or \
                           src not in cyclic_edges:
                            newtargets.append(target)

                    if len(newtargets) > 0:
                        cyclic_edges[src] = newtargets

            self._edges = cyclic_edges

        return self._edges
class JUnitResponder(plugins.Responder):
    """Respond to test results and write out results in format suitable for JUnit
    report writer. Only does anything if the app has batch_junit_format:true in its config file """
    
    def __init__(self, optionMap, *args):
        plugins.Responder.__init__(self)
        self.runId = optionMap.get("name", calculateBatchDate()) # use the command-line name if given, else the date
        self.allApps = OrderedDict()
        self.appData = OrderedDict()

    def useJUnitFormat(self, app):
        return app.getBatchConfigValue("batch_junit_format") == "true"
    
    def notifyComplete(self, test):
        if not self.useJUnitFormat(test.app):
            return
        if not self.appData.has_key(test.app):
            self._addApplication(test)
        self.appData[test.app].storeResult(test)
        
    def notifyAllComplete(self):
        # allApps is {appname : [app]}
        for appList in self.allApps.values():
            # appData is {app : data}
            for app in appList:
                if self.useJUnitFormat(app):
                    data = self.appData[app]
                    ReportWriter(self.runId).writeResults(app, data)
      
    def _addApplication(self, test):
        app = test.app
        self.appData[app] = JUnitApplicationData()
        self.allApps.setdefault(app.name, []).append(app)
 def test_iterators(self):
     pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
     shuffle(pairs)
     od = OrderedDict(pairs)
     self.assertEqual(list(od), [t[0] for t in pairs])
     self.assertEqual(list(od.keys()), [t[0] for t in pairs])
     self.assertEqual(list(od.values()), [t[1] for t in pairs])
     self.assertEqual(list(od.items()), pairs)
     self.assertEqual(list(reversed(od)), [t[0] for t in reversed(pairs)])
Example #20
class GenerateTestSummary(guiplugins.ActionDialogGUI):
    def __init__(self, *args):
        guiplugins.ActionDialogGUI.__init__(self, *args)
        self.addOption("generate",
                       "",
                       possibleDirs=[os.getenv("TEXTTEST_TMP", "")],
                       saveFile=True)
        self.batchAppData = OrderedDict()
        self.allApps = OrderedDict()

    def performOnCurrent(self):
        fileName = self.getFileName()
        for test in self.currTestSelection:
            if test.state.isComplete():
                if not self.batchAppData.has_key(test.app):
                    self.addApplication(test)
                self.batchAppData[test.app].storeCategory(test)
        self.writeTextSummary(fileName)

    def writeTextSummary(self, fileName):
        mailSender = MailSender()
        with open(fileName, "w") as f:
            for appList in self.allApps.values():
                batchDataList = map(self.batchAppData.get, appList)
                f.write(mailSender.makeContents(batchDataList, False))

    def getFileName(self):
        fileName = self.optionGroup.getOptionValue("generate")
        if not fileName:
            raise plugins.TextTestError, "Cannot save selection - no file name specified"
        elif os.path.isdir(fileName):
            raise plugins.TextTestError, "Cannot save selection - existing directory specified"
        else:
            return fileName

    def _getTitle(self):
        return "Generate test summary"

    def getRootSuite(self, test):
        if test.parent:
            return self.getRootSuite(test.parent)
        else:
            return test

    def addApplication(self, test):
        rootSuite = self.getRootSuite(test)
        app = test.app
        self.batchAppData[app] = BatchApplicationData(rootSuite)
        self.allApps.setdefault(app.name, []).append(app)
Example #23
class MainApp(object):

    def __init__(self, name, cmds, config='app.cfg', host='127.0.0.1', port='5000', script_url=SCRIPT_URL):
        self.name = name
        self.cmds = OrderedDict([(c.name, c) for c in cmds])
        self.app = Flask(__name__)
        self.config = os.path.abspath(config)
        # Not being used!
        self.app.config.from_pyfile(self.config)
        # Directories with contents displayed in the page
        self.dirs = []
        self.host = host
        self.port = port

        # Create the url_rules for the Forms
        for i, cmd in enumerate(self.cmds.values()):
            self.app.add_url_rule( SCRIPT_URL + (cmd.name if i > 0 else '')
                                 , cmd.name
                                 , partial(self.form, cmd.name)
                                 , methods=['GET', 'POST'])

        # Create the url_rules for serving Form's files directories
        for c in cmds:
            for d in c.dirs:
                self.app.add_url_rule( "{}{}/<path:filename>".format(SCRIPT_URL, d)
                                     , "{}-{}".format(cmd.name, d)
                                     , partial(self.serve_files, d)
                                     , methods=['GET'])
                self.dirs.append(DirContents(d))

    def run(self):
        # self.port was stored but never used; pass it along with the host
        self.app.run(debug=True, host=self.host, port=int(self.port))

    def serve_files(self, dir, filename):
        return send_from_directory(os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', '{}')).format(dir), filename)

    def form(self, cmd_name):
        f = self.cmds[cmd_name]
        self.active = cmd_name
        f.stdout = ''

        if request.method == 'POST':
            f.process(request.form)
            if f.form.validate():
                f.run()

        return render_template('form.html', form=f.fields_list(), desc=Markup(f.desc), dirs=self.dirs, output_type=f.output_type, output=f.stdout, app=self)
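
The url_rule loops above bind each command's name with functools.partial; a bare lambda in a loop would capture the variable rather than its current value. A self-contained sketch of that distinction:

from functools import partial

names = ["status", "deploy", "rollback"]

# A lambda in a loop closes over the variable, not its current value...
late = [lambda: name for name in names]
# ...while partial freezes the argument at creation time.
bound = [partial(lambda n: n, name) for name in names]

assert [f() for f in late] == ["rollback"] * 3
assert [f() for f in bound] == names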
Example #24
class XLSSheetDefinition(object):
    def __init__(self, heading_row=0, fields=None):
        self.heading_row = heading_row
        self.fields = fields if fields is not None else []

    def load_yaml(self, filepath):
        self.fields = OrderedDict()
        with open(filepath, "r") as fp:
            data = yaml.load(fp)
            self.heading_row = data.get("heading_row", self.heading_row)
            for fielddef in data.get("fields", []):
                for name, fdef in fielddef.iteritems():
                    field = XLSField(name)
                    if fdef is not None:
                        for key, val in fdef.iteritems():
                            setattr(field, key, val)
                    self.fields[name] = field

    def names(self):
        return self.fields.keys()
    
    def unique(self):
        return [f for f in self.fields.values() if f.unique]

    def multiple(self):
        return [f for f in self.fields.values() if f.multiple]

    def i18n(self):
        return [f for f in self.fields.values() if f.i18n]

    def required(self):
        return [f for f in self.fields.values() if f.required]

    def date(self):
        return [f for f in self.fields.values() if f.type=="date"]

    def oftype(self, type):
        return [f for f in self.fields.values() if f.type==type]

    def limited(self):
        return [f for f in self.fields.values() if f.limit]

    def choices(self):
        return [f for f in self.fields.values() if f.choices is not None]
Example #25
    def _build(self, dirinfo):
        if self._belongs_to_exclude(dirinfo):
            return BinaryItem(dirinfo.name, filename=dirinfo.filename)
        files = OrderedDict((x.name, x) for x
            in unique_sorted_listing(dirinfo.filename,
                extension_priority=self.extension_priority))

        if self.directory_item_name in files \
          and text_file(files[self.directory_item_name].filename):
            info = files.pop(self.directory_item_name)
            factory = query_item_factory(info.extension, default=ContentItem)
            item = lambda children: factory(
                dirinfo.name, info.filename, info.extension, children=children)
        else:
            item = lambda children: Item(dirinfo.name, children=children)
        children = [self._build(x) for x in files.values()]
        return item(children)
Example #26
def suvan():
	s = Session()
	try:
		pictures = s.query(Picture).order_by(desc(Picture.date_uploaded)).all()
		d = OrderedDict()
		for p in pictures:
			dstr = p.date_uploaded.strftime('%B %d, %Y')
			if not d.has_key(dstr):
				d[dstr] = {}
				d[dstr]['date'] = dstr
				d[dstr]['age'] = getAge(p.date_uploaded)
				d[dstr]['pictures'] = []

			d[dstr]['pictures'].append(p)

		return render_template('suvan.html', pictures=d.values())
	finally:
		s.close()
Example #27
def list_ (request):
	priority = map(int, request.GET.getall('pr'))

	tags = request.GET.get('tags', '')
	def repl (matchobj):
		m = matchobj.group(0)
		if m in ('or', 'and', 'not', '(', ')'):
			return m.upper()
		else:
			return u"tags LIKE '%%#%s#%%'" % m
	tags = re.sub(RE_TAG_Q, repl, tags)

	entries = OrderedDict()
	q = Entry.query.order_by(Entry.priority.desc(), Entry.created_time.desc())
	if tags:
		q = q.filter(tags)
	if priority:
		q = q.filter(Entry.priority.in_(priority))
	for e in q:
		e.children = []
		entries[e.id] = e

	keys_to_del = []
	items = entries.items()
	for id, e in items:
		if e.parent_id:
			keys_to_del.append(id)
			if e.parent_id not in entries: #for filter
				entries[e.parent_id] = e.parent
				e.parent.children = []
				items.append((e.parent_id, e.parent)) # visit parents later too so the hierarchy doesn't break
			entries[e.parent_id].children.append(e)
	for id in keys_to_del:
		del entries[id]

	if tags:
		entries = OrderedDict(sorted(entries.items(), key = (lambda (k, e): e.priority), reverse = True))

	return {
		'entries': entries.values(),
		'priorities': priorities,
	}
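
To make the repl transformation above concrete: boolean keywords are upper-cased and bare tags become LIKE clauses. A standalone demonstration, with RE_TAG_Q given an assumed shape (the real pattern is defined elsewhere in the module):

import re

RE_TAG_Q = r"\w+|\(|\)"   # assumed shape of the real pattern

def repl(matchobj):
    m = matchobj.group(0)
    if m in ('or', 'and', 'not', '(', ')'):
        return m.upper()
    return u"tags LIKE '%%#%s#%%'" % m

print(re.sub(RE_TAG_Q, repl, u"python and (web or cli)"))
# tags LIKE '%#python#%' AND (tags LIKE '%#web#%' OR tags LIKE '%#cli#%')

Since the resulting fragment goes into q.filter() as raw SQL, the safety of this scheme rests entirely on RE_TAG_Q rejecting anything other than tag words and the boolean keywords.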
Example #28
    def addAllPlots(self, graph, results, *args):
        prevYlist = [0] * len(results)
        plotData = OrderedDict()
        for category in self.labels.keys():
            currYlist = [summary.get(category, 0) for _, summary in results]
            if self.hasNonZero(currYlist):
                ylist = [(currYlist[x] + prevYlist[x])
                         for x in range(len(prevYlist))]
                plotData[category] = prevYlist, ylist
                prevYlist = ylist

        for category in reversed(plotData.keys()):
            prevYlist, ylist = plotData[category]
            if not self.hasNonZero(prevYlist):
                # Adjust the bottom of the graph to avoid a huge block of green for large suites
                prevYlist = [
                    self.getGraphMinimum(ylist,
                                         plotData.values()[-1][-1])
                ] * len(ylist)
            self.addPlot(prevYlist, ylist, graph, category=category, *args)
Example #29
class ListSection(_AbstractSection):
    """ TODO """
    def __init__(self, name, *args, **kwargs):
        super(ListSection, self).__init__(*args, **kwargs)
        self.elements = OrderedDict()
        self._name = name

    def load(self, file_parser):

        section = self.get_section_name()
        try:

            # TODO : ? : data = data.decode(locale.getpreferredencoding())
            for key in [item for item in file_parser.options(section)
                        if item not in file_parser.defaults().keys()]:
                self.elements[key] = file_parser.get(section, key)
        except ConfigParser.NoSectionError as e:
            # pylint: disable-msg=W0621
            log = logging.getLogger('argtoolbox')
            if self._required:
                log.error("Required section : " + section)
                raise ValueError(e)
            else:
                log.debug("Missing section : " + section)

    def reset(self):
        for e in self.elements.values():
            e.reset()

    def get_representation(self, prefix="", suffix="\n"):
        res = []
        res.append(prefix + "Section " + self._name + suffix)

        for key, val in self.elements.items():
            a = []
            a.append(prefix)
            a.append(" - " + str(key) + " : " + str(val))
            a.append(suffix)
            res.append("".join(a))
        return res
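
The list comprehension in load above filters out a ConfigParser quirk: options() also returns every key inherited from the [DEFAULT] section. A self-contained demonstration:

import io
try:
    from configparser import ConfigParser                       # Python 3
except ImportError:
    from ConfigParser import SafeConfigParser as ConfigParser  # Python 2

source = u"[DEFAULT]\nshared = 1\n\n[ldap]\nport = 389\n"
parser = ConfigParser()
try:
    parser.read_string(source)              # Python 3
except AttributeError:
    parser.readfp(io.StringIO(source))      # Python 2

print(parser.options("ldap"))    # 'shared' leaks in from [DEFAULT]
real_keys = [k for k in parser.options("ldap")
             if k not in parser.defaults()]
print(real_keys)                 # ['port']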
Example #30
def group_member_export(request, group_slug):
    """
    Export all group members for a specific group
    """
    group = get_object_or_404(Group, slug=group_slug)

    # if they can edit it, they can export it
    if not has_perm(request.user, 'user_groups.change_group', group):
        raise Http403

    import xlwt
    from ordereddict import OrderedDict
    from django.db import connection

    # create the excel book and sheet
    book = xlwt.Workbook(encoding='utf8')
    sheet = book.add_sheet('Group Members')

    # the key is what the column will be in the
    # excel sheet. the value is the database lookup
    # Used OrderedDict to maintain the column order
    group_mappings = OrderedDict([
        ('user_id', 'au.id'),
        ('first_name', 'au.first_name'),
        ('last_name', 'au.last_name'),
        ('email', 'au.email'),
        ('receives email', 'pp.direct_mail'),
        ('company', 'pp.company'),
        ('address', 'pp.address'),
        ('address2', 'pp.address2'),
        ('city', 'pp.city'),
        ('state', 'pp.state'),
        ('zipcode', 'pp.zipcode'),
        ('country', 'pp.country'),
        ('phone', 'pp.phone'),
        ('is_active', 'au.is_active'),
        ('date', 'gm.create_dt'),
    ])
    group_lookups = ','.join(group_mappings.values())

    # Use custom SQL to fetch the rows because we need to
    # populate the user profile information as well, and we
    # cannot do that with Django's ORM without a separate
    # profile query per user: pulling 13,000 group members
    # can be done in one query, but their profiles would
    # need 13,000 individual queries :(
    cursor = connection.cursor()
    sql = "SELECT %s FROM user_groups_groupmembership gm \
           INNER JOIN auth_user au ON (au.id = gm.member_id) \
           LEFT OUTER JOIN profiles_profile pp \
           on (pp.user_id = gm.member_id) WHERE group_id = %%s;"

    sql = sql % group_lookups
    cursor.execute(sql, [group.pk])
    values_list = list(cursor.fetchall())

    # Append the heading to the list of values that will
    # go into the excel sheet
    values_list.insert(0, group_mappings.keys())

    # excel date styles
    default_style = xlwt.Style.default_style
    datetime_style = xlwt.easyxf(num_format_str='mm/dd/yyyy hh:mm')
    date_style = xlwt.easyxf(num_format_str='mm/dd/yyyy')

    if values_list:
        # Write the data enumerated to the excel sheet
        for row, row_data in enumerate(values_list):
            for col, val in enumerate(row_data):
                # styles the date/time fields
                if isinstance(val, datetime):
                    style = datetime_style
                elif isinstance(val, date):
                    style = date_style
                else:
                    style = default_style
                sheet.write(row, col, val, style=style)

    response = HttpResponse(content_type='application/vnd.ms-excel')
    response[
        'Content-Disposition'] = 'attachment; filename=group_%s_member_export.xls' % group.pk
    book.save(response)
    return response
    def update(self):
        # Allow the source to provide terms until we have more specific ones
        # from the query.
        # Things do not go well if self.terms is None
        self._bound_source = None
        source = self.bound_source
        terms = OrderedDict()

        # populate select values if needed
        if self.populate_select:
            for t in self.source:
                if not t.token in terms:
                    terms[t.token] = t

        # pre defined terms from context+source
        self.terms = SourceTerms(self.context, self.request, self.form, self.field, self, source)

        # If we have values in the request,
        # use these to get the terms.
        request_values = z3c.form.interfaces.NOVALUE
        # extract selected value
        if not self.ignoreRequest:
            request_values = self.extract(
                default=z3c.form.interfaces.NOVALUE)

        if request_values is not z3c.form.interfaces.NOVALUE:
            if not isinstance(request_values, (tuple, set, list)):
                request_values = (request_values,)

            for token in request_values:
                if not token or token == self.noValueToken:
                    continue
                try:
                    t = source.getTermByToken(token)
                    terms[t.token] = t
                except LookupError:
                    # Term no longer available
                    if not self.ignoreMissing:
                        raise

        # take the value from the current saved value
        # if there is an existing adapter allowing it
        if not self.ignoreContext:
            selection = zope.component.getMultiAdapter(
                (self.context, self.field),
                z3c.form.interfaces.IDataManager).query()
            if selection is z3c.form.interfaces.NOVALUE:
                selection = []
            elif not isinstance(selection,
                                (tuple, set, list)):
                selection = [selection]
            for value in selection:
                if not value:
                    continue
                try:
                    t = source.getTerm(value)
                    terms[t.token] = t
                except LookupError:
                    # Term no longer available
                    if not self.ignoreMissing:
                        raise
        # re-set terms with values from request
        self.terms = QueryTerms(self.context, self.request, self.form, self.field, self, terms.values())

        # update widget selected value if any
        select.SelectWidget.update(self)
Example #32
class Controller(object):
    def __init__(self, args={}):
        self.args = args # arguments from command line
        self.config = {} # config to be processed from .dexy files
        self.docs = []
        self.timing = []
        self.virtual_docs = []

        self.batch_start_time = None
        self.batch_finish_time = None
        self.batch_elapsed_time = None

        # Set up logging
        if args.has_key("logsdir") and args.has_key("logfile"):
            self.log = dexy.utils.get_log("dexy.controller", args['logsdir'], args['logfile'], args['loglevel'])
        else:
            self.log = Constants.NULL_LOGGER

        # Set up db
        if args.has_key('dbclass') and args.has_key("logsdir") and args.has_key("dbfile"):
            self.db = dexy.utils.get_db(self.args['dbclass'], logsdir=self.args['logsdir'], dbfile=args['dbfile'])
        else:
            self.db = None

        # List of directories that reporters use, these will not be processed by dexy
        self.reports_dirs = dexy.introspect.reports_dirs(self.log)

        # list of artifact classes - if nothing else uses this then move
        # it into the if statement below and don't cache it

        self.artifact_classes = dexy.introspect.artifact_classes(self.log)
        if args.has_key('artifactclass'):
            if self.artifact_classes.has_key(args['artifactclass']):
                self.artifact_class = self.artifact_classes[args['artifactclass']]
            else:
                raise dexy.commands.UserFeedback("Artifact class name %s not found in %s" % (args['artifactclass'], ",".join(self.artifact_classes.keys())))


    def run(self):
        """
        This does all the work.
        """
        self.batch_start_time = time.time()
        start = self.batch_start_time

        self.log.debug("populating Document class filter list")
        dexy.document.Document.filter_list = dexy.introspect.filters(self.log)
        self.timing.append(("populate-filter-list", time.time() - start))
        start = time.time()

        self.log.debug("loading config...")
        self.load_config()
        self.log.debug("finished loading config.")
        self.timing.append(("load-config", time.time() - start))
        start = time.time()

        self.log.debug("processing config, populating document list...")
        self.process_config()
        self.log.debug("finished processing config.")
        self.timing.append(("process-config", time.time() - start))
        start = time.time()

        # set the list of documents which are virtual
        self.virtual_docs = [d for d in self.docs if d.virtual]

        try:
            if not self.args['dryrun']:
                [doc.setup() for doc in self.docs]
                self.docs = [doc.run() for doc in self.docs]
        except dexy.commands.UserFeedback as e:
            self.persist()
            raise e

        self.timing.append(("run-docs", time.time() - start))

        self.batch_finish_time = time.time()
        self.batch_elapsed_time = self.batch_finish_time - self.batch_start_time

        self.log.debug("persisting batch info...")
        self.persist()
        self.log.debug("finished persisting.")
        self.log.debug("finished processing. elapsed time %s" % self.batch_elapsed_time)

    def persist(self):
        """
        Persists the database. Saves some information about this batch in a
        JSON file (for use by reporters or for debugging).
        """
        self.db.persist()
        dexy.utils.save_batch_info(self.batch_id, self.batch_info(), self.args['logsdir'])

    def batch_info(self):
        """
        Dict of info to save
        """
        return {
            "id" : self.batch_id,
            "config" : self.config,
            "args" : self.args,
            "docs" : dict((doc.key(), doc.document_info()) for doc in self.docs),
            "start_time" : self.batch_start_time,
            "finish_time" : self.batch_finish_time,
            "elapsed" : self.batch_elapsed_time,
            "timing" : self.timing
            }

    def config_for_directory(self, path):
        """
        Determine the config applicable within a directory by looking in every
        parent directory (up as far as the dexy project root) for config files
        and combining them, such that subdirectories override parents.
        """
        self.log.debug("Determining configuration applicable in %s" % path)

        global_args = {}
        config_dict = {}
        variables = {}
        config_file = self.args['config']

        path_elements = path.split(os.sep)

        for i in range(0,len(path_elements)+1):
            config_path = os.path.join(*(path_elements[0:i] + [config_file]))
            config_files = glob.glob(config_path)

            # Don't propagate virtual files
            for k in config_dict.keys():
                propagate_virtual = config_dict[k].has_key('propagate') and config_dict[k]['propagate']
                if k.startswith("@") and not propagate_virtual:
                    del config_dict[k]

            for f in config_files:
                self.log.info("loading config file %s" % f)

                with open(f, "r") as cf:
                    try:
                        json_dict = json.load(cf)
                    except ValueError as e:
                        msg = "Your config file %s has invalid JSON\n%s" % (f, e.message)
                        raise dexy.commands.UserFeedback(msg)

                if json_dict.has_key("$reset"):
                    # Reset the config, i.e. ignore everything from parent
                    # directories, just use this directory's config in json_dict
                    config_dict = json_dict
                else:
                    # Combine any config in this dir with parent dir config.
                    config_dict.update(json_dict)

                if json_dict.has_key("$globals"):
                    global_args.update(json_dict["$globals"])

                if json_dict.has_key("$variables"):
                    variables.update(json_dict["$variables"])

        config_dict['$globals'] = global_args
        config_dict['$variables'] = variables
        return config_dict

    def load_config(self):
        """
        This method determines which subdirectories will be included in the
        dexy batch and populates the config dict for each of them.
        """
        if self.args['recurse']:

            # Figure out which directories need to be skipped
            exclude_at_root = Constants.EXCLUDE_DIRS_ROOT + self.reports_dirs + [self.args['artifactsdir'], self.args['logsdir']]
            self.log.debug("project root excluded directories %s" % ", ".join(exclude_at_root))

            exclude_everywhere = Constants.EXCLUDE_DIRS_ALL_LEVELS
            self.log.debug("directories excluded at all levels %s" % ", ".join(exclude_everywhere))

            for dirpath, dirnames, filenames in os.walk(self.args['directory']):
                # Figure out if we should process this directory and recurse
                # into its children. Start with process_dir = True
                process_dir = True

                # Remove any children we don't want to recurse into.
                if dirpath == ".":
                    for x in exclude_at_root:
                        if x in dirnames:
                            dirnames.remove(x)
                for x in exclude_everywhere:
                    if x in dirnames:
                        dirnames.remove(x)

                # Look for a .nodexy file
                if os.path.isfile(os.path.join(dirpath, '.nodexy')):
                    # If we find one...
                    self.log.info(".nodexy file found in %s" % dirpath)

                    # ...remove all child dirs from processing...
                    for i in xrange(len(dirnames)):
                        dirnames.pop()

                    # ...and skip this directory.
                    process_dir = False

                # Check if we match any excludes specified on the command line
                args_exclude = self.args['exclude']
                if isinstance(args_exclude, str):
                    args_exclude = args_exclude.split()
                for pattern in args_exclude:
                    for d in dirnames:
                        m1 = re.match(pattern, d)
                        m2 = re.match("./%s" % pattern, d)
                        m3 = re.match("%s/" % pattern, d)
                        m4 = re.match("./%s/" % pattern, d)
                        if m1 or m2 or m3 or m4:
                            dirnames.remove(d)

                if process_dir:
                    self.config[dirpath] = self.config_for_directory(dirpath)
        else:
            # Not recursing
            dirpath = self.args['directory']
            self.config[dirpath] = self.config_for_directory(dirpath)

    def process_config(self):
        """
        Processes a populated config dict, identifies files to be processed,
        creates Document objects for each, links dependencies and finally does
        topological sort to establish order of batch run.
        """

        # Define the parse_doc nested function which we will call recursively.
        def parse_doc(path, input_directive, args = {}):
            # If a specification is nested in a dependency, then input_directive
            # may be a dict. If so, split it into parts before continuing.
            try:
                a, b = input_directive.popitem()
                input_directive = a
                args = b
            except AttributeError:
                pass

            tokens = input_directive.split("|")
            if "/" in tokens[0]:
                raise dexy.commands.UserFeedback("paths not allowed in tokens: %s" % tokens[0])
            if path == '.':
                glob_string = tokens[0]
            else:
                glob_string = os.path.join(re.sub("^\./", "", path), tokens[0])
            filters = tokens[1:]

            docs = []

            # virtual document
            if re.search("@", glob_string):
                virtual = True
                dangerous = any(k in ['url', 'repo', 'path'] for k in args)
                if dangerous and not self.args['danger']:
                    msg = "You are attempting to access a remote file %s." % glob_string
                    msg += " You must specify -danger option to do this.\n"
                    raise dexy.commands.UserFeedback(msg)
                glob_string = glob_string.replace("@", "")
            else:
                virtual = False

            regex = fnmatch.translate(glob_string).replace(".*", "(.*)")
            matcher = re.compile(regex)

            files = glob.glob(glob_string)

            nofiles = len(files) == 0

            if nofiles and virtual:
                files = [glob_string]

            for f in files:
                create = True
                if not virtual:
                    if os.path.isdir(f):
                        create = False

                if args.has_key('disabled'):
                    if args['disabled']:
                        create = False
                        self.log.warn("document %s|%s disabled" % (f, "|".join(filters)))

                inputs = []
                if args.has_key('inputs'):
                    if isinstance(args['inputs'], str) or isinstance(args['inputs'], unicode):
                        raise dexy.commands.UserFeedback("inputs for %s should be an array" % f)
                    for i in args['inputs']:
                        # Create document objects for input patterns (just in this directory)
                        for doc in parse_doc(path, i):
                            inputs.append(doc.key())


                m = matcher.match(f)
                if m and len(m.groups()) > 0:
                    rootname = matcher.match(f).group(1)

                # The 'ifinput' directive says that if an input exists matching
                # the specified pattern, we should create this document and it
                # will depend on the specified input.
                if args.has_key('ifinput'):
                    if isinstance(args['ifinput'], str) or isinstance(args['ifinput'], unicode):
                        ifinputs = [args['ifinput']]
                    else:
                        self.log.debug("treating input %s as iterable. class: %s" % (
                            args['ifinput'], args['ifinput'].__class__.__name__))
                        ifinputs = args['ifinput']

                    for s in ifinputs:
                        self.log.debug("evaluating ifinput %s" % s)
                        ifinput = s.replace("%", rootname)
                        self.log.debug("evaluating ifinput %s" % ifinput)
                        input_docs = parse_doc(path, ifinput, {})
                        for input_doc in input_docs:
                            inputs.append(input_doc.key())

                    if len(input_docs) == 0:
                        create = False

                if args.has_key('ifnoinput'):
                    ifinput = args['ifnoinput'].replace("%", rootname)
                    input_docs = parse_doc(path, ifinput, {})

                    if len(input_docs) > 0:
                        create = False

                if args.has_key('except'):
                    try:
                        except_re = re.compile(args['except'])
                    except sre_constants.error as e:
                        raise dexy.commands.UserFeedback("""You passed 'except' value of %s.
Please pass a valid Python-style regular expression for
'except', NOT a glob-style matcher. Error message from
re.compile: %s""" % (args['except'], e))
                    if re.match(except_re, f):
                        self.log.warn("skipping %s for %s as it matches except pattern %s" % (
                                f,
                                input_directive,
                                args['except']
                                ))
                        create = False

                if create:
                    doc = dexy.document.Document()
                    doc.set_controller(self)

                    # Filters can either be included in the name...
                    doc.set_name_and_filters(f, filters)
                    # ...or they may be listed explicitly.
                    if args.has_key('filters'):
                        doc.filters += args['filters']

                    if args.has_key('loglevel'):
                        doc.loglevelname = args['loglevel']
                    doc.setup_log() # After name has been set
                    doc.virtual = virtual

                    key = doc.key()
                    self.log.debug("creating doc %s for glob %s" % (key, glob_string))

                    if self.members.has_key(key):
                        doc = self.members[key]

                    if args.has_key('priority'):
                        doc.priority = args['priority']
                        del args['priority']

                    doc.args.update(args)

                    if args.has_key('allinputs'):
                        doc.use_all_inputs = args['allinputs']

                    if args.has_key('inputs'):
                        doc.input_args = copy.copy(args['inputs'])
                        doc.input_keys = []

                    for i in inputs:
                        doc.add_input_key(i)

                    self.members[key] = doc
                    docs.append(doc) # docs is a local list of docs

            return docs # end of parse_doc nested function

        def get_pos(member):
            key = member.key()
            return self.members.keys().index(key)

        def depend(parent, child):
            self.depends.append((get_pos(child), get_pos(parent)))

        # The real processing starts here.
        self.members = OrderedDict()
        self.depends = []

        self.batch_id = self.db.next_batch_id()
        if not self.args['silent']:
            print "batch id is", self.batch_id

        for path, config in self.config.iteritems():
            ### @export "features-global-args-1"
            if config.has_key("$globals"):
                global_args = config["$globals"]
            else:
                global_args = {}

            if config.has_key("$variables"):
                global_variables = config["$variables"]
            else:
                global_variables = {}

            if self.args.has_key('globals'):
                global_args.update(self.args['globals'])

            for k, v in config.iteritems():
                local_args = global_args.copy()
                local_args.update(v)
                local_args['$variables'] = global_variables
                for kg in global_args.keys():
                    if local_args.has_key(kg):
                        if isinstance(local_args[kg], dict):
                            local_args[kg].update(global_args[kg])
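                            # note the asymmetry: scalar values from the local
                            # config override the globals, but for dict values
                            # the global entries are merged back over the local
                            # ones here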
                parse_doc(path, k, local_args)
            ### @end

        # Determine dependencies
        total_dependencies = 0
        self.log.debug("Finalizing dependencies between documents...")
        for doc in self.members.values():
            doc.finalize_inputs(self.members)
            total_dependencies += len(doc.inputs)
            for input_doc in doc.inputs:
                depend(doc, input_doc)

            self.log.debug("finalized dependencies for %s" % doc.key())
            if len(doc.inputs) > 10:
                self.log.debug("%s inputs added" % len(doc.inputs))
            elif len(doc.inputs) == 0:
                self.log.debug("no inputs added")
            else:
                self.log.debug("inputs added: %s" % ", ".join(d.key() for d in doc.inputs))

        if len(self.args['run']) > 0:
            # Only run the specified document, and its dependencies.
            new_members = OrderedDict()
            new_depends = []

            def new_get_pos(member):
                key = member.key()
                return new_members.keys().index(key)

            def new_depend(parent, child):
                new_depends.append((new_get_pos(child), new_get_pos(parent)))

            def parse_new_document(d):
                new_members[d.key()] = d
                for input_doc in d.inputs:
                    if input_doc.key() not in new_members:
                        new_members[input_doc.key()] = input_doc
                    new_depend(d, input_doc)
                    parse_new_document(input_doc)

            run_key = self.args['run']
            if self.members.has_key(run_key):
                doc = self.members[run_key]
            else:
                matches = [k for k in self.members.keys() if k.startswith(run_key)]
                matches.sort(key=lambda k: len(self.members[k].inputs))
                doc = self.members[matches[-1]]
            parse_new_document(doc)

            if not self.args['silent']:
                print "limiting members list to %s and its dependencies, %s/%s documents will be run" % (doc.key(), len(new_members), len(self.members))
            self.members = new_members
            self.depends = new_depends

        num_members = len(self.members)
        if num_members > 0:
            dep_ratio = float(total_dependencies)/num_members
        else:
            dep_ratio = None

        if not self.args['silent']:
            print "sorting %s documents into run order, there are %s total dependencies" % (num_members, total_dependencies)
            if dep_ratio:
                print "ratio of dependencies to documents is %0.1f" % (dep_ratio)
                if dep_ratio > 10:
                    print "if you are experiencing performance problems:"
                    print "call dexy with -dryrun and inspect logs/batch-XXXX.json to debug dependencies"
                    print "consider using -strictinherit or reducing your use of 'allinputs' "

        try:
            self.log.debug("Beginning topological sort...")
            topsort_ordering = topsort(self.depends)
            self.log.debug("Topological sort completed successfully.")
        except CycleError as e:
            print "There are circular dependencies!"
            answer, num_parents, children = e.args
            for child, parents in children.items():
                for parent in parents:
                    print "%s depends on %s" % (self.members.keys()[parent], self.members.keys()[child])
            raise dexy.commands.UserFeedback(e.message)

        docs_without_dependencies = frozenset(range(len(self.members))) - frozenset(topsort_ordering)
        self.ordering = topsort_ordering + list(docs_without_dependencies)

        for i in self.ordering:
            key = self.members.keys()[i]
            self.docs.append(self.members[key])
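The ordering above comes from an external topsort helper; judging by how CycleError is unpacked, it looks like the classic topsort recipe, though that is an inference rather than something this listing shows. A minimal standalone sketch of the same idea, standard library only:

from collections import defaultdict, deque

def topsort_sketch(pairs):
    # pairs are (prerequisite, dependent) index tuples, like self.depends above
    succs, indegree = defaultdict(list), defaultdict(int)
    for pre, post in pairs:
        succs[pre].append(post)
        indegree[post] += 1
    queue = deque(n for n in list(succs) if indegree[n] == 0)
    order = []
    while queue:
        n = queue.popleft()
        order.append(n)
        for m in succs[n]:
            indegree[m] -= 1
            if indegree[m] == 0:
                queue.append(m)
    if len(order) < len(set(succs) | set(indegree)):
        raise ValueError("circular dependencies")  # stands in for CycleError
    return order

assert topsort_sketch([(0, 1), (1, 2)]) == [0, 1, 2]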
Exemple #33
def values(self):
    return list(OrderedDict.values(self))
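A note on why the explicit list() call is there: on Python 3, OrderedDict.values() returns a lazy view, so an override like this pins the result down to a concrete list that supports indexing and slicing; on Python 2 values() is already a list and the call is a cheap copy.

from collections import OrderedDict

d = OrderedDict([('a', 1), ('b', 2)])
v = list(OrderedDict.values(d))
print(v[0])  # 1 -- plain list indexing, which a Python 3 values() view would not allow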
Exemple #34
class GenerateWebPages(object):
    def __init__(self, getConfigValue, pageDir, resourceNames,
                 pageTitle, pageSubTitles, pageVersion, extraVersions, descriptionInfo):
        self.pageTitle = pageTitle
        self.pageSubTitles = pageSubTitles
        self.pageVersion = pageVersion
        self.extraVersions = extraVersions
        self.pageDir = pageDir
        self.pagesOverview = OrderedDict()
        self.pagesDetails = OrderedDict()
        self.getConfigValue = getConfigValue
        self.resourceNames = resourceNames
        self.descriptionInfo = descriptionInfo
        self.diag = logging.getLogger("GenerateWebPages")

    def makeSelectors(self, subPageNames, tags=[]):
        allSelectors = []
        firstSubPageName = self.getConfigValue("historical_report_subpages", "default")[0]
        for subPageName in subPageNames:
            if subPageName == firstSubPageName:
                suffix = ""
            else:
                suffix = "_" + subPageName.lower()
            allSelectors.append(Selector(subPageName, suffix, self.getConfigValue, tags))
        return allSelectors
    
    def removeUnused(self, unused, tagData):
        successTags = {}
        for tag in unused:
            for fn in tagData.get(tag):
                if os.path.basename(fn).startswith("teststate_"):
                    os.remove(fn)
                else:
                    successTags.setdefault(fn, []).append(tag)
        for fn, tagsToRemove in successTags.items():
            linesToKeep = []
            with open(fn) as readFile:
                for line in readFile:
                    tag = line.strip().split()[0]
                    if tag not in tagsToRemove:
                        linesToKeep.append(line)
           
            with open(fn, "w") as writeFile:
                for line in linesToKeep:
                    writeFile.write(line)
    
    def generate(self, repositoryDirs, subPageNames, archiveUnused):
        minorVersionHeader = HTMLgen.Container()
        allMonthSelectors = set()
        latestMonth = None
        pageToGraphs = {}
        for version, repositoryDirInfo in repositoryDirs.items():
            self.diag.info("Generating " + version)
            tagData, stateFiles, successFiles = self.findTestStateFilesAndTags(repositoryDirInfo)
            if len(stateFiles) > 0 or len(successFiles) > 0:
                tags = tagData.keys()
                tags.sort(self.compareTags)
                selectors = self.makeSelectors(subPageNames, tags)
                monthSelectors = SelectorByMonth.makeInstances(tags)
                allMonthSelectors.update(monthSelectors)
                allSelectors = selectors + list(reversed(monthSelectors))
                # If we already have month pages, we only regenerate the current one
                if len(self.getExistingMonthPages()) == 0:
                    selectors = allSelectors
                else:
                    currLatestMonthSel = monthSelectors[-1]
                    if latestMonth is None or currLatestMonthSel.linkName == latestMonth:
                        selectors.append(monthSelectors[-1])
                        latestMonth = currLatestMonthSel.linkName
                    selectedTags = set()
                    unusedTags = set(tags)
                    for selector in selectors:
                        currTags = set(selector.selectedTags)
                        selectedTags.update(currTags)
                        if archiveUnused:
                            unusedTags.difference_update(currTags)
                    tags = filter(lambda t: t in selectedTags, tags)
                    if archiveUnused and unusedTags:
                        plugins.log.info("Automatic repository cleaning will now remove old data for the following runs:")
                        for tag in sorted(unusedTags, self.compareTags):
                            plugins.log.info("- " + tag)
                        plugins.log.info("(To disable automatic repository cleaning in future, please run with the --manualarchive flag when collating the HTML report.)")
                        self.removeUnused(unusedTags, tagData)

                loggedTests = OrderedDict()
                categoryHandlers = {}
                self.diag.info("Processing " + str(len(stateFiles)) + " teststate files")
                relevantFiles = 0
                for stateFile, repository in stateFiles:
                    tag = self.getTagFromFile(stateFile)
                    if len(tags) == 0 or tag in tags:
                        relevantFiles += 1
                        testId, state, extraVersion = self.processTestStateFile(stateFile, repository)
                        loggedTests.setdefault(extraVersion, OrderedDict()).setdefault(testId, OrderedDict())[tag] = state
                        categoryHandlers.setdefault(tag, CategoryHandler()).registerInCategory(testId, state.category, extraVersion, state)
                        if relevantFiles % 100 == 0:
                            self.diag.info("- Processed " + str(relevantFiles) + " files with matching tags so far")
                self.diag.info("Processed " + str(relevantFiles) + " relevant teststate files")
                self.diag.info("Processing " + str(len(successFiles)) + " success files")
                for successFile, repository in successFiles:
                    testId = self.getTestIdentifier(successFile, repository)
                    extraVersion = self.findExtraVersion(repository)
                    with open(successFile) as f:
                        fileTags = set()
                        for line in f:
                            parts = line.strip().split(" ", 1)
                            if len(parts) != 2:
                                continue
                            tag, text = parts
                            if tag in fileTags:
                                sys.stderr.write("WARNING: more than one result present for tag '" + tag + "' in file " + successFile + "!\n")
                                sys.stderr.write("Ignoring later ones\n")
                                continue
                                
                            fileTags.add(tag)
                            if len(tags) == 0 or tag in tags:
                                loggedTests.setdefault(extraVersion, OrderedDict()).setdefault(testId, OrderedDict())[tag] = text
                                categoryHandlers.setdefault(tag, CategoryHandler()).registerInCategory(testId, "success", extraVersion, text)
                self.diag.info("Processed " + str(len(successFiles)) + " success files")
                versionToShow = self.removePageVersion(version)
                hasData = False
                for sel in selectors:
                    filePath = self.getPageFilePath(sel)
                    if self.pagesOverview.has_key(filePath):
                        page, pageColours = self.pagesOverview[filePath]
                    else:
                        page = self.createPage()
                        pageColours = set()
                        self.pagesOverview[filePath] = page, pageColours

                    tableHeader = self.getTableHeader(version, repositoryDirs)
                    heading = self.getHeading(versionToShow)
                    hasNewData, graphLink, tableColours = self.addTable(page, self.resourceNames, categoryHandlers, version,
                                                                        loggedTests, sel, tableHeader, filePath, heading, repositoryDirInfo)
                    hasData |= hasNewData
                    pageColours.update(tableColours)
                    if graphLink:
                        pageToGraphs.setdefault(page, []).append(graphLink)
                            
                if hasData and versionToShow:
                    link = HTMLgen.Href("#" + version, versionToShow)
                    minorVersionHeader.append(link)
                
                # put them in reverse order, most relevant first
                linkFromDetailsToOverview = [ sel.getLinkInfo(self.pageVersion) for sel in allSelectors ]
                for tag in tags:
                    details = self.pagesDetails.setdefault(tag, TestDetails(tag, self.pageTitle, self.pageSubTitles))
                    details.addVersionSection(version, categoryHandlers[tag], linkFromDetailsToOverview)
                
        selContainer = HTMLgen.Container()
        selectors = self.makeSelectors(subPageNames)
        for sel in selectors:
            target, linkName = sel.getLinkInfo(self.pageVersion)
            selContainer.append(HTMLgen.Href(target, linkName))

        monthContainer = HTMLgen.Container()
        if len(allMonthSelectors) == 1:
            # Don't want just one month, no navigation possible
            prevMonth = list(allMonthSelectors)[0].getPreviousMonthSelector()
            allMonthSelectors.add(prevMonth)
        
        for sel in sorted(allMonthSelectors):
            target, linkName = sel.getLinkInfo(self.pageVersion)
            monthContainer.append(HTMLgen.Href(target, linkName))
        
        for page, pageColours in self.pagesOverview.values():
            if len(monthContainer.contents) > 0:
                page.prepend(HTMLgen.Heading(2, monthContainer, align = 'center'))
            graphs = pageToGraphs.get(page)
            page.prepend(HTMLgen.Heading(2, selContainer, align = 'center'))
            if minorVersionHeader.contents:
                if graphs is not None and len(graphs) > 1:
                    page.prepend(HTMLgen.Heading(1, *graphs, align = 'center'))
                page.prepend(HTMLgen.Heading(1, minorVersionHeader, align = 'center'))
            creationDate = TitleWithDateStamp("").__str__().strip()
            page.prepend(HTMLgen.Paragraph(creationDate, align="center"))
            page.prepend(HTMLgen.Heading(1, self.getHeading(), align = 'center'))
            if len(pageColours) > 0:
                page.prepend(HTMLgen.BR())
                page.prepend(HTMLgen.BR())
                page.script = self.getFilterScripts(pageColours)

        self.writePages()

    def getFilterScripts(self, pageColours):
        finder = ColourFinder(self.getConfigValue)
        rowHeaderColour = finder.find("row_header_bg")
        successColour = finder.find("success_bg")
        # Always put green at the start, we often want to filter that
        sortedColours = sorted(pageColours, key=lambda c: (c != successColour, c))
        scriptCode = "var TEST_ROW_HEADER_COLOR = " + repr(rowHeaderColour) + ";\n" + \
                     "var Colors = " + repr(sortedColours) + ";"
        return [ HTMLgen.Script(code=scriptCode),
                 HTMLgen.Script(src="../javascript/jquery.js"),
                 HTMLgen.Script(src="../javascript/filter.js"),
                 HTMLgen.Script(src="../javascript/comment.js")  ]

    def getHeading(self, versionToShow=""):
        heading = "Test results for " + self.pageTitle
        if versionToShow:
            heading += "." + versionToShow
        return heading
    
    def getTableHeader(self, version, repositoryDirs):
        return version if len(repositoryDirs) > 1 else ""
                
    def getExistingMonthPages(self):
        return glob(os.path.join(self.pageDir, "test_" + self.pageVersion + "_all_???[0-9][0-9][0-9][0-9].html"))

    def compareTags(self, x, y):
        timeCmp = cmp(self.getTagTimeInSeconds(x), self.getTagTimeInSeconds(y))
        if timeCmp:
            return timeCmp
        elif len(x) != len(y):
            # If the timing is the same, sort alphabetically
            # Any number should be sorted numerically, do this by padding them with leading zeroes
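            # e.g. a hypothetical padding scheme turns "run9" into "run0009",
            # so that "run9" sorts before "run10"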
            return cmp(plugins.padNumbersWithZeroes(x), plugins.padNumbersWithZeroes(y))
        else:
            return cmp(x, y)
        
    def getTagFromFile(self, fileName):
        return os.path.basename(fileName).replace("teststate_", "")
        
    def findTestStateFilesAndTags(self, repositoryDirs):
        tagData, stateFiles, successFiles = {}, [], []
        for _, dir in repositoryDirs:
            self.diag.info("Looking for teststate files in " + dir)
            for root, _, files in sorted(os.walk(dir)):
                for file in files:
                    path = os.path.join(root, file)
                    if file.startswith("teststate_"):
                        tag = self.getTagFromFile(file)
                        stateFiles.append((path, dir))
                        tagData.setdefault(tag, []).append(path)
                    elif file.startswith("succeeded_"):
                        successFiles.append((path, dir))
                        with open(path) as f:
                            for line in f:
                                parts = line.split()
                                if parts:
                                    tag = parts[0]
                                    tagData.setdefault(tag, []).append(path)
                                
            self.diag.info("Found " + str(len(stateFiles)) + " teststate files and " + str(len(successFiles)) + " success files in " + dir)
        return tagData, stateFiles, successFiles
                          
    def processTestStateFile(self, stateFile, repository):
        state = self.readState(stateFile)
        testId = self.getTestIdentifier(stateFile, repository)
        extraVersion = self.findExtraVersion(repository)
        return testId, state, extraVersion
    
    def findExtraVersion(self, repository):
        versions = os.path.basename(repository).split(".")
        for i in xrange(len(versions)):
            version = ".".join(versions[i:])
            if version in self.extraVersions:
                return version
        return ""

    @staticmethod
    def findGlobal(modName, className):
        try:
            exec "from " + modName + " import " + className + " as _class"
        except ImportError:
            exec "from texttestlib." + modName + " import " + className + " as _class"
        return _class #@UndefinedVariable
        
    @classmethod
    def getNewState(cls, file):
        # Would like to do load(file) here... but it doesn't work with universal line endings, see Python bug 1724366
        from cStringIO import StringIO
        unpickler = Unpickler(StringIO(file.read()))
        # Magic to keep us backward compatible in the face of packages changing...
        unpickler.find_global = cls.findGlobal
        return unpickler.load()
        
    @classmethod
    def readState(cls, stateFile):
        file = open(stateFile, "rU")
        try:
            state = cls.getNewState(file)
            if isinstance(state, plugins.TestState):
                return state
            else:
                return cls.readErrorState("Incorrect type for state object.")
        except Exception, e:
            if os.path.getsize(stateFile) > 0:
                return cls.readErrorState("Stack info follows:\n" + str(e))
            else:
                return plugins.Unrunnable("Results file was empty, probably the disk it resides on is full.", "Disk full?")
Exemple #35
    def update(self):
        # Allow the source to provide terms until we have more specific ones
        # from the query.
        # Things do not go well if self.terms is None
        self._bound_source = None
        source = self.bound_source
        terms = OrderedDict()

        # populate select values if needed
        if self.populate_select:
            for t in self.source:
                if t.token not in terms:
                    terms[t.token] = t

        # pre defined terms from context+source
        self.terms = SourceTerms(self.context, self.request, self.form, self.field, self, source)

        # If we have values in the request,
        # use these to get the terms.
        request_values = z3c.form.interfaces.NOVALUE
        # extract selected value
        if not self.ignoreRequest:
            request_values = self.extract(
                default=z3c.form.interfaces.NOVALUE)

        if request_values is not z3c.form.interfaces.NOVALUE:
            if not isinstance(request_values, (tuple, set, list)):
                request_values = (request_values,)

            for token in request_values:
                if not token or token == self.noValueToken:
                    continue
                try:
                    t = source.getTermByToken(token)
                    terms[t.token] = t
                except LookupError:
                    # Term no longer available
                    if not self.ignoreMissing:
                        raise

        # take the value from the current saved value
        # if there is an existing adapter allowing it
        if not self.ignoreContext:
            selection = zope.component.getMultiAdapter(
                (self.context, self.field),
                z3c.form.interfaces.IDataManager).query()
            if selection is z3c.form.interfaces.NOVALUE:
                selection = []
            elif not isinstance(selection,
                                (tuple, set, list)):
                selection = [selection]
            for value in selection:
                if not value:
                    continue
                try:
                    t = source.getTerm(value)
                    terms[t.token] = t
                except LookupError:
                    # Term no longer available
                    if not self.ignoreMissing:
                        raise
        # re-set terms with values from request
        self.terms = QueryTerms(self.context, self.request, self.form, self.field, self, terms.values())

        # update widget selected value if any
        select.SelectWidget.update(self)
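Stripped of the z3c.form machinery, the heart of update() above is an OrderedDict keyed by token, so terms gathered from several sources (pre-populated values, the request, the saved selection) are deduplicated while insertion order is preserved. A standalone sketch of the pattern, with a stand-in Term type rather than the widget's real term objects:

from collections import OrderedDict, namedtuple

Term = namedtuple("Term", "token title")

def merge_terms(*sources):
    terms = OrderedDict()
    for source in sources:
        for t in source:
            if t.token not in terms:  # first occurrence wins, order preserved
                terms[t.token] = t
    return list(terms.values())

populated = [Term("a", "Apple"), Term("b", "Banana")]
requested = [Term("b", "Banana"), Term("c", "Cherry")]
print([t.token for t in merge_terms(populated, requested)])  # ['a', 'b', 'c']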
Exemple #36
def group_member_export(request, group_slug):
    """
    Export all group members for a specific group
    """
    group = get_object_or_404(Group, slug=group_slug)

    # if they can edit it, they can export it
    if not has_perm(request.user,'user_groups.change_group', group):
        raise Http403

    import xlwt
    from ordereddict import OrderedDict
    from django.db import connection

    # create the excel book and sheet
    book = xlwt.Workbook(encoding='utf8')
    sheet = book.add_sheet('Group Members')
    
    # the key is what the column will be in the
    # excel sheet. the value is the database lookup
    # Used OrderedDict to maintain the column order
    group_mappings = OrderedDict([
        ('user_id', 'au.id'),
        ('first_name', 'au.first_name'),
        ('last_name', 'au.last_name'),
        ('email', 'au.email'),
        ('receives email', 'pp.direct_mail'),
        ('company', 'pp.company'),
        ('address', 'pp.address'),
        ('address2', 'pp.address2'),
        ('city', 'pp.city'),
        ('state', 'pp.state'),
        ('zipcode', 'pp.zipcode'),
        ('country', 'pp.country'),
        ('phone', 'pp.phone'),
        ('is_active', 'au.is_active'),
        ('date', 'gm.create_dt'),
    ])
    group_lookups = ','.join(group_mappings.values())

    # Use custom SQL to fetch the rows because we need to
    # populate the user profile information, and you
    # cannot do that with Django's ORM without calling
    # get_profile() for each user. Pulling 13,000 group
    # members is a single query this way; through the ORM
    # it would take 13,000 individual queries :(
    cursor = connection.cursor()
    sql = "SELECT %s FROM user_groups_groupmembership gm \
           INNER JOIN auth_user au ON (au.id = gm.member_id) \
           LEFT OUTER JOIN profiles_profile pp \
           on (pp.user_id = gm.member_id) WHERE group_id = %%s;"
    sql =  sql % group_lookups
    cursor.execute(sql, [group.pk])
    values_list = list(cursor.fetchall())

    # Append the heading to the list of values that will
    # go into the excel sheet
    values_list.insert(0, group_mappings.keys())
    
    # excel date styles
    default_style = xlwt.Style.default_style
    datetime_style = xlwt.easyxf(num_format_str='mm/dd/yyyy hh:mm')
    date_style = xlwt.easyxf(num_format_str='mm/dd/yyyy')

    if values_list:
        # Write the data enumerated to the excel sheet
        for row, row_data in enumerate(values_list):
            for col, val in enumerate(row_data):
                # styles the date/time fields
                if isinstance(val, datetime):
                    style = datetime_style
                elif isinstance(val, date):
                    style = date_style
                else:
                    style = default_style
                sheet.write(row, col, val, style=style)

    response = HttpResponse(mimetype='application/vnd.ms-excel')
    response['Content-Disposition'] = 'attachment; filename=group_%s_member_export.xls' % group.pk
    book.save(response)
    return response
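The OrderedDict-as-column-spec idea generalizes to any tabular export: the keys supply the header row and the values drive the data lookup. A trimmed-down, runnable sketch (the sample data is made up; requires the xlwt package):

import xlwt
from collections import OrderedDict

columns = OrderedDict([('first_name', 0), ('last_name', 1), ('email', 2)])
rows = [('Ada', 'Lovelace', 'ada@example.com'),
        ('Grace', 'Hopper', 'grace@example.com')]

book = xlwt.Workbook(encoding='utf8')
sheet = book.add_sheet('Members')
for col, header in enumerate(columns):
    sheet.write(0, col, header)  # header row straight from the dict keys
for r, row in enumerate(rows):
    for col, header in enumerate(columns):
        sheet.write(r + 1, col, row[columns[header]])
book.save('members.xls')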
Exemple #37
def apply(self):
    """Transform each
    :class:`~sphinxcontrib.bibtex.nodes.bibliography` node into a
    list of citations.
    """
    env = self.document.settings.env
    for bibnode in self.document.traverse(bibliography):
        # get the information of this bibliography node
        # by looking up its id in the bibliography cache
        id_ = bibnode['ids'][0]
        infos = [info for other_id, info
                 in env.bibtex_cache.bibliographies.items()
                 if other_id == id_ and info.docname == env.docname]
        if not infos:
            raise RuntimeError(
                "document %s has no bibliography nodes with id '%s'"
                % (env.docname, id_))
        elif len(infos) >= 2:
            raise RuntimeError(
                "document %s has multiple bibliography nodes with id '%s'"
                % (env.docname, id_))
        info = infos[0]
        # generate entries
        entries = OrderedDict()
        for bibfile in info.bibfiles:
            # XXX entries are modified below in an unpicklable way
            # XXX so fetch a deep copy
            data = env.bibtex_cache.bibfiles[bibfile].data
            if info.cite == "all":
                bibfile_entries = data.entries.values()
            elif info.cite == "cited":
                bibfile_entries = (
                    entry for entry in data.entries.values()
                    if env.bibtex_cache.is_cited(entry.key))
            elif info.cite == "notcited":
                bibfile_entries = (
                    entry for entry in data.entries.values()
                    if not env.bibtex_cache.is_cited(entry.key))
            else:
                raise RuntimeError("invalid cite option (%s)" % info.cite)
            for entry in bibfile_entries:
                entries[entry.key] = copy.deepcopy(entry)
        # order entries according to which were cited first
        # first, we add all keys that were cited
        # then, we add all remaining keys
        sorted_entries = []
        for key in env.bibtex_cache.get_all_cited_keys():
            try:
                entry = entries.pop(key)
            except KeyError:
                pass
            else:
                sorted_entries.append(entry)
        sorted_entries += entries.values()
        # locate and instantiate style plugin
        style_cls = find_plugin(
            'pybtex.style.formatting', info.style)
        style = style_cls()
        # create citation nodes for all references
        backend = output_backend()
        if info.list_ == "enumerated":
            nodes = docutils.nodes.enumerated_list()
            nodes['enumtype'] = info.enumtype
            if info.start >= 1:
                nodes['start'] = info.start
                env.bibtex_cache.set_enum_count(env.docname, info.start)
            else:
                nodes['start'] = env.bibtex_cache.get_enum_count(env.docname)
        elif info.list_ == "bullet":
            nodes = docutils.nodes.bullet_list()
        else: # "citation"
            nodes = docutils.nodes.paragraph()
        # XXX style.format_entries modifies entries in unpicklable way
        for entry in style.format_entries(sorted_entries):
            if info.list_ == "enumerated" or info.list_ == "bullet":
                citation = docutils.nodes.list_item()
                citation += entry.text.render(backend)
            else: # "citation"
                citation = backend.citation(entry, self.document)
                # backend.citation(...) uses entry.key as citation label
                # we change it to entry.label later on
                # but we must note the entry.label now;
                # at this point, we also already prefix the label
                key = citation[0].astext()
                info.labels[key] = info.labelprefix + entry.label
            node_text_transform(citation, transform_url_command)
            if info.curly_bracket_strip:
                node_text_transform(citation, transform_curly_bracket_strip)
            nodes += citation
            if info.list_ == "enumerated":
                env.bibtex_cache.inc_enum_count(env.docname)
        bibnode.replace_self(nodes)
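The ordering trick above (pop the cited keys out of the OrderedDict first, then append whatever was never cited) is easy to see in isolation:

from collections import OrderedDict

entries = OrderedDict([('knuth', 1), ('dijkstra', 2), ('turing', 3)])
cited_first = []
for key in ['turing', 'hopper', 'knuth']:  # citation order; 'hopper' is unknown
    try:
        cited_first.append(entries.pop(key))
    except KeyError:
        pass  # cited but not present in any .bib file
cited_first += list(entries.values())
print(cited_first)  # [3, 1, 2]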
Exemple #38
class HasParameters(object):
    """This class provides an implementation of the IHasParameters interface."""

    _do_not_promote = ['get_expr_depends', 'get_referenced_compnames',
                       'get_referenced_varpaths', 'get_metadata']

    def __init__(self, parent):
        self._parameters = OrderedDict()
        self._allowed_types = ['continuous']
        if obj_has_interface(parent, ISolver):
            self._allowed_types.append('unbounded')
        self._parent = None if parent is None else weakref.ref(parent)

    def __getstate__(self):
        state = self.__dict__.copy()
        state['_parent'] = self.parent
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        parent = state['_parent']
        self._parent = None if parent is None else weakref.ref(parent)

    @property
    def parent(self):
        """ The object we are a delegate of. """
        return None if self._parent is None else self._parent()

    def _item_count(self):
        """This is used by the replace function to determine if a delegate from
        the target object is 'empty' or not.  If it's empty, it's not an error
        if the replacing object doesn't have this delegate.
        """
        return len(self._parameters)

    def add_parameter(self, target, low=None, high=None,
                      scaler=None, adder=None, start=None,
                      fd_step=None, name=None, scope=None):
        """Adds a parameter or group of parameters to the driver.

        target: string or iter of strings or Parameter
            What the driver should vary during execution. A *target* is an
            expression that can reside on the left-hand side of an assignment
            statement, so typically it will be the name of a variable or
            possibly a subscript expression indicating an entry within an array
            variable, e.g., x[3]. If an iterator of targets is given, then the
            driver will set all targets given to the same value whenever it
            varies this parameter during execution. If a Parameter instance is
            given, then that instance is copied into the driver with any other
            arguments specified, overriding the values in the given parameter.

        low: float (optional)
            Minimum allowed value of the parameter. If scaler and/or adder
            is supplied, use the transformed value here. If target is an array,
            this may also be an array, but must have the same size.

        high: float (optional)
            Maximum allowed value of the parameter. If scaler and/or adder
            is supplied, use the transformed value here. If target is an array,
            this may also be an array, but must have the same size.

        scaler: float (optional)
            Value to multiply the possibly offset parameter value by. If target
            is an array, this may also be an array, but must have the same size.

        adder: float (optional)
            Value to add to parameter prior to possible scaling. If target is
            an array, this may also be an array, but must have the same size.

        start: any (optional)
            Value to set into the target or targets of a parameter before
            starting any executions. If not given, analysis will start with
            whatever values are in the target or targets at that time. If target
            is an array, this may also be an array, but must have the same size.

        fd_step: float (optional)
            Step-size to use for finite difference calculation. If no value is
            given, the differentiator will use its own default. If target is an
            array, this may also be an array, but must have the same size.

        name: str (optional)
            Name used to refer to the parameter in place of the name of the
            variable referred to in the parameter string.
            This is sometimes useful if, for example, multiple entries in the
            same array variable are declared as parameters.

        scope: object (optional)
            The object to be used as the scope when evaluating the expression.

        If neither "low" nor "high" is specified, the min and max will
        default to the values in the metadata of the variable being
        referenced.
        """

        if isinstance(target, (ParameterBase, ParameterGroup)):
            self._parameters[target.name] = target
            target.override(low, high, scaler, adder, start, fd_step, name)
        else:
            if isinstance(target, basestring):
                names = [target]
                key = target
            else:
                names = target
                key = tuple(target)

            if name is not None:
                key = name

            dups = set(self.list_param_targets()).intersection(names)
            if len(dups) == 1:
                self.parent.raise_exception("'%s' is already a Parameter"
                                            " target" % dups.pop(), ValueError)
            elif len(dups) > 1:
                self.parent.raise_exception("%s are already Parameter targets"
                                            % sorted(list(dups)), ValueError)

            if key in self._parameters:
                self.parent.raise_exception("%s is already a Parameter" % key,
                                            ValueError)
            try:
                _scope = self._get_scope(scope)
                if len(names) == 1:
                    target = self._create(names[0], low, high, scaler, adder,
                                          start, fd_step, key, _scope)
                else:  # defining a ParameterGroup
                    parameters = [self._create(n, low, high, scaler, adder,
                                               start, fd_step, key, _scope)
                                  for n in names]
                    types = set([p.valtypename for p in parameters])
                    if len(types) > 1:
                        raise ValueError("Can't add parameter %s because "
                                         "%s are not all of the same type" %
                                         (key, " and ".join(names)))
                    target = ParameterGroup(parameters)
                self._parameters[key] = target
            except Exception:
                self.parent.reraise_exception()

        self.parent.config_changed()

    def _create(self, target, low, high, scaler, adder, start, fd_step,
                key, scope):
        """ Create one Parameter or ArrayParameter. """
        try:
            expreval = ExprEvaluator(target, scope)
        except Exception as err:
            raise err.__class__("Can't add parameter: %s" % err)
        if not expreval.is_valid_assignee():
            raise ValueError("Can't add parameter: '%s' is not a"
                             " valid parameter expression"
                             % expreval.text)
        try:
            val = expreval.evaluate()
        except Exception as err:
            val = None  # Let Parameter code sort out why.

        name = key[0] if isinstance(key, tuple) else key

        if isinstance(val, ndarray):
            return ArrayParameter(target, low=low, high=high,
                                  scaler=scaler, adder=adder,
                                  start=start, fd_step=fd_step,
                                  name=name, scope=scope,
                                  _expreval=expreval, _val=val,
                                  _allowed_types=self._allowed_types)
        else:
            return Parameter(target, low=low, high=high,
                             scaler=scaler, adder=adder,
                             start=start, fd_step=fd_step,
                             name=name, scope=scope,
                             _expreval=expreval, _val=val,
                             _allowed_types=self._allowed_types)

    def remove_parameter(self, name):
        """Removes the parameter with the given name."""
        param = self._parameters.get(name)
        if param:
            del self._parameters[name]
        else:
            self.parent.raise_exception("Trying to remove parameter '%s' "
                                        "that is not in this driver."
                                        % (name,), AttributeError)
        self.parent.config_changed()

    def config_parameters(self):
        """Reconfigure parameters from potentially changed targets."""
        for param in self._parameters.values():
            param.configure()

    def get_references(self, name):
        """Return references to component `name` in preparation for subsequent
        :meth:`restore_references` call.

        name: string
            Name of component being removed.
        """
        refs = OrderedDict()
        for pname, param in self._parameters.items():
            if name in param.get_referenced_compnames():
                refs[pname] = param
        return refs

    def remove_references(self, name):
        """Remove references to component `name`.

        name: string
            Name of component being removed.
        """
        to_remove = []
        for pname, param in self._parameters.items():
            if name in param.get_referenced_compnames():
                to_remove.append(pname)

        for pname in to_remove:
            self.remove_parameter(pname)

    def restore_references(self, refs):
        """Restore references to component `name` from `refs`.

        refs: object
            Value returned by :meth:`get_references`.
        """
        for pname, param in refs.items():
            try:
                self.add_parameter(param)
            except Exception as err:
                self.parent._logger.warning("Couldn't restore parameter '%s': %s"
                                            % (pname, str(err)))

    def list_param_targets(self):
        """Returns a list of parameter targets. Note that this
        list may contain more entries than the list of Parameter,
        ParameterGroup, and ArrayParameter objects since ParameterGroup
        instances have multiple targets.
        """
        targets = []
        for param in self._parameters.values():
            targets.extend(param.targets)
        return targets

    def list_param_group_targets(self):
        """Returns a list of tuples that contain the targets for each
        parameter group.
        """
        targets = []
        for param in self.get_parameters().values():
            targets.append(tuple(param.targets))
        return targets

    def clear_parameters(self):
        """Removes all parameters."""
        for name in self._parameters.keys():
            self.remove_parameter(name)
        self._parameters = OrderedDict()

    def get_parameters(self):
        """Returns an ordered dict of parameter objects."""
        return self._parameters

    def total_parameters(self):
        """Returns the total number of values to be set."""
        return sum([param.size for param in self._parameters.values()])

    def init_parameters(self):
        """Sets all parameters to their start value if a
        start value is given
        """
        scope = self._get_scope()
        for param in self._parameters.itervalues():
            if param.start is not None:
                param.set(param.start, scope)

    def set_parameter_by_name(self, name, value, case=None, scope=None):
        """Sets a single parameter by its name attribute.

        name: str
            Name of the parameter. This is either the name alias given when
            the parameter was added or the variable path of the parameter's
            target if no name was given.

        value: object (typically a float)
            Value of the parameter to be set.

        case: Case (optional)
            If supplied, the values will be associated with their corresponding
            targets and added as inputs to the Case instead of being set
            directly into the model.
        """
        param = self._parameters[name]
        if case is None:
            param.set(value, self._get_scope(scope))
        else:
            for target in param.targets:
                case.add_input(target, value)
            return case

    def set_parameters(self, values, case=None, scope=None):
        """Pushes the values in the iterator 'values' into the corresponding
        variables in the model.  If the 'case' arg is supplied, the values
        will be set into the case and not into the model.

        values: iterator
            Iterator of input values with an order defined to match the
            order of parameters returned by the get_parameters method. All
            'values' must support the len() function.

        case: Case (optional)
            If supplied, the values will be associated with their corresponding
            targets and added as inputs to the Case instead of being set
            directly into the model.
        """
        if len(values) != self.total_parameters():
            raise ValueError("number of input values (%s) != expected number of"
                             " values (%s)" %
                             (len(values), self.total_parameters()))
        if case is None:
            scope = self._get_scope(scope)
            start = 0
            for param in self._parameters.values():
                size = param.size
                if size == 1:
                    param.set(values[start], scope)
                    start += 1
                else:
                    end = start + size
                    param.set(values[start:end], scope)
                    start = end
        else:
            start = 0
            for param in self._parameters.values():
                size = param.size
                if size == 1:
                    for target in param.targets:
                        case.add_input(target, values[start])
                    start += 1
                else:
                    end = start + size
                    for target in param.targets:
                        case.add_input(target, values[start:end])
                    start = end
            return case

    def eval_parameters(self, scope=None, dtype='d'):
        """Return evaluated parameter values.

        dtype: string or None
            If not None, return an array of this dtype. Otherwise just return
            a list (useful if parameters may be of different types).
        """
        result = []
        for param in self._parameters.values():
            result.extend(param.evaluate(scope))
        if dtype:
            result = array(result, dtype)
        return result

    def get_lower_bounds(self, dtype='d'):
        """Return lower bound values.

        dtype: string or None
            If not None, return an array of this dtype. Otherwise just return
            a list (useful if parameters may be of different types).
        """
        result = []
        for param in self._parameters.values():
            result.extend(param.get_low())
        if dtype:
            result = array(result, dtype)
        return result

    def get_upper_bounds(self, dtype='d'):
        """Return upper bound values.

        dtype: string or None
            If not None, return an array of this dtype. Otherwise just return
            a list (useful if parameters may be of different types).
        """
        result = []
        for param in self._parameters.values():
            result.extend(param.get_high())
        if dtype:
            result = array(result, dtype)
        return result

    def get_fd_steps(self, dtype='d'):
        """Return fd_step values, they may include None.

        dtype: string or None
            If not None, return an array of this dtype. Otherwise just return
            a list (useful if it's valid to have None for a step size).
        """
        result = []
        for param in self._parameters.values():
            result.extend(param.get_fd_step())
        if dtype:
            result = array(result, dtype)
        return result

    def get_expr_depends(self):
        """Returns a list of tuples of the form (src_comp_name, dest_comp_name)
        for each dependency introduced by a parameter.
        """
        conn_list = []
        pname = self.parent.name
        for param in self._parameters.values():
            for cname in param.get_referenced_compnames():
                conn_list.append((pname, cname))
        return conn_list

    def get_referenced_compnames(self):
        """Return a set of Component names based on the
        pathnames of Variables referenced in our target strings.
        """
        result = set()
        for param in self._parameters.values():
            result.update(param.get_referenced_compnames())
        return result

    def get_referenced_varpaths(self):
        """Return a set of Variable names referenced in our target strings.
        """
        result = set()
        for param in self._parameters.values():
            result.update(param.get_referenced_varpaths())
        return result

    def _get_scope(self, scope=None):
        if scope is None:
            try:
                return self.parent.get_expr_scope()
            except AttributeError:
                pass
        return scope

    def mimic(self, target):
        old = self._parameters
        self.clear_parameters()
        try:
            for name, param in target.get_parameters().items():
                self._parameters[name] = param.copy()
        except Exception:
            self._parameters = old
            raise
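set_parameters and eval_parameters above treat the parameter set as one flat vector in which each parameter consumes param.size entries. The slicing logic on its own, with invented sizes and values:

values = [1.0, 2.0, 3.0, 4.0]
sizes = [1, 3]  # a scalar parameter followed by a 3-element array parameter
start, chunks = 0, []
for size in sizes:
    end = start + size
    chunks.append(values[start] if size == 1 else values[start:end])
    start = end
assert chunks == [1.0, [2.0, 3.0, 4.0]]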
Exemple #39
def table_entry(self):
    strattrs = OrderedDict((attr, str(getattr(self, attr))) for attr in self.__slots__)
    strattrs['mass'] = str(self.mass/1000.) # masses are in GeV in the particle table.
    return ' '.join(strattrs.values())
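The snippet relies on __slots__ preserving declaration order to fix the column order. A self-contained rendering with a made-up Particle class (the MeV-to-GeV reading of the mass conversion is an assumption based on the comment):

from collections import OrderedDict

class Particle(object):
    __slots__ = ('name', 'pdgid', 'mass')
    def __init__(self, name, pdgid, mass):
        self.name, self.pdgid, self.mass = name, pdgid, mass
    def table_entry(self):
        strattrs = OrderedDict((attr, str(getattr(self, attr)))
                               for attr in self.__slots__)
        strattrs['mass'] = str(self.mass / 1000.)  # presumably MeV -> GeV
        return ' '.join(strattrs.values())

print(Particle('mu-', 13, 105.6583745).table_entry())  # mu- 13 0.1056583745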
Exemple #41
class Dependency:
    ## Construct an empty dependency tree
    #  @param self the object reference
    #  @param silent  minimal feedback
    #  @param autobuild  warn rather than fail on multiple version dependencies. XXX
    def __init__(self, silent=True, autobuild=False):
        ## The ASKAP top-level directory
        self.ASKAPROOT = os.environ.get("ASKAP_ROOT")
        if self.ASKAPROOT is None:
            msg = "ASKAP_ROOT environment variable is not defined"
            raise BuildError(msg)
        #
        self.DEPFILE = "dependencies"  # The basename of the dependency file
        self.INSTALL_SUBDIR = "install"
        self._deps = OrderedDict()
        #
        self._bindirs = []
        self._incdirs = []
        self._libdirs = []
        self._rootdirs = []
        #
        self._cppflags = []  # XXX "defs" in package.info. LOFAR/log4cxx
        #
        self._env = []
        self._jars = []
        self._libs = []
        self._packages = []
        #
        self._ldlibpath = ""
        self._pypath = ""
        #
        self._autobuild = autobuild
        self._silent = silent  # minimal output
        self.selfupdate = False  # should object request updates from svn

        self._codename = utils.get_platform()['codename']
        self._system = utils.get_platform()['system'].lower()
        self._hostname = socket.gethostname().split(".")[0]

    def q_print(self, msg):
        if self._silent:
            return
        utils.q_print(msg)

    ## Get the path of the specified dependency package
    # @param self the current object
    # @param key the label of the package dependency
    # @return the path (relative to ASKAP_ROOT) to the package
    def get_dep_path(self, key):
        return self._deps[key]["path"]

    def get_explicit(self):
        return [v["path"] for v in self._deps.values() if v["explicit"]]

    # Used by "in" test.
    # object.__contains__(self, item)
    #
    # Called to implement membership test operators. Should return true if item
    # is in self, false otherwise. For mapping objects, this should consider
    # the keys of the mapping rather than the values or the key-item pairs.
    #
    # For objects that do not define __contains__(), the membership test first
    # tries iteration via __iter__(), then the old sequence iteration protocol
    # via __getitem__(), see this section in the language reference.
    #
    # http://docs.python.org/reference/datamodel.html

    def __contains__(self, key):
        return self._deps.has_key(key)

    ## Get the absolute path to the dependency packages installed location
    # @param self the current object
    # @param key the label of the package dependency
    # @return the absolute path to the package installed location
    def get_install_path(self, key):
        rel_path = self._deps[key]["path"]
        full_path = os.path.join(self.ASKAPROOT, rel_path, self.INSTALL_SUBDIR)
        return os.path.abspath(full_path)

    def get_path(self):
        return os.path.pathsep.join(self._bindirs)

    ## Get the libraries retrieved in the dependency analysis
    #  @param self the object reference
    #  @param mapped whether to return (library, installdir) tuples
    #  @return a list of library names
    def get_libs(self, mapped=False):
        if mapped:
            return self._libs[:]
        else:
            return [m[0] for m in self._libs]

    ## Get the environment variables retrieved in the dependency analysis
    #  @param self the object reference
    #  @return a dictionary of ENVVAR => value pairs
    def get_env(self):
        return dict([i.split("=") for i in self._env])

    ## Get the java classpath for the dependencies
    #  @param self the object reference
    #  @return a classpath string of the form x/y/z.jar:a/b/c.jar
    def get_classpath(self):
        return os.path.pathsep.join(self._jars)

    ## Get the root directories of the tags retrieved in the dependency analysis
    #  @param self the object reference
    #  @return a list of directory names
    def get_rootdirs(self, mapped=False):  # XXX used in ant.py builder with mapped=true.
        if mapped:
            return [ (k, os.path.join( self.ASKAPROOT, v['path'])) \
                for k,v in self._deps.iteritems()]
        return self._rootdirs[:]

    ## Get the LIBRARY directories retrieved in the dependency analysis
    #  @param self the object reference
    #  @param mapped return tuples of (libdir, installdir)
    #  @return a list of library directories or tuples of rootdirs and libdirs
    def get_librarydirs(self, mapped=False):
        if mapped:
            return self._libdirs[:]
        else:
            return [m[0] for m in self._libdirs]

    ## Get the LD_LIBRARY_PATH accumulated in the dependency analysis
    #  @param self the object reference
    #  @return a string representing the LD_LIBRARY_PATH
    def get_ld_library_path(self):
        return self._ldlibpath.strip(":")

    ## Get the INCLUDE directories retrieved in the dependency analysis
    #  @param self the object reference
    #  @return a list of header file directories
    def get_includedirs(self):
        return self._incdirs[:]

    ## Get the CPPFLAGS retrieved in the dependency analysis
    #  @param self the object reference
    #  @return a list of preprocessor flags
    def get_cppflags(self):
        return self._cppflags[:]

    def get_pythonpath(self):
        return self._pypath.strip(":")

    ## Get a list of doxygen tag files in the dependencies. This is used for
    #  cross-referencing the documentation
    #  @todo Re-enable: This has been disabled until it is working for python
    #  @param self the object reference
    #  @return a list of TAGFILES entries
    # XXX used only in scons_tools/askap_package.py
    def get_tagfiles(self):
        tagfiles = []
        for pth in self._rootdirs:
            tagname = utils.tag_name(pth)
            tagpath = os.path.join(pth, tagname)
            if os.path.exists(tagpath):
                tagfiles.append('"%s=%s/html"' % (tagpath, pth))
        return tagfiles

    def _get_dependencies(self, package, explicit=False):

        for ext in [self._hostname, self._system, self._codename, 'default']:
            if ext:  # i.e. not empty string
                depfile = '%s.%s' % (self.DEPFILE, ext)
                if package:
                    depfile = os.path.join(self.ASKAPROOT, package, depfile)
                if self.selfupdate:
                    # always update if it is the "root/target" package
                    basedir = os.path.split(depfile)[0] or "."
                    if not os.path.exists(basedir):
                        utils.update_tree(basedir)
                if os.path.exists(depfile):
                    self.q_print("info: processing %s" % depfile)
                    self._get_depfile(depfile, explicit=explicit)
                    break

    def _get_depfile(self, depfile, overwrite=False, explicit=False):
        if not os.path.exists(depfile):
            # assume no dependencies
            return
        dfh = open(depfile)
        for line in dfh.readlines():
            line = line.strip()
            if line.startswith("#"): continue
            kv = line.split("=", 1)
            if len(kv) == 2:
                key = kv[0].strip()
                value = kv[1].strip()
                # see if the file explicitly names any libs
                lspl = value.split(";")
                libs = None
                if len(lspl) > 1:
                    libs = lspl[1].strip().split()
                value = lspl[0].strip()
                self._add_dependency(key, value, libs, overwrite, explicit)
                if not value.startswith("/"):
                    # recurse into ASKAP dependencies
                    # otherwise just move on as we specified system dependency
                    # which will not have a dependency file
                    self._packages.append(value)
                    self._get_dependencies(value)

        dfh.close()
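
    # A plausible dependencies file, inferred from the parser above (the
    # package names and paths are invented):
    #
    #     # comment lines start with '#'
    #     askap   = Code/Base/askap/current
    #     log4cxx = 3rdParty/log4cxx/log4cxx-0.10.0 ; log4cxx
    #     blas    = /usr/lib
    #
    # "key = path" adds a dependency; an optional "; lib1 lib2 ..." suffix
    # names the link libraries explicitly, and an absolute path marks a
    # system package that is not recursed into.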

    def _get_info(self, packagedir):
        info = {
            # A single directory path relative to the install directory.
            'bindir': 'bin',
            'distdir': 'dist',
            'incdir': 'include',
            'libdir': 'lib',
            # Space separated lists. XXX Default should be '[]'?
            'defs': None,
            'env': None,
            'jars': None,
            'libs': None,
            # Define a single python module name and version.
            # e.g. pymodule=numpy==1.2.0
            'pymodule': None,
        }
        sslists = ['defs', 'env', 'jars', 'libs']
        infofile = os.path.join(packagedir, 'package.info')

        if os.path.exists(infofile):
            f = open(infofile)
            for line in f.readlines():
                line = line.strip()
                if line.startswith("#"): continue
                kv = line.split("=", 1)
                if len(kv) == 2:
                    key = kv[0].strip()
                    value = kv[1].strip()
                    if key in info.keys():
                        if key in sslists:
                            info[key] = value.split()
                        else:
                            info[key] = value
            f.close()
        return info

    def _add_dependency(self,
                        key,
                        value,
                        libs,
                        overwrite=False,
                        explicit=False):
        if key in self._deps:
            # deal with potential symbolic links for 'default' packages
            paths = [self._deps[key]["path"], value]
            outpaths = []
            for pth in paths:
                if not pth.startswith("/"):
                    pth = os.path.join(os.environ["ASKAP_ROOT"], pth)
                pth = os.path.realpath(pth)
                outpaths.append(pth)
            if outpaths[0] == outpaths[1]:
                if libs:
                    if self._deps[key]["libs"] is not None:
                        # prepend the libs
                        self._deps[key][
                            "libs"] = libs + self._deps[key]["libs"]
                    else:
                        self._deps[key]["libs"] = libs
                    self._deps.toend(key)
                else:
                    # another dependency, so move it to the end, so link
                    # order is correct
                    self._deps.toend(key)
                return
            else:
                if overwrite:
                    self._deps[key]["path"] = value
                    self.q_print(
                        "info: Overwriting default package dependency '%s' with host specific package (from %s)"
                        % (key, value))
                elif self._autobuild:  # XXX maybe a mistake?
                    self.q_print(
                        "warn: Possible multiple version dependency \n\
                    %s != %s" % (self._deps[key]["path"], value))

                else:
                    raise BuildError("Multiple version dependency \n\
                    %s != %s" % (self._deps[key]["path"], value))
        else:
            self.q_print("info: Adding package dependency '%s' (from %s)" %
                         (key, value))
            # now update the dependency itself
            # XXX only used in Tools/scons_tools/askap_package.py
            if self.selfupdate:
                utils.update_tree(value)
            self._deps[key] = {
                "path": value,
                "libs": libs,
                "explicit": explicit
            }

    def _remove_duplicates(self, values):
        # find unique elements
        libs = [v[0] for v in values]
        for k in set(libs):
            # remove all but last duplicate entry
            while libs.count(k) > 1:
                idx = libs.index(k)
                libs.pop(idx)
                values.pop(idx)

    ## Add a ThirdPartyLibrary or ASKAP package to the environment
    #  This will add the package path in ASKAP_ROOT
    #  @param self the object reference
    #  @param pkgname The name of the package as in the repository, e.g.
    #  lapack. Default None means that this is defined in local
    #  dependencies.xyz
    #  @param tag The location of the package, e.g.
    #  3rdParty/lapack-3.1.1/lapack-3.1.1
    #  @param libs The name of the libraries to link against,
    #  default None is the same as the pkgname
    #  @param libdir The location of the library dir relative to the package,
    #  default None which will use settings in the package.info file
    #  @param incdir The location of the include dir relative to the package,
    #  default None which will use settings in the package.info file
    #  @param bindir The location of the bin dir relative to the package,
    #  default None which will use settings in the package.info file
    #  @param pymodule the 'require' statement to specify this dependency
    #  statement, e.g. "askap.loghandlers==current"
    def add_package(self,
                    pkgname=None,
                    tag=None,
                    libs=None,
                    libdir=None,
                    incdir=None,
                    bindir=None,
                    pymodule=None):
        self._deps = OrderedDict()

        if pkgname:
            if not tag:
                BuildError("No tag specified")
            if self.selfupdate:
                #if not os.path.exists(tag):
                utils.update_tree(tag)
            self._add_path(pkgname, self.ASKAPROOT, tag, libs, libdir, incdir,
                           bindir, pymodule)
            self.q_print("info: Adding package '%s'" % pkgname)

        if tag:
            tag = os.path.join(self.ASKAPROOT, tag)

        self._get_dependencies(tag, explicit=True)

        parent = ''
        for key, value in self._deps.iteritems():
            self._add_path(key,
                           self.ASKAPROOT,
                           value["path"],
                           libs=value["libs"],
                           parent=parent)
            parent = value["path"]

    # Add a ASKAP repository path to the environment
    # This sets up LIBPATH and CPPPATH
    def _add_path(self,
                  pkgname,
                  root,
                  tag,
                  parent='',
                  libs=None,
                  libdir=None,
                  incdir=None,
                  bindir=None,
                  pymodule=None):
        loc = None

        if tag.startswith("/"):  # external package
            loc = tag
        else:  # ASKAP package or 3rdParty library
            loc = os.path.join(root, tag)

        rloc = os.path.relpath(loc, self.ASKAPROOT)
        if not os.path.exists(loc):
            raise BuildError(
                "Dependency directory '%s' does not exist (requested by %s)." %
                (rloc, parent))

        self._rootdirs += [loc]
        info = self._get_info(loc)  # get optional package info
        idir = os.path.join(loc, self.INSTALL_SUBDIR)  # actual installation.

        if not bindir:  # add bin directory
            bindir = info["bindir"]

        if bindir:  # None means disabled in info file
            pth = os.path.join(idir, bindir)
            if os.path.exists(pth):
                self._bindirs += [pth]

        if not incdir:  # add include directory
            incdir = info["incdir"]

        if incdir:  # None means disabled in info file
            pth = os.path.join(idir, incdir)
            if not os.path.exists(pth):
                if not pymodule:
                    self.q_print("warn: incdir '%s' does not exist." % pth)
            else:
                self._incdirs += [pth]

        if not libdir:  # add library directory
            libdir = info["libdir"]

        if libdir:  # None means disabled in info file
            pth = os.path.join(idir, libdir)
            if not os.path.exists(pth):
                if not pymodule:
                    self.q_print("warn: libdir '%s' does not exist." % pth)
            else:
                self._ldlibpath += os.path.pathsep + pth
                self._libdirs += [(pth, idir)]

        libs = libs or info["libs"]
        addlibs = True

        if isinstance(libs, list) and len(libs) == 0:
            addlibs = False

        libs = libs or pkgname

        if not isinstance(libs, list):
            libs = [libs]

        if addlibs:  # only add lib if it's not a python module
            nlibs = []
            for lib in libs:
                instdir = idir
                if not glob.glob("{0}/lib{1}*".format(
                        os.path.join(idir, libdir), lib)):
                    instdir = ""
                nlibs.append((lib, instdir))
            self._libs += nlibs
            libs = self._libs[:]  # copy
            self._remove_duplicates(libs)
            self._libs = libs

        if info["defs"]:  # add package defines
            self._cppflags += info["defs"]

        if info["env"]:  # add environment variables
            self._env += info["env"]

        # check whether it is python, i.e. pymodule entry in package.info
        if not pymodule:
            pymodule = info["pymodule"]
        if pymodule:
            pth = os.path.join(idir, libdir, utils.get_site_dir())
            if self._pypath.find(pth) < 0:
                self._pypath = os.path.pathsep.join([pth, self._pypath])

        if info["jars"]:
            pth = os.path.join(idir, libdir)
            if not os.path.exists(pth):
                if not pymodule:
                    self.q_print("warn: libdir '%s' does not exist." % pth)
            for jar in info["jars"]:
                jar = os.path.join(pth, jar)
                if jar not in self._jars:
                    self._jars.append(jar)
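
# A minimal sketch of the assumed workflow (the package tag is invented,
# and a real run needs ASKAP_ROOT set and the tree checked out on disk):
if __name__ == "__main__":
    dep = Dependency(silent=False)
    # "tag" is the package location relative to ASKAP_ROOT
    dep.add_package("lapack", "3rdParty/lapack-3.1.1/lapack-3.1.1")
    print(dep.get_cppflags())         # preprocessor defines from package.info
    print(dep.get_ld_library_path())  # accumulated LD_LIBRARY_PATH
    print(dep.get_libs())             # library names to link against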
Example #42
def group_all_export(request, group_slug):
    """
    Export all group members for a specific group
    """
    group = get_object_or_404(Group, slug=group_slug)

    # if they can edit it, they can export it
    if not has_perm(request.user, 'user_groups.change_group', group):
        raise Http403

    import xlwt
    from ordereddict import OrderedDict
    from django.db import connection
    from tendenci.apps.forms_builder.forms.models import FieldEntry

    # create the excel book and sheet
    book = xlwt.Workbook(encoding='utf8')
    sheet = book.add_sheet('Group Members and Subscribers')

    #initialize indexes
    row_index = {}
    col_index = {}

    #---------
    # MEMBERS
    #---------
    # excel date styles
    default_style = xlwt.Style.default_style
    datetime_style = xlwt.easyxf(num_format_str='mm/dd/yyyy hh:mm')
    date_style = xlwt.easyxf(num_format_str='mm/dd/yyyy')

    # the key is what the column will be in the
    # excel sheet. the value is the database lookup
    # Used OrderedDict to maintain the column order
    group_mappings = OrderedDict([
        ('user_id', 'au.id'),
        ('first_name', 'au.first_name'),
        ('last_name', 'au.last_name'),
        ('email', 'au.email'),
        ('receives email', 'pp.direct_mail'),
        ('company', 'pp.company'),
        ('address', 'pp.address'),
        ('address2', 'pp.address2'),
        ('city', 'pp.city'),
        ('state', 'pp.state'),
        ('zipcode', 'pp.zipcode'),
        ('country', 'pp.country'),
        ('phone', 'pp.phone'),
        ('is_active', 'au.is_active'),
        ('date', 'gm.create_dt'),
    ])
    group_lookups = ','.join(group_mappings.values())

    # Use custom sql to fetch the rows because we need to
    # populate the user profile information, and you cannot
    # do that with django's ORM without a separate profile
    # query for each user.
    # Pulling 13,000 group members can be done in one query
    # this way, whereas Django's ORM would need
    # 13,000 individual profile queries :(
    cursor = connection.cursor()
    sql = "SELECT %s FROM user_groups_groupmembership gm \
           INNER JOIN auth_user au ON (au.id = gm.member_id) \
           LEFT OUTER JOIN profiles_profile pp \
           on (pp.user_id = gm.member_id) WHERE group_id = %%s;"

    sql = sql % group_lookups
    cursor.execute(sql, [group.pk])
    values_list = list(cursor.fetchall())

    # index the group key mappings and insert them into the sheet.
    for key in group_mappings.keys():
        if not key in col_index:
            col = len(col_index.keys())
            col_index[key] = col
            sheet.write(0, col, key, style=default_style)

    if values_list:
        # Write the data enumerated to the excel sheet
        for row, row_data in enumerate(values_list):
            for col, val in enumerate(row_data):

                if not row in row_index:
                    # assign the row if it is not yet available
                    row_index[row] = row + 1

                # styles the date/time fields
                if isinstance(val, datetime):
                    style = datetime_style
                elif isinstance(val, date):
                    style = date_style
                else:
                    style = default_style

                sheet.write(row + 1, col, val, style=style)

    #-------------
    # Subscribers
    #-------------
    entries = FieldEntry.objects.filter(
        entry__subscriptions__group=group).distinct()

    for entry in entries:
        val = entry.value
        field = entry.field.label.lower().replace(" ", "_")

        if "subscriber %s" % str(entry.entry.pk) in row_index:
            # get the subscriber's row number
            row = row_index["subscriber %s" % str(entry.entry.pk)]
        else:
            # assign the row if it is not yet available
            row = len(row_index.keys()) + 1
            row_index["subscriber %s" % str(entry.entry.pk)] = row

        if field in col_index:
            # get the entry's col number
            col = col_index[field]
        else:
            # assign the col if it is not yet available
            # and label the new column
            col = len(col_index.keys())
            col_index[field] = col
            sheet.write(0, col, field, style=default_style)

        # styles the date/time fields
        if isinstance(val, datetime):
            style = datetime_style
        elif isinstance(val, date):
            style = date_style
        else:
            style = default_style

        sheet.write(row, col, val, style=style)

    response = HttpResponse(content_type='application/vnd.ms-excel')
    response[
        'Content-Disposition'] = 'attachment; filename=group_%s_all_export.xls' % group.pk
    book.save(response)
    return response
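
# A self-contained sketch (data invented; only xlwt is assumed) of the
# pattern above: an OrderedDict fixes the column order, the header row is
# written once, then each record is written one row below its index.
import xlwt
from collections import OrderedDict

def write_members_sketch(filename):
    columns = OrderedDict([('user_id', 'au.id'),
                           ('first_name', 'au.first_name'),
                           ('email', 'au.email')])
    rows = [(1, 'Ada', 'ada@example.com'),
            (2, 'Grace', 'grace@example.com')]
    book = xlwt.Workbook(encoding='utf8')
    sheet = book.add_sheet('Members')
    for col, key in enumerate(columns.keys()):
        sheet.write(0, col, key)            # header row
    for row, row_data in enumerate(rows):
        for col, val in enumerate(row_data):
            sheet.write(row + 1, col, val)  # data rows, offset by the header
    book.save(filename)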
Example #43
class FiniteDifference(Container):
    """ Differentiates a driver's workflow using the Finite Difference with
    Analytical Derivatives (FDAD) method. A variety of difference types are
    available for both first and second order."""

    implements(IDifferentiator)
    
    # pylint: disable-msg=E1101
    form = Enum("central", ["central", "forward", "backward"], iotype='in', \
                desc="Finite difference form (central, forward, backward).")
    
    default_stepsize = Float(1.0e-6, iotype='in', desc='Default finite ' + \
                             'difference step size.')
    
    def __init__(self):
        
        super(FiniteDifference, self).__init__()
        
        # This gets set in the callback
        self._parent = None
        
        self.param_names = []
        self.objective_names = []
        self.eqconst_names = []
        self.ineqconst_names = []
        
        self.gradient_case = OrderedDict()
        self.gradient = {}
        
        self.hessian_ondiag_case = OrderedDict()
        self.hessian_offdiag_case = OrderedDict()
        self.hessian = {}
        
    def setup(self):
        """Sets some dimensions."""

        self.param_names = self._parent.get_parameters().keys()
        self.objective_names = self._parent.get_objectives().keys()
        
        try:
            self.ineqconst_names = self._parent.get_ineq_constraints().keys()
        except AttributeError:
            self.ineqconst_names = []
        try:
            self.eqconst_names = self._parent.get_eq_constraints().keys()
        except AttributeError:
            self.eqconst_names = []
        
        
    def get_derivative(self, output_name, wrt):
        """Returns the derivative of output_name with respect to wrt.
        
        output_name: string
            Name of the output in the local OpenMDAO hierarchy.
            
        wrt: string
            Name of the input in the local OpenMDAO hierarchy. The
            derivative is with respect to this variable.
        """
        
        return self.gradient[wrt][output_name]

    
    def get_2nd_derivative(self, output_name, wrt):
        """Returns the 2nd derivative of output_name with respect to both vars
        in the tuple wrt.
        
        output_name: string
            Name of the output in the local OpenMDAO hierarchy.
            
        wrt: tuple containing two strings
            Names of the inputs in the local OpenMDAO hierarchy. The
            derivative is with respect to these 2 variables.
        """
        
        return self.hessian[wrt[0]][wrt[1]][output_name]

    
    def get_gradient(self, output_name=None):
        """Returns the gradient of the given output with respect to all 
        parameters.
        
        output_name: string
            Name of the output in the local OpenMDAO hierarchy.
        """
        
        return array([self.gradient[wrt][output_name] for wrt in self.param_names])
        
        
    def get_Hessian(self, output_name=None):
        """Returns the Hessian matrix of the given output with respect to
        all parameters.
        
        output_name: string
            Name of the output in the local OpenMDAO hierarchy.
        """       
                

        return array([self.hessian[in1][in2][output_name] for (in1,in2) in product(self.param_names, self.param_names)])


    def calc_gradient(self):
        """Calculates the gradient vectors for all outputs in this Driver's
        workflow."""
        
        # Each component runs its calc_derivatives method.
        # We used to do this in the driver instead, but we've moved it in
        # here to make the interface more uniform.
        self._parent.calc_derivatives(first=True)
        
        self.setup()

        # Create our 2D dictionary the first time we execute.
        if not self.gradient:
            for name in self.param_names:
                self.gradient[name] = {}
                
        # Pull initial state and stepsizes from driver's parameters
        base_param = OrderedDict()
        stepsize = {}
        for key, item in self._parent.get_parameters().iteritems():
            base_param[key] = item.evaluate()
            
            if item.fd_step:
                stepsize[key] = item.fd_step
            else:
                stepsize[key] = self.default_stepsize

        # For Forward or Backward diff, we want to save the baseline
        # objective and constraints. These are also needed for the
        # on-diagonal Hessian terms, so we will save them in the class
        # later.
        base_data = self._run_point(base_param)
        
        # Set up problem based on Finite Difference type
        if self.form == 'central':
            deltas = [1, -1]
            func = diff_1st_central
        elif self.form == 'forward':
            deltas = [1, 0]
            func = diff_1st_fwrdbwrd
        else:
            deltas = [0, -1]
            func = diff_1st_fwrdbwrd

        self.gradient_case = OrderedDict()

        # Assemble input data
        for param in self.param_names:
            
            pcase = []
            for j_step, delta in enumerate(deltas):
                
                case = base_param.copy()
                case[param] += delta*stepsize[param]
                pcase.append({ 'param': case })
                
            self.gradient_case[param] = pcase
            
        # Run all "cases".
        # TODO - Integrate OpenMDAO's concurrent processing capability once it
        # is formalized. This operation is inherently parallelizable.
        for key, case in self.gradient_case.iteritems():
            for ipcase, pcase in enumerate(case):
                if deltas[ipcase]:
                    pcase['data'] = self._run_point(pcase['param'])
                else:
                    pcase['data'] = base_data
                
        
        # Calculate gradients
        for key, case in self.gradient_case.iteritems():
            
            eps = stepsize[key]
            
            for name in list(self.objective_names + \
                             self.eqconst_names + \
                             self.ineqconst_names):
                self.gradient[key][name] = \
                    func(case[0]['data'][name],
                         case[1]['data'][name], eps)

        # Save these for Hessian calculation
        self.base_param = base_param
        self.base_data = base_data

        
    def calc_hessian(self, reuse_first=False):
        """Returns the Hessian matrix for all outputs in the Driver's
        workflow.
        
        reuse_first: bool
            Switch to reuse some data from the gradient calculation so that
            we don't have to re-run some points we already ran (namely the
            baseline, +eps, and -eps cases.) Obviously you do this when the
            driver needs gradient and hessian information at the same point,
            and calls calc_gradient before calc_hessian.
        """
        
        # Each component runs its calc_derivatives method.
        # We used to do this in the driver instead, but we've moved it in
        # here to make the interface more uniform.
        self._parent.calc_derivatives(second=True)
        
        self.setup()
        
        # Create our 3D dictionary the first time we execute.
        if not self.hessian:
            for name1 in self.param_names:
                self.hessian[name1] = {}
                for name2 in self.param_names:
                    self.hessian[name1][name2] = {}
                
        self.hessian_ondiag_case = OrderedDict()
        self.hessian_offdiag_case = OrderedDict()

        # Pull stepsizes from driver's parameters
        base_param = OrderedDict()
        stepsize = {}
        for key, item in self._parent.get_parameters().iteritems():
            
            if item.fd_step:
                stepsize[key] = item.fd_step
            else:
                stepsize[key] = self.default_stepsize

        # Diagonal terms in Hessian always need base point
        # Usually, we will have saved this when we calculated
        # the gradient.
        if reuse_first:
            base_param = self.base_param
            base_data = self.base_data
        else:
            # Pull initial state from driver's parameters
            for key, item in self._parent.get_parameters().iteritems():
                base_param[key] = item.evaluate()
                    
            base_data = self._run_point(base_param)
            
        # Assemble input data
        # Cases : ondiag [fp, fm]
        deltas = [1, -1]
        for param in self.param_names:
            
            pcase = []
            for j_step, delta in enumerate(deltas):
                
                case = base_param.copy()
                case[param] += delta*stepsize[param]
                pcase.append({ 'param': case })
                
            self.hessian_ondiag_case[param] = pcase
            
        # Assemble input data
        # Cases : offdiag [fpp, fpm, fmp, fmm]
        deltas = [[1, 1],
                  [1, -1],
                  [-1, 1],
                  [-1, -1]]
        for i, param1 in enumerate(self.param_names):
            
            offdiag = {}
            for param2 in self.param_names[i+1:]:
            
                pcase = []
                for delta in deltas:
                    
                    case = base_param.copy()
                    case[param1] += delta[0]*stepsize[param1]
                    case[param2] += delta[1]*stepsize[param2]
                    pcase.append({ 'param': case })
                offdiag[param2] = pcase
                    
            self.hessian_offdiag_case[param1] = offdiag
            
        # Run all "cases".
        # TODO - Integrate OpenMDAO's concurrent processing capability once it
        # is formalized. This operation is inherently parallelizable.
        
        # We don't need to re-run on-diag cases if the gradients were
        # calculated with Central Difference.
        if reuse_first and self.form=='central':
            for key, case in self.hessian_ondiag_case.iteritems():
                
                gradient_case = self.gradient_case[key]
                for ipcase, pcase in enumerate(case):
                    
                    gradient_ipcase = gradient_case[ipcase]
                    pcase['data'] = gradient_ipcase['data'] 
        else:
            for case in self.hessian_ondiag_case.values():
                for pcase in case:
                    data = self._run_point(pcase['param'])
                    pcase['data'] = data

        # Off-diag cases must always be run.
        for cases in self.hessian_offdiag_case.values():
            for case in cases.values():
                for pcase in case:
                    pcase['data'] = self._run_point(pcase['param'])

                    
        # Calculate Hessians - On Diagonal
        for key, case in self.hessian_ondiag_case.iteritems():
            
            eps = stepsize[key]
            
            for name in list(self.objective_names + \
                             self.eqconst_names + \
                             self.ineqconst_names):
                self.hessian[key][key][name] = \
                    diff_2nd_xx(case[0]['data'][name],
                                base_data[name],
                                case[1]['data'][name], eps)
                
        # Calculate Hessians - Off Diagonal
        for key1, cases in self.hessian_offdiag_case.iteritems():
            
            eps1 = stepsize[key1]
            for key2, case in cases.iteritems():
                
                eps2 = stepsize[key2]
                
                for name in list(self.objective_names + \
                                 self.eqconst_names + \
                                 self.ineqconst_names):
                    self.hessian[key1][key2][name] = \
                        diff_2nd_xy(case[0]['data'][name],
                                    case[1]['data'][name],
                                    case[2]['data'][name],
                                    case[3]['data'][name],
                                    eps1, eps2)
                    
                    # Symmetry
                    # (Should ponder whether we should even store it.)
                    self.hessian[key2][key1][name] = \
                        self.hessian[key1][key2][name]
                    
    
    def _run_point(self, data_param):
        """Runs the model at a single point and captures the results. Note that 
        some differences require the baseline point."""

        dvals = [float(val) for val in data_param.values()]
        
        self._parent.set_parameters(dvals)

        # Run the model
        super(type(self._parent), self._parent).run_iteration()
        
        data = {}

        # Get Objectives
        for key, item in self._parent.get_objectives().iteritems():
            data[key] = item.evaluate(self._parent.parent)

        # Get Inequality Constraints
        if self.ineqconst_names:
            for key, item in self._parent.get_ineq_constraints().iteritems():
                val = item.evaluate(self._parent.parent)
                if '>' in val[2]:
                    data[key] = val[1]-val[0]
                else:
                    data[key] = val[0]-val[1]
        
        # Get Equality Constraints
        if self.eqconst_names:
            for key, item in self._parent.get_eq_constraints().iteritems():
                val = item.evaluate(self._parent.parent)
                if '>' in val[2]:
                    data[key] = val[1]-val[0]
                else:
                    data[key] = val[0]-val[1]
        
        return data
                    

    def reset_state(self):
        """Finite Difference does not leave the model in a clean state. If you
        require one, then run this method."""
        
        dvals = [float(val) for val in self.base_param.values()]
        self._parent.set_parameters(dvals)
        super(type(self._parent), self._parent).run_iteration()

        
    def raise_exception(self, msg, exception_class=Exception):
        """Raise an exception."""
        name = find_name(self._parent, self)
        self._parent.raise_exception("%s: %s" % (name,msg), exception_class)
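
# The diff_* helpers called above are imported from elsewhere in OpenMDAO;
# a minimal sketch of what they plausibly compute (standard finite
# difference formulas; the exact signatures are assumed):
def diff_1st_central(fp, fm, eps):
    """First derivative from f(x+eps) and f(x-eps)."""
    return (fp - fm) / (2.0 * eps)

def diff_1st_fwrdbwrd(fp, fm, eps):
    """Forward/backward difference; one of the two points is the baseline."""
    return (fp - fm) / eps

def diff_2nd_xx(fp, f0, fm, eps):
    """Pure second derivative (on-diagonal Hessian term)."""
    return (fp - 2.0 * f0 + fm) / eps ** 2

def diff_2nd_xy(fpp, fpm, fmp, fmm, eps1, eps2):
    """Mixed second derivative (off-diagonal Hessian term)."""
    return (fpp - fpm - fmp + fmm) / (4.0 * eps1 * eps2)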
Example #44
class QuestionForm(BootstrapForm):

    currency = forms.ChoiceField(
                    choices=plugins.CurrencySelector().widget.widgets[0].choices,
                    label=_("Currency"),
                    help_text=_("Please select your currency")
                ) 
    baseline_year = forms.ChoiceField(
                        choices=((2005, 2005), (2006,2006), (2007,2007)),
                        label=_("Baseline data"),
                    ) 
    current_year = forms.ChoiceField(
                        choices=((2011,2011), (2010,2010)),
                        label=_("Latest data"),
                    ) 

    def __init__(self, *args, **kwargs):
        self.entity = kwargs.pop('entity')
        self.survey = kwargs.pop('survey')
        self.user = kwargs.pop('user')
        self.series = OrderedDict([(s.pk,s) for s in kwargs.pop('series')])
        self.country = kwargs.pop('country')
        self.collection_year = defaultdict(list)
        try:
            self.static = kwargs.pop('static')
        except KeyError:
            self.static = False

        super(QuestionForm, self).__init__(*args, **kwargs)
        self.layout = []

        self.responsesets = list(self.entity.responseset_set.filter(data_series=self.country).filter(data_series__in=self.series.values()))
        response_dict = dict([(r.pk,r) for r in self.responsesets])
        self.response_types = dict([(r.get_data_series_by_type()['Data collection year'],r) for r in self.responsesets])
        self.responses = Response.objects.filter(
                        current=True,
                        response_set__in=self.responsesets).\
                    select_related('question')
        self.question_dict = defaultdict(dict)
        self.questions = {}
        self.response = {}

        if self.static:
            for fieldname in ['currency','baseline_year','current_year']:
               field = self.fields[fieldname]
               field.widget.attrs['readonly'] = 'readonly'
               field.widget.attrs['disabled'] = 'disabled'
               field.widget.attrs['class'] = 'hidden'
               field.required = False
            

        collection_lookup = {"Baseline":"baseline_year","2012 collection":"current_year"}
        for responseset in self.responsesets:
            currency = responseset.get_meta('currency')
            if currency and not self.initial.get('currency'):
                self.initial['currency'] = currency['value']

            ds_dict = responseset.get_data_series_by_type()
            self.collection_year[ds_dict['Data collection year']].append(ds_dict['Year'])
            field = self.fields[collection_lookup[ds_dict['Data collection year'].name]]
            field.widget.attrs['readonly'] = 'readonly'
            field.widget.attrs['disabled'] = 'disabled'
            field.required = False

        for response in self.responses:
            #Limit db hits for responsesets
            response.response_set = response_dict[response.response_set_id]
            ds_dict = response.response_set.get_data_series_by_type()
            self.question_dict[response.question][ds_dict['Data collection year']]=response

        for collection, year in self.collection_year.items():
            self.initial[collection_lookup[collection.name]] = year[0].name
            rs = self.response_types.get(collection)
            if rs and (not rs.editable or self.static):
               field = self.fields[collection_lookup[collection.name]]
               field.widget.attrs['readonly'] = 'readonly'
               field.widget.attrs['disabled'] = 'disabled'
               field.widget.attrs['class'] = 'hidden'
               field.required = False

        series_list = self.series.values()
        current_series = series_list[0]
        series_list.reverse()

        for group in self.survey.questiongroup_set.all().select_related('question'):
            fieldset = QuestionFieldset(group)
            for question in group.question_set.all():
                self.questions[question.pk] = question
                if question.widget == 'textbox':
                    self.add_field_from_question(question, current_series)
                    fieldset.add_field('q_%s_%s' % (question.pk, current_series.pk))
                else:
                    fields = []
                    for series in series_list:
                        rs = self.response_types.get(series)
                        self.add_field_from_question(question, series, False)
                        fields.append('q_%s_%s' % (question.pk, series.pk))
                    fieldset.add_field({'question':question, 'fields':fields})
            self.layout.append(fieldset)

        for question, series in self.question_dict.items():
            for ds, response in series.items():
                key = 'q_%s_%s' % (response.question.pk, ds.pk)
                if key not in self.initial:
                    self.initial[key] = response.get_value()
                    year = response.response_set.get_data_series_by_type()['Year']
                    if self.initial[key] and year.name != self.collection_year[ds][0].name:
                        #TODO: Breaks for checkbox questions
                        self.initial[key] = ' '.join([self.initial[key], '(%s)' % year])
                    self.response['q_%s_%s' % (response.question.pk, ds.pk)] = response
            
    def add_field_from_question(self, question, series, label=True):
        read_only = self.static
        rs = self.response_types.get(series)
        if rs and not rs.editable:
            read_only = True
        field = register.get_input_plugin(question.widget).plugin
        if label:
            label="""<span class="identifier">%s.</span> 
                     <span class="question">%s</span>""" % (question.i18n.identifier,question.i18n.question)
        else:
            label = {
                    "Baseline":_("Baseline data"),
                    "2012 collection":_("Latest data"),
                }.get(series.name)
        if issubclass(field, forms.DateField):
            field = field(
                        help_text = question.i18n.help_text,
                        label = label,
                        required = False,
                        widget = field.widget(years=('2005','2006','2007','2008','2009','2010','2011','2012'))
            )
        else:
            field = field(
                        help_text = question.i18n.help_text,
                        label = label,
                        required = False,
            )
        if read_only:
            field.widget.attrs['readonly'] = 'readonly'
            field.widget.attrs['disabled'] = 'disabled'
            field.widget.attrs['class'] = 'hidden'
        self.fields['q_%s_%s' % (question.pk,series.pk)] = field

    def is_valid(self):
        matcher = re.compile(r'\((\d+)\)')
        data = self.data.copy()
        self.custom_year = getattr(self,'custom_year', {})
        for key in data.keys():
            field = data.getlist(key)
            year = None
            new_value = []
            for item in field:
                check = matcher.split(item)
                if len(check)>1:
                    year = check[1]
                    new_value.append(check[0])
                else:
                    new_value.append(item)
            data.setlist(key, new_value)
            if year:
                if len(key.split('_'))>3:
                    key = '_'.join(key.split('_')[:-1])
                self.custom_year[key] = year
        self.data = data
        return super(QuestionForm, self).is_valid()

    def save(self):
        collection_year = dict([(x.name,y[0].name) for x,y in self.collection_year.items()])
        baseline_year = collection_year.get('Baseline', self.cleaned_data.get('baseline_year', None))
        if baseline_year:
            baseline_year = DataSeries.objects.get(name=baseline_year)
        else:
            baseline_year = None
        current_year = collection_year.get('2012 collection', self.cleaned_data.get('current_year', None))
        current_year = DataSeries.objects.get(name=current_year)
        currency = self.cleaned_data['currency']
        for responseset in self.responsesets:
            if responseset.editable:
                responseset.set_meta('currency',{'value':currency})

        del self.cleaned_data['currency']
        del self.cleaned_data['current_year']
        del self.cleaned_data['baseline_year']
        for key, value in self.cleaned_data.items():
            if key in self.custom_year:
                #TODO: actually save the custom year value
                pass
            if value != '' or key in self.response:
                if key in self.response:
                    if  self.response[key].get_value() != value and self.response[key].response_set.editable:
                        instance = Response(
                            response_set = self.response[key].response_set,
                            respondant = self.user,
                            question = self.response[key].question,
                            valid = True,
                            current = True
                        )
                        instance.value = {'value':value}
                        instance.save()
                else:
                    q, q_id, s_id = key.split('_')
                    question = self.questions[int(q_id)]
                    series = self.series[int(s_id)]

                    responseset = None
                    for rs in self.responsesets:
                        if set(rs.get_data_series()) == set([baseline_year, self.country, series]):
                            responseset = rs
                            break
                        elif set(rs.get_data_series()) == set([current_year, self.country, series]):
                            responseset = rs
                            break

                    if responseset is None:
                        responseset = ResponseSet(
                            survey=self.survey,
                            entity=self.entity
                        )
                        responseset.save()
                        responseset.data_series.add(self.country)
                        responseset.data_series.add(series)
                        if series.name == 'Baseline':
                            responseset.data_series.add(baseline_year)
                        else:
                            responseset.data_series.add(current_year)
                        self.responsesets.append(responseset)
                        responseset.set_meta('currency',{'value':currency})


                    if responseset.editable:
                        r = responseset.response_set.create(question=question, respondant=self.user, valid=True, current=True)
                        r.value = {'value':value}
                        r.save()
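
# A standalone sketch (input invented) of how is_valid() above peels a
# custom year out of a submitted value with the r'\((\d+)\)' pattern:
import re

matcher = re.compile(r'\((\d+)\)')
check = matcher.split('120 (2009)')
# check == ['120 ', '2009', ''] -> the value is check[0], the year check[1]
assert check[1] == '2009'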
Example #45
def group_all_export(request, group_slug):
    """
    Export all group members for a specific group
    """
    group = get_object_or_404(Group, slug=group_slug)

    # if they can edit it, they can export it
    if not has_perm(request.user,'user_groups.change_group', group):
        raise Http403

    import xlwt
    from ordereddict import OrderedDict
    from django.db import connection
    from forms_builder.forms.models import FieldEntry

    # create the excel book and sheet
    book = xlwt.Workbook(encoding='utf8')
    sheet = book.add_sheet('Group Members and Subscribers')
    
    #initialize indexes
    row_index = {}
    col_index = {}
    
    #---------
    # MEMBERS
    #---------
    # excel date styles
    default_style = xlwt.Style.default_style
    datetime_style = xlwt.easyxf(num_format_str='mm/dd/yyyy hh:mm')
    date_style = xlwt.easyxf(num_format_str='mm/dd/yyyy')
    
    # the key is what the column will be in the
    # excel sheet. the value is the database lookup
    # Used OrderedDict to maintain the column order
    group_mappings = OrderedDict([
        ('user_id', 'au.id'),
        ('first_name', 'au.first_name'),
        ('last_name', 'au.last_name'),
        ('email', 'au.email'),
        ('receives email', 'pp.direct_mail'),
        ('company', 'pp.company'),
        ('address', 'pp.address'),
        ('address2', 'pp.address2'),
        ('city', 'pp.city'),
        ('state', 'pp.state'),
        ('zipcode', 'pp.zipcode'),
        ('country', 'pp.country'),
        ('phone', 'pp.phone'),
        ('is_active', 'au.is_active'),
        ('date', 'gm.create_dt'),
    ])
    group_lookups = ','.join(group_mappings.values())

    # Use custom sql to fetch the rows because we need to
    # populate the user profile information, and you cannot
    # do that with django's ORM without calling get_profile()
    # for each user.
    # Pulling 13,000 group members can be done in one query
    # this way, whereas Django's ORM would need
    # 13,000 individual profile queries :(
    cursor = connection.cursor()
    sql = "SELECT %s FROM user_groups_groupmembership gm \
           INNER JOIN auth_user au ON (au.id = gm.member_id) \
           LEFT OUTER JOIN profiles_profile pp \
           on (pp.user_id = gm.member_id) WHERE group_id = %%s;"
    sql =  sql % group_lookups
    cursor.execute(sql, [group.pk])
    values_list = list(cursor.fetchall())

    # index the group key mappings and insert them into the sheet.
    for key in group_mappings.keys():
        if not key in col_index:
            col = len(col_index.keys())
            col_index[key] = col
            sheet.write(0, col, key, style=default_style)

    if values_list:
        # Write the data enumerated to the excel sheet
        for row, row_data in enumerate(values_list):
            for col, val in enumerate(row_data):
                
                if not row in row_index:
                    # assign the row if it is not yet available
                    row_index[row] = row + 1
                
                # styles the date/time fields
                if isinstance(val, datetime):
                    style = datetime_style
                elif isinstance(val, date):
                    style = date_style
                else:
                    style = default_style
                
                sheet.write(row + 1, col, val, style=style)
    
    #-------------
    # Subscribers
    #-------------
    entries = FieldEntry.objects.filter(entry__subscriptions__group=group).distinct()
    
    for entry in entries:
        val = entry.value
        field = entry.field.label.lower().replace(" ", "_")
        
        if "subscriber %s" % str(entry.entry.pk) in row_index:
            # get the subscriber's row number
            row = row_index["subscriber %s" % str(entry.entry.pk)]
        else:
            # assign the row if it is not yet available
            row = len(row_index.keys()) + 1
            row_index["subscriber %s" % str(entry.entry.pk)] = row
        
        if field in col_index:
            # get the entry's col number
            col = col_index[field]
        else:
            # assign the col if it is not yet available
            # and label the new column
            col = len(col_index.keys())
            col_index[field] = col
            sheet.write(0, col, field, style=default_style)
            
        # styles the date/time fields
        if isinstance(val, datetime):
            style = datetime_style
        elif isinstance(val, date):
            style = date_style
        else:
            style = default_style
        
        sheet.write(row, col, val, style=style)

    response = HttpResponse(mimetype='application/vnd.ms-excel')
    response['Content-Disposition'] = 'attachment; filename=group_%s_all_export.xls' % group.pk
    book.save(response)
    return response
Example #46
class PluginManager(object):

    ENVIRONMENT_HOOK = 'haas.hooks.environment'

    RESULT_HANDLERS = 'haas.result.handler'

    TEST_RUNNER = 'haas.runner'

    TEST_DISCOVERY = 'haas.discovery'

    _help = {
        TEST_DISCOVERY: 'The test discovery implementation to use.',
        TEST_RUNNER: 'Test runner implementation to use.',
        RESULT_HANDLERS: 'Test result handler implementation to use.',
    }

    _namespace_to_option_parts = {
        TEST_DISCOVERY: ['discovery'],
        TEST_RUNNER: ['runner'],
        RESULT_HANDLERS: ['result', 'handler'],
    }

    def __init__(self):
        self.hook_managers = OrderedDict()
        self.hook_managers[self.ENVIRONMENT_HOOK] = ExtensionManager(
            namespace=self.ENVIRONMENT_HOOK,
        )
        self.hook_managers[self.RESULT_HANDLERS] = ExtensionManager(
            namespace=self.RESULT_HANDLERS,
        )

        self.driver_managers = OrderedDict()
        self.driver_managers[self.TEST_DISCOVERY] = ExtensionManager(
            namespace=self.TEST_DISCOVERY,
        )
        self.driver_managers[self.TEST_RUNNER] = ExtensionManager(
            namespace=self.TEST_RUNNER,
        )

    @classmethod
    def testing_plugin_manager(cls, hook_managers, driver_managers):
        """Create a fabricated plugin manager for testing.

        """
        plugin_manager = cls.__new__(cls)
        plugin_manager.hook_managers = OrderedDict(hook_managers)
        plugin_manager.driver_managers = OrderedDict(driver_managers)
        return plugin_manager

    def _hook_extension_option_prefix(self, extension):
        name = uncamelcase(extension.name, sep='-').replace('_', '-')
        option_prefix = '--with-{0}'.format(name)
        dest_prefix = name.replace('-', '_')
        return option_prefix, dest_prefix

    def _namespace_to_option(self, namespace):
        parts = self._namespace_to_option_parts[namespace]
        option = '--{0}'.format('-'.join(parts))
        dest = '_'.join(parts)
        return option, dest

    def _add_hook_extension_arguments(self, extension, parser):
        option_prefix, dest_prefix = self._hook_extension_option_prefix(
            extension)
        extension.plugin.add_parser_arguments(
            parser, extension.name, option_prefix, dest_prefix)

    def _create_hook_plugin(self, extension, args, **kwargs):
        option_prefix, dest_prefix = self._hook_extension_option_prefix(
            extension)
        plugin = extension.plugin.from_args(
            args, extension.name, dest_prefix, **kwargs)
        if plugin is not None and plugin.enabled:
            return plugin
        return None

    def _add_driver_extension_arguments(self, extension, parser, option_prefix,
                                        dest_prefix):
        extension.plugin.add_parser_arguments(
            parser, option_prefix, dest_prefix)

    def add_plugin_arguments(self, parser):
        """Add plugin arguments to argument parser.

        Parameters
        ----------
        parser : argparse.ArgumentParser
            The main haas ArgumentParser.

        """
        for manager in self.hook_managers.values():
            if len(list(manager)) == 0:
                continue
            manager.map(self._add_hook_extension_arguments, parser)
        for namespace, manager in self.driver_managers.items():
            choices = list(sorted(manager.names()))
            if len(choices) == 0:
                continue
            option, dest = self._namespace_to_option(namespace)
            parser.add_argument(
                option, help=self._help[namespace], dest=dest,
                choices=choices, default='default')
            option_prefix = '{0}-'.format(option)
            dest_prefix = '{0}_'.format(dest)
            manager.map(self._add_driver_extension_arguments,
                        parser, option_prefix, dest_prefix)

    def get_enabled_hook_plugins(self, hook, args, **kwargs):
        """Get enabled plugins for specified hook name.

        """
        manager = self.hook_managers[hook]
        if len(list(manager)) == 0:
            return []
        return [
            plugin for plugin in manager.map(
                self._create_hook_plugin, args, **kwargs)
            if plugin is not None
        ]

    def get_driver(self, namespace, parsed_args, **kwargs):
        """Get mutually-exlusive plugin for plugin namespace.

        """
        option, dest = self._namespace_to_option(namespace)
        dest_prefix = '{0}_'.format(dest)
        driver_name = getattr(parsed_args, dest, 'default')
        driver_extension = self.driver_managers[namespace][driver_name]
        return driver_extension.plugin.from_args(
            parsed_args, dest_prefix, **kwargs)
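
# A minimal sketch of the assumed wiring (stevedore provides the real
# extensions; the parser itself is ordinary argparse):
import argparse

def build_parser_sketch():
    manager = PluginManager()
    parser = argparse.ArgumentParser(prog='haas')
    # Registers --discovery/--runner/--result-handler options plus any
    # per-extension options contributed by installed plugins.
    manager.add_plugin_arguments(parser)
    return parser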
Example #47
class HasParameters(object):
    """This class provides an implementation of the IHasParameters interface."""

    _do_not_promote = [
        'get_expr_depends', 'get_referenced_compnames',
        'get_referenced_varpaths', 'get_metadata'
    ]

    def __init__(self, parent):
        self._parameters = OrderedDict()
        self._allowed_types = ['continuous']
        if obj_has_interface(parent, ISolver):
            self._allowed_types.append('unbounded')
        self._parent = None if parent is None else weakref.ref(parent)

    def __getstate__(self):
        state = self.__dict__.copy()
        state['_parent'] = self.parent
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        parent = state['_parent']
        self._parent = None if parent is None else weakref.ref(parent)

    @property
    def parent(self):
        """ The object we are a delegate of. """
        return None if self._parent is None else self._parent()

    def _item_count(self):
        """This is used by the replace function to determine if a delegate from
        the target object is 'empty' or not.  If it's empty, it's not an error
        if the replacing object doesn't have this delegate.
        """
        return len(self._parameters)

    def add_parameter(self,
                      target,
                      low=None,
                      high=None,
                      scaler=None,
                      adder=None,
                      start=None,
                      fd_step=None,
                      name=None,
                      scope=None):
        """Adds a parameter or group of parameters to the driver.

        target: string or iter of strings or Parameter
            What the driver should vary during execution. A *target* is an
            expression that can reside on the left-hand side of an assignment
            statement, so typically it will be the name of a variable or
            possibly a subscript expression indicating an entry within an array
            variable, e.g., x[3]. If an iterator of targets is given, then the
            driver will set all targets given to the same value whenever it
            varies this parameter during execution. If a Parameter instance is
            given, then that instance is copied into the driver with any other
            arguments specified, overriding the values in the given parameter.

        low: float (optional)
            Minimum allowed value of the parameter. If scaler and/or adder
            is supplied, use the transformed value here. If target is an array,
            this may also be an array, but must have the same size.

        high: float (optional)
            Maximum allowed value of the parameter. If scaler and/or adder
            is supplied, use the transformed value here. If target is an array,
            this may also be an array, but must have the same size.

        scaler: float (optional)
            Value to multiply the possibly offset parameter value by. If target
            is an array, this may also be an array, but must have the same size.

        adder: float (optional)
            Value to add to parameter prior to possible scaling. If target is
            an array, this may also be an array, but must have the same size.

        start: any (optional)
            Value to set into the target or targets of a parameter before
            starting any executions. If not given, analysis will start with
            whatever values are in the target or targets at that time. If target
            is an array, this may also be an array, but must have the same size.

        fd_step: float (optional)
            Step-size to use for finite difference calculation. If no value is
            given, the differentiator will use its own default. If target is an
            array, this may also be an array, but must have the same size.

        name: str (optional)
            Name used to refer to the parameter in place of the name of the
            variable referred to in the parameter string.
            This is sometimes useful if, for example, multiple entries in the
            same array variable are declared as parameters.

        scope: object (optional)
            The object to be used as the scope when evaluating the expression.

        If neither "low" nor "high" is specified, the min and max will
        default to the values in the metadata of the variable being
        referenced.
        """

        if isinstance(target, (ParameterBase, ParameterGroup)):
            self._parameters[target.name] = target
            target.override(low, high, scaler, adder, start, fd_step, name)
        else:
            if isinstance(target, basestring):
                names = [target]
                key = target
            else:
                names = target
                key = tuple(target)

            if name is not None:
                key = name

            dups = set(self.list_param_targets()).intersection(names)
            if len(dups) == 1:
                self.parent.raise_exception(
                    "'%s' is already a Parameter"
                    " target" % dups.pop(), ValueError)
            elif len(dups) > 1:
                self.parent.raise_exception(
                    "%s are already Parameter targets" % sorted(list(dups)),
                    ValueError)

            if key in self._parameters:
                self.parent.raise_exception("%s is already a Parameter" % key,
                                            ValueError)
            try:
                _scope = self._get_scope(scope)
                if len(names) == 1:
                    target = self._create(names[0], low, high, scaler, adder,
                                          start, fd_step, key, _scope)
                else:  # defining a ParameterGroup
                    parameters = [
                        self._create(n, low, high, scaler, adder, start,
                                     fd_step, key, _scope) for n in names
                    ]
                    types = set([p.valtypename for p in parameters])
                    if len(types) > 1:
                        raise ValueError("Can't add parameter %s because "
                                         "%s are not all of the same type" %
                                         (key, " and ".join(names)))
                    target = ParameterGroup(parameters)
                self._parameters[key] = target
            except Exception:
                self.parent.reraise_exception(info=sys.exc_info())

        self.parent.config_changed()

    def _create(self, target, low, high, scaler, adder, start, fd_step, key,
                scope):
        """ Create one Parameter or ArrayParameter. """
        try:
            expreval = ExprEvaluator(target, scope)
        except Exception as err:
            raise err.__class__("Can't add parameter: %s" % err)
        if not expreval.is_valid_assignee():
            raise ValueError("Can't add parameter: '%s' is not a"
                             " valid parameter expression" % expreval.text)
        try:
            val = expreval.evaluate()
        except Exception:
            val = None  # Let Parameter code sort out why.

        name = key[0] if isinstance(key, tuple) else key

        if isinstance(val, ndarray):
            return ArrayParameter(target,
                                  low=low,
                                  high=high,
                                  scaler=scaler,
                                  adder=adder,
                                  start=start,
                                  fd_step=fd_step,
                                  name=name,
                                  scope=scope,
                                  _expreval=expreval,
                                  _val=val,
                                  _allowed_types=self._allowed_types)
        else:
            return Parameter(target,
                             low=low,
                             high=high,
                             scaler=scaler,
                             adder=adder,
                             start=start,
                             fd_step=fd_step,
                             name=name,
                             scope=scope,
                             _expreval=expreval,
                             _val=val,
                             _allowed_types=self._allowed_types)

    def remove_parameter(self, name):
        """Removes the parameter with the given name."""
        param = self._parameters.get(name)
        if param:
            del self._parameters[name]
        else:
            self.parent.raise_exception(
                "Trying to remove parameter '%s' "
                "that is not in this driver." % (name, ), AttributeError)
        self.parent.config_changed()

    def config_parameters(self):
        """Reconfigure parameters from potentially changed targets."""
        for param in self._parameters.values():
            param.configure()

    def get_references(self, name):
        """Return references to component `name` in preparation for subsequent
        :meth:`restore_references` call.

        name: string
            Name of component being removed.
        """
        refs = OrderedDict()
        for pname, param in self._parameters.items():
            if name in param.get_referenced_compnames():
                refs[pname] = param
        return refs

    def remove_references(self, name):
        """Remove references to component `name`.

        name: string
            Name of component being removed.
        """
        to_remove = []
        for pname, param in self._parameters.items():
            if name in param.get_referenced_compnames():
                to_remove.append(pname)

        for pname in to_remove:
            self.remove_parameter(pname)

    def restore_references(self, refs):
        """Restore references to component `name` from `refs`.

        refs: object
            Value returned by :meth:`get_references`.
        """
        for pname, param in refs.items():
            try:
                self.add_parameter(param)
            except Exception as err:
                self.parent._logger.warning(
                    "Couldn't restore parameter '%s': %s" % (pname, str(err)))

    def list_param_targets(self):
        """Returns a list of parameter targets. Note that this
        list may contain more entries than the list of Parameter,
        ParameterGroup, and ArrayParameter objects since ParameterGroup
        instances have multiple targets.
        """
        targets = []
        for param in self._parameters.values():
            targets.extend(param.targets)
        return targets

    def list_param_group_targets(self):
        """Returns a list of tuples that contain the targets for each
        parameter group.
        """
        targets = []
        for param in self.get_parameters().values():
            targets.append(tuple(param.targets))
        return targets

    def clear_parameters(self):
        """Removes all parameters."""
        for name in self._parameters.keys():
            self.remove_parameter(name)
        self._parameters = OrderedDict()

    def get_parameters(self):
        """Returns an ordered dict of parameter objects."""
        return self._parameters

    def total_parameters(self):
        """Returns the total number of values to be set."""
        return sum([param.size for param in self._parameters.values()])

    def init_parameters(self):
        """Sets all parameters to their start value if a
        start value is given
        """
        scope = self._get_scope()
        for param in self._parameters.itervalues():
            if param.start is not None:
                param.set(param.start, scope)

    def set_parameter_by_name(self, name, value, case=None, scope=None):
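A hedged usage sketch for this delegate (the driver and variable names are hypothetical; in practice these methods are reached through a driver that mixes in HasParameters):

# driver.add_parameter('comp.x', low=-10.0, high=10.0)
# driver.add_parameter(('arr[0]', 'arr[1]'), low=0.0, high=1.0, name='pair')
# driver.list_param_targets()        # ['comp.x', 'arr[0]', 'arr[1]']
# driver.list_param_group_targets()  # [('comp.x',), ('arr[0]', 'arr[1]')]
# driver.init_parameters()           # pushes any 'start' values into targets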
Example #48
class PluginManager(object):

    ENVIRONMENT_HOOK = 'haas.hooks.environment'

    RESULT_HANDLERS = 'haas.result.handler'

    TEST_RUNNER = 'haas.runner'

    TEST_DISCOVERY = 'haas.discovery'

    _help = {
        TEST_DISCOVERY: 'The test discovery implementation to use.',
        TEST_RUNNER: 'Test runner implementation to use.',
        RESULT_HANDLERS: 'Test result handler implementation to use.',
    }

    _namespace_to_option_parts = {
        TEST_DISCOVERY: ['discovery'],
        TEST_RUNNER: ['runner'],
        RESULT_HANDLERS: ['result', 'handler'],
    }

    def __init__(self):
        self.hook_managers = OrderedDict()
        self.hook_managers[self.ENVIRONMENT_HOOK] = ExtensionManager(
            namespace=self.ENVIRONMENT_HOOK,
        )
        self.hook_managers[self.RESULT_HANDLERS] = ExtensionManager(
            namespace=self.RESULT_HANDLERS,
        )

        self.driver_managers = OrderedDict()
        self.driver_managers[self.TEST_DISCOVERY] = ExtensionManager(
            namespace=self.TEST_DISCOVERY,
        )
        self.driver_managers[self.TEST_RUNNER] = ExtensionManager(
            namespace=self.TEST_RUNNER,
        )

    @classmethod
    def testing_plugin_manager(cls, hook_managers, driver_managers):
        """Create a fabricated plugin manager for testing.

        """
        plugin_manager = cls.__new__(cls)
        plugin_manager.hook_managers = OrderedDict(hook_managers)
        plugin_manager.driver_managers = OrderedDict(driver_managers)
        return plugin_manager

    def _hook_extension_option_prefix(self, extension):
        name = uncamelcase(extension.name, sep='-').replace('_', '-')
        option_prefix = '--with-{0}'.format(name)
        dest_prefix = name.replace('-', '_')
        return option_prefix, dest_prefix

    def _namespace_to_option(self, namespace):
        parts = self._namespace_to_option_parts[namespace]
        option = '--{0}'.format('-'.join(parts))
        dest = '_'.join(parts)
        return option, dest

    def _add_hook_extension_arguments(self, extension, parser):
        option_prefix, dest_prefix = self._hook_extension_option_prefix(
            extension)
        extension.plugin.add_parser_arguments(
            parser, extension.name, option_prefix, dest_prefix)

    def _create_hook_plugin(self, extension, args, **kwargs):
        option_prefix, dest_prefix = self._hook_extension_option_prefix(
            extension)
        plugin = extension.plugin.from_args(
            args, extension.name, dest_prefix, **kwargs)
        if plugin is not None and plugin.enabled:
            return plugin
        return None

    def _add_driver_extension_arguments(self, extension, parser, option_prefix,
                                        dest_prefix):
        extension.plugin.add_parser_arguments(
            parser, option_prefix, dest_prefix)

    def add_plugin_arguments(self, parser):
        """Add plugin arguments to argument parser.

        Parameters
        ----------
        parser : argparse.ArgumentParser
            The main haas ArgumentParser.

        """
        for manager in self.hook_managers.values():
            if len(list(manager)) == 0:
                continue
            manager.map(self._add_hook_extension_arguments, parser)
        for namespace, manager in self.driver_managers.items():
            choices = list(sorted(manager.names()))
            if len(choices) == 0:
                continue
            option, dest = self._namespace_to_option(namespace)
            parser.add_argument(
                option, help=self._help[namespace], dest=dest,
                choices=choices, default='default')
            option_prefix = '{0}-'.format(option)
            dest_prefix = '{0}_'.format(dest)
            manager.map(self._add_driver_extension_arguments,
                        parser, option_prefix, dest_prefix)

    def get_enabled_hook_plugins(self, hook, args, **kwargs):
        """Get enabled plugins for specified hook name.

        """
        manager = self.hook_managers[hook]
        if len(list(manager)) == 0:
            return []
        return [
            plugin for plugin in manager.map(
                self._create_hook_plugin, args, **kwargs)
            if plugin is not None
        ]

    def get_driver(self, namespace, parsed_args, **kwargs):
        """Get mutually-exlusive plugin for plugin namespace.

        """
        option, dest = self._namespace_to_option(namespace)
        dest_prefix = '{0}_'.format(dest)
        driver_name = getattr(parsed_args, dest, 'default')
        driver_extension = self.driver_managers[namespace][driver_name]
        return driver_extension.plugin.from_args(
            parsed_args, dest_prefix, **kwargs)
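A hedged sketch of driver resolution, plus the fabricated manager intended for tests (empty mappings here; real callers pass stevedore ExtensionManager-like objects):

# Option mapping is mechanical: 'haas.discovery' -> ('--discovery', 'discovery')
# and 'haas.result.handler' -> ('--result-handler', 'result_handler').
pm = PluginManager.testing_plugin_manager(hook_managers={}, driver_managers={})
# With real managers, get_driver('haas.runner', args) reads args.runner, looks
# up the extension of that name and calls its plugin.from_args(args, 'runner_').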
Example #49
class CategoryRegistry(object):
    """Registry of all Category/Kind/Mixin instances currently known to the
    OCCI server or client.

    >>> reg = CategoryRegistry()
    >>> from occi.core import Category, ExtCategory, Kind, Mixin
    >>> from occi.ext.infrastructure import *
    >>> reg.register(ComputeKind)
    >>> reg.register(StorageKind)
    >>> reg.register(StorageLinkKind)
    >>> fooKind = Kind('foo', 'http://#', related=ResourceKind, location='compute/')
    >>> reg.register(fooKind)
    Traceback (most recent call last):
    Invalid: compute/: location path already defined
    >>> reg.lookup_id(ComputeKind)
    Kind('compute', 'http://schemas.ogf.org/occi/infrastructure#')
    >>> reg.lookup_location('storage/')
    Kind('storage', 'http://schemas.ogf.org/occi/infrastructure#')
    >>> reg.lookup_recursive('/link/')
    [Kind('storagelink', 'http://schemas.ogf.org/occi/infrastructure#')]
    >>> reg.lookup_recursive('/') == reg.all()
    True
    >>> reg.unregister(StorageKind)
    >>> reg.unregister(ComputeKind)
    >>> reg.unregister(EntityKind) ; reg.unregister(ResourceKind) ; reg.unregister(LinkKind)
    >>> reg.all()
    [Kind('storagelink', 'http://schemas.ogf.org/occi/infrastructure#')]

    """

    def __init__(self):
        self._categories = OrderedDict()
        self._locations = {}

        # Always register OCCI Core types
        self.register(EntityKind)
        self.register(ResourceKind)
        self.register(LinkKind)

    def register(self, category):
        """Register a new Category/Kind/Mixin."""
        s = str(category)
        if s in self._categories:
            raise Category.Invalid('%s: Category already registered' % s)

        # Location
        if hasattr(category, 'location') and category.location:
            if category.location in self._locations:
                raise Category.Invalid('%s: location path already defined' % category.location)
            self._locations[category.location] = category

        # Register category
        self._categories[s] = category

        # Register actions
        if hasattr(category, 'actions'):
            for action in category.actions:
                if hasattr(action, 'actions'):
                    raise Category.Invalid(
                            '%s: Only the base Category type allowed to identify Actions' % action)
                self.register(action)

    def unregister(self, category):
        """Unregister a previously registered Category/Kind/Mixin."""
        try:
            category = self._categories[str(category)]
        except KeyError:
            raise Category.Invalid("%s: Category not registered" % category)

        # Unregister category
        del self._categories[str(category)]

        # Remove location entry
        if hasattr(category, 'location') and category.location:
            self._locations.pop(category.location, None)

        # Remove additional action categories
        if hasattr(category, 'actions'):
            for action in category.actions:
                self.unregister(action)

    def lookup_id(self, identifier):
        try:
            return self._categories[str(identifier)]
        except KeyError:
            raise Category.DoesNotExist('"%s": Category does not exist' % identifier)

    def lookup_location(self, path):
        loc = path.lstrip('/')
        return self._locations.get(loc)

    def lookup_recursive(self, path):
        """Find all categories registered at a location below the specified
        path.
        """
        loc = path.lstrip('/')
        if not loc:
            return self.all()
        categories = []
        for location, category in self._locations.iteritems():
            if location.startswith(loc):
                categories.append(category)
        return categories

    def all(self):
        return self._categories.values()
Example #50
class RenderableContainer(object):
    is_group = True

    def __init__(self):

        self._components = []
        self._componentmap = OrderedDict()
        self._bindmap = OrderedDict()

    def __json__(self, request):
        return {"components": self._componentmap}

    def rmRenderable(self, renderable_id):

        renderable = self._componentmap.pop(renderable_id)
        self._components.remove(renderable)

    def addRenderable(self, renderable, pos=None):
        """ Add renderable. If pos is given, insert into that
        position, otherwise just append"""

        if pos is None:
            self._components.append(renderable)
        else:
            self._components.insert(pos, renderable)

        # todo: see if we can use the pos parameter for the insertion
        # into the _componentmap ordereddict
        self._componentmap[renderable.id] = renderable

        if getattr(renderable, 'bind', None):
            self._bindmap[renderable.bind] = renderable

    def getRenderables(self, recursive=False):
        """ retrieve all renderables. If recursive is true, then
        also return all renderables from the children recursively """

        if recursive:
            result = self._componentmap.values()
            for r in self._componentmap.values():
                try:
                    result += r.getRenderables(recursive)
                except AttributeError:
                    # leaf renderables have no children to collect
                    pass
            return result

        else:
            return self._components

    def getRenderable(self, id):
        """ find renderable by id in the complete (recursive) tree """

        found = self._componentmap.get(id, None)
        if not found:
            # search children
            for r in self.getRenderables(False):
                try:
                    found = r.getRenderable(id)
                    if found:
                        break
                except AttributeError:
                    # leaf renderables cannot contain other renderables
                    pass
        return found

    def getRenderableByBind(self, bind):

        found = self._bindmap.get(bind, None)
        if not found:
            # search children
            for r in self.getRenderables(False):
                try:
                    found = r.getRenderableByBind(bind)
                    if found:
                        break
                except AttributeError:
                    # leaf renderables have no getRenderableByBind
                    pass
        return found
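A minimal sketch (the _Renderable stand-in is hypothetical; it only carries the 'id' and 'bind' attributes this container relies on):

class _Renderable(object):
    is_group = False

    def __init__(self, id, bind=None):
        self.id = id
        self.bind = bind

form = RenderableContainer()
form.id = 'form'                        # nested containers need an id themselves
form.addRenderable(_Renderable('name', bind='person.name'))
inner = RenderableContainer()
inner.id = 'inner'
inner.addRenderable(_Renderable('age'))
form.addRenderable(inner)
form.getRenderable('age')               # found via the recursive child search
form.getRenderableByBind('person.name')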
Example #51
class CnCGraph(object):
    def __init__(self, name, g):
        verifyCollectionDecls("item", g.itemColls)
        steps = [x.step for x in g.stepRels]
        verifyCollectionDecls("step", steps)
        self.name = name
        # items
        self.itemDeclarations = OrderedDict(
            (i.collName, makeItemDecl(i)) for i in g.itemColls)
        self.concreteItems = [
            i for i in self.itemDeclarations.values() if not i.isVirtual
        ]
        # item virtual mappings
        self.vms = [i for i in self.itemDeclarations.values() if i.isVirtual]
        self.inlineVms = [i for i in self.vms if i.isInline]
        self.externVms = [i for i in self.vms if not i.isInline]
        # steps / pseudo-steps
        self.stepFunctions = OrderedDict(
            (x.step.collName, StepFunction(x)) for x in g.stepRels)
        verifyEnv(self.stepFunctions)
        self.initFunction = self.stepFunctions.pop(initNameRaw)
        self.initFunction.collName = "cncInitialize"
        self.finalizeFunction = self.stepFunctions.pop(finalizeNameRaw)
        self.finalizeFunction.collName = "cncFinalize"
        self.finalAndSteps = [self.finalizeFunction] + self.stepFunctions.values()
        # set up step attribute lookup dict
        self.stepLikes = OrderedDict(self.stepFunctions)
        self.stepLikes[self.initFunction.collName] = self.initFunction
        self.stepLikes[self.finalizeFunction.collName] = self.finalizeFunction
        # attribute tracking
        self.allAttrNames = set()
        # context
        self.ctxParams = filter(bool, map(
            strip, g.ctx.splitlines())) if g.ctx else []

    def hasTuning(self, name):
        return name in self.allAttrNames

    def hasCustomDist(self):
        return self.hasTuning('distfn') or self.hasTuning('placeWith')

    def lookupType(self, item):
        return self.itemDeclarations[item.collName].type

    def itemDistFn(self, collName, ranksExpr):
        return getDistFn(self.itemDeclarations, collName, ranksExpr)

    def stepDistFn(self, collName, ranksExpr):
        return getDistFn(self.stepLikes, collName, ranksExpr)

    def itemTuningFn(self, collName, name, ranksExpr, default):
        return getTuningFn(self.itemDeclarations, collName, name, ranksExpr,
                           default)

    def stepTuningFn(self, collName, name, ranksExpr, default):
        return getTuningFn(self.stepLikes, collName, name, ranksExpr, default)

    def priorityFn(self, collName, ranksExpr):
        return self.stepTuningFn(collName, 'priority', ranksExpr, "0")

    def addTunings(self, tuningSpec):
        for t in tuningSpec.itemTunings:
            x = self.itemDeclarations.get(t.collName)
            assert x, "Unknown item in tuning: {0}".format(t.collName)
            x.attrs.update(t.attrs)
            self.allAttrNames.update(t.attrs.keys())
        for t in tuningSpec.stepTunings:
            x = self.stepLikes.get(t.collName)
            assert x, "Unknown step in tuning: {0}".format(t.collName)
            if t.inputName:
                i = x.inputsDict.get(t.inputName)
                assert i, "Unknown input in tuning: {0} <- {1}".format(
                    t.collName, t.inputName)
                i.attrs.update(t.attrs)
                self.allAttrNames.update(t.attrs.keys())
            else:
                x.attrs.update(t.attrs)
                self.allAttrNames.update(t.attrs.keys())
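A hedged sketch of the duck-typed tuning spec that addTunings consumes (the class names below are hypothetical; real specs come from the CnC tuning parser):

class _Tuning(object):
    def __init__(self, collName, attrs, inputName=None):
        self.collName = collName
        self.attrs = attrs
        self.inputName = inputName

class _TuningSpec(object):
    def __init__(self, itemTunings, stepTunings):
        self.itemTunings = itemTunings
        self.stepTunings = stepTunings

# graph.addTunings(_TuningSpec(
#     itemTunings=[_Tuning('matrix', {'distfn': 'BLOCK_CYCLIC'})],
#     stepTunings=[_Tuning('solve', {'priority': '2'})]))
# graph.hasCustomDist()  # True once a 'distfn' attribute has been recorded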
Example #52
class ResourceBuilder(object):
    """ Helper to create a ressource """
    def __init__(self, name=None, required=False):
        self._name = name
        self._fields = OrderedDict()
        self._required = required

    def add_field(self,
                  field,
                  arg=None,
                  value=None,
                  extended=False,
                  hidden=False,
                  e_type=str,
                  required=None):
        """Add a new field to the current ResourceBuilder.

           Keyword arguments:
           field    -- field name
           arg      -- name of the attribute name in arg object (argparse)
           value    -- a default for this field, used for resource creation.
           extended -- If set to True, the current field will be displayed
                       in extended list mode only.
           hidden   -- If set to True, the current field won't be exposed
                       as an available key.
           e_type   -- field data type (default str)
           required -- True if the current field is required for create
                       and update methods
        """
        if required is None:
            required = self._required
        if arg is None:
            arg = re.sub('(?!^)([A-Z]+)', r'_\1', field).lower()
        self._fields[field] = {
            'field': field,
            'arg': arg,
            'value': value,
            'extended': extended,
            'required': required,
            'e_type': e_type,
            'hidden': hidden
        }

    def get_keys(self, extended=False):
        res = []
        for field in self._fields.values():
            if field['hidden']:
                continue
            if not field['extended']:
                res.append(field['field'])
            if extended and field['extended']:
                res.append(field['field'])
        return res

    def get_fields(self, extended=False, full=False):
        res = []
        if extended:
            for field in self._fields.values():
                if field['extended']:
                    res.append(field['field'])
        elif full:
            for field in self._fields.keys():
                res.append(field)
        else:
            for field in self._fields.values():
                if not field['extended']:
                    res.append(field['field'])
        return res

    def set_arg(self, key, arg):
        field = self._fields.get(key, None)
        if field is not None:
            field['arg'] = arg

    def get_value(self, key):
        field = self._fields.get(key, None)
        if field is not None:
            return field['value']
        else:
            return None

    def set_value(self, key, value):
        field = self._fields.get(key, None)
        if field is not None:
            field['value'] = value

    def to_resource(self):
        ret = {}
        for field in self._fields.values():
            ret[field['field']] = field['value']
        return ret

    def load_from_args(self, namespace):
        for field in self._fields.values():
            value = getattr(namespace, field['arg'], None)
            if value is not None:
                field['value'] = value

    def copy(self, data):
        if isinstance(data, dict):
            for field, val in self._fields.items():
                val['value'] = data.get(field, "")
        elif isinstance(data, ResourceBuilder):
            for field, val in self._fields.items():
                # read from the other builder's _fields mapping; indexing the
                # builder itself is not supported.
                val['value'] = data._fields[field]['value']

    def __str__(self):
        return json.dumps(self.to_resource(), sort_keys=True, indent=2)

    def check_required_fields(self):
        for field in self._fields.values():
            if field['required']:
                value = field['value']
                if value is None:
                    raise ValueError("missing value for required field : " +
                                     field['field'])
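                # validate by conversion; a bad value raises ValueError here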
                e_type = field['e_type']
                if e_type == int:
                    int(value)
                if e_type == float:
                    float(value)
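A minimal usage sketch (the field names are hypothetical):

import argparse

rb = ResourceBuilder('user')
rb.add_field('firstName')               # arg name derived as 'first_name'
rb.add_field('mail', required=True)
rb.load_from_args(argparse.Namespace(first_name='Ada', mail='ada@example.org'))
rb.check_required_fields()              # raises ValueError if 'mail' were unset
print(rb)                               # JSON rendering of to_resource()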