def update_metadata(self, new_metadata):
    if not new_metadata:
        return
    print "Update metadata?"
    has_changes = False
    for key, val in new_metadata.items():
        if self.metadata.get(key) != val:
            has_changes = True
    if not has_changes:
        return
    print "Has changes"
    data = deepcopy(self.metadata)
    data.update(new_metadata)
    data = OrderedDict(sorted(data.items(), key=lambda t: t[0]))
    new_json = json.dumps(data, indent=4)
    f = codecs.open(self.full_local_path, 'r', 'utf-8')
    org_content = f.read()
    f.close()

    def replacer(m):
        parts = m.group(0).split('\n')
        return parts[0].strip() + '\n' + new_json + '\n' + parts[-1].strip()

    new_content = self._json_comment_re.sub(replacer, org_content)
    print new_content
    if new_content == org_content:
        return
    print "Write new content"
    f = codecs.open(self.full_local_path, 'w', 'utf-8')
    f.write(new_content)
    f.close()
def collectFilesForApp(self, app):
    fileBodies = []
    totalValues = OrderedDict()
    rootDir = app.getPreviousWriteDirInfo()
    if not os.path.isdir(rootDir):
        sys.stderr.write("No temporary directory found at " + rootDir + " - not collecting batch reports.\n")
        return
    dirlist = os.listdir(rootDir)
    dirlist.sort()
    compulsoryVersions = set(app.getBatchConfigValue("batch_collect_compulsory_version"))
    versionsFound = set()
    for dir in dirlist:
        fullDir = os.path.join(rootDir, dir)
        if os.path.isdir(fullDir) and self.matchesApp(dir, app):
            currBodies, currVersions = self.parseDirectory(fullDir, app, totalValues)
            fileBodies += currBodies
            versionsFound.update(currVersions)
    if len(fileBodies) == 0:
        self.diag.info("No information found in " + rootDir)
        return
    missingVersions = compulsoryVersions.difference(versionsFound)
    mailTitle = self.getTitle(app, totalValues)
    mailContents = self.mailSender.createMailHeaderForSend(self.runId, mailTitle, app)
    mailContents += self.getMailBody(app, fileBodies, missingVersions)
    allCats = set(totalValues.keys())
    noMailCats = set(["succeeded", "known bugs"])
    allSuccess = allCats.issubset(noMailCats)
    self.mailSender.sendOrStoreMail(app, mailContents, isAllSuccess=allSuccess)
def begetDataBody(self, *pa, **kwa):
    """ Returns duple (data, body) for request from arguments.

        Positional arguments may be either strings, sequences of
        key/value duples, or dictionaries.

        Body is a string concatenated from all string positional
        arguments; non-string positional arguments and keyword
        arguments are ignored when building it.

        Data is an ordered dict whose items are extracted from
        self.preloads, any sequences of duples and/or dicts in the
        positional arguments, and the keyword arguments. String
        positional arguments are ignored when building it.
    """
    body = ''
    data = ODict(self.preloads)  # load defaults
    for a in pa:  # extract key/value pairs from positional parameters
        if isinstance(a, basestring):  # string
            body = '%s%s' % (body, a)  # concat
        else:  # dict or sequence of duples
            data.update(a)
    data.update(kwa)  # so insert keyword arguments
    if isinstance(body, unicode):
        body = body.encode('utf-8')
        if 'utf-8' not in self.headers['content-type']:
            self.headers['content-type'] += '; charset=utf-8'
    return (data, body)
def _get_csv_data(user_profile):
    data = OrderedDict()
    data['application_status_ID'] = user_profile.applicationstatus.pk
    data['profile'] = _get_profile_data(user_profile)
    data.update(_get_csv_form_data(user_profile))
    # data['optional_subjects'] = _get_other_subjects_data(user_profile)
    return data
def parse(tokens):
    if tokens[0] == '{':
        ret = OrderedDict()
        tokens = tokens[1:]
        while tokens[0] != '}':
            key = tokens[0]
            tokens = tokens[1:]
            tokens = tokens[1:]  # :
            value, tokens = parse(tokens)
            if tokens[0] == ',':
                tokens = tokens[1:]
            ret[key] = value
        tokens = tokens[1:]
        return ret, tokens
    elif tokens[0] == '[':
        ret = []
        tokens = tokens[1:]
        while tokens[0] != ']':
            value, tokens = parse(tokens)
            if tokens[0] == ',':
                tokens = tokens[1:]
            ret.append(value)
        tokens = tokens[1:]
        return ret, tokens
    else:
        return tokens[0], tokens[1:]
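# A quick, hypothetical usage sketch for parse() above, assuming the input has
# already been tokenized into a flat list (the tokenizer itself is not shown):
tokens = ['{', 'a', ':', '1', ',', 'b', ':', '[', '2', ',', '3', ']', '}']
value, rest = parse(tokens)
print value  # OrderedDict([('a', '1'), ('b', ['2', '3'])])
print rest   # []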
class EmailResponder(plugins.Responder):
    def __init__(self, optionMap, *args):
        plugins.Responder.__init__(self)
        # use the command-line name if given, else the date
        self.runId = optionMap.get("name", calculateBatchDate())
        self.batchAppData = OrderedDict()
        self.allApps = OrderedDict()

    def notifyComplete(self, test):
        if test.app.emailEnabled():
            if not self.batchAppData.has_key(test.app):
                self.addApplication(test)
            self.batchAppData[test.app].storeCategory(test)

    def getRootSuite(self, test):
        if test.parent:
            return self.getRootSuite(test.parent)
        else:
            return test

    def addApplication(self, test):
        rootSuite = self.getRootSuite(test)
        app = test.app
        self.batchAppData[app] = BatchApplicationData(rootSuite)
        self.allApps.setdefault(app.name, []).append(app)

    def notifyAllComplete(self):
        mailSender = MailSender(self.runId)
        for appList in self.allApps.values():
            batchDataList = map(self.batchAppData.get, appList)
            mailSender.send(batchDataList)
def read_all(self):
    result = OrderedDict()
    self.native_book = self._get_book()
    for sheet in self.native_book.sheets():
        data_dict = self.read_sheet(sheet)
        result.update(data_dict)
    return result
def __init__(self):
    if not hasattr(self.__class__, 'FILTERS'):
        self.__class__.FILTERS = dexy.introspect.filters(Constants.NULL_LOGGER)
    if not hasattr(self.__class__, 'SOURCE_CODE'):
        artifact_class_source = inspect.getsource(self.__class__)
        artifact_py_source = inspect.getsource(Artifact)
        self.__class__.SOURCE_CODE = hashlib.md5(artifact_class_source + artifact_py_source).hexdigest()

    self._inputs = {}
    self.additional = None
    self.db = []  # accepts 'append'
    self.args = {}
    self.args['globals'] = {}
    self.is_last = False
    self.artifact_class_source = self.__class__.SOURCE_CODE
    self.artifacts_dir = 'artifacts'  # TODO don't hard code
    self.batch_id = None
    self.binary_input = None
    self.binary_output = None
    self.ctime = None
    self.data_dict = OrderedDict()
    self.dexy_version = Version.VERSION
    self.dirty = False
    self.document_key = None
    self.elapsed = 0
    self.final = None
    self.initial = None
    self.inode = None
    self.input_data_dict = OrderedDict()
    self.key = None
    self.log = logging.getLogger()
    self.mtime = None
    self.state = 'new'
def edge_list(self):
    """ Return the list of edges for the derivatives of this workflow. """
    # TODO: Shouldn't have to do this every time.
    self._edges = super(CyclicWorkflow, self).edge_list()

    if len(self._mapped_severed_edges) > 0:
        cyclic_edges = OrderedDict()
        for edge in self._mapped_severed_edges:
            cyclic_edges[edge[0]] = edge[1]

        # Finally, modify our edge list to include the severed edges, and
        # exclude the boundary edges.
        for src, targets in self._edges.iteritems():
            if '@in' not in src or \
               not any(edge in cyclic_edges.values() for edge in targets):
                if isinstance(targets, str):
                    targets = [targets]
                newtargets = []
                for target in targets:
                    if '@out' not in target or \
                       src not in cyclic_edges:
                        newtargets.append(target)
                if len(newtargets) > 0:
                    cyclic_edges[src] = newtargets
        self._edges = cyclic_edges
    return self._edges
def __init__(self, file, mode='file'):
    """Start dividing 'file' in chunks.

    'file' is the file to chunk up. By default it is the name of a file
    on the filesystem, but with 'mode' set to 'string', 'file' is passed
    as a string.
    """
    log.debug("Initialising module parser for file '%s'.", file)
    # Dictionary inits
    self.classes = OrderedDict()
    self.functions = OrderedDict()
    self.protectedSections = OrderedDict()
    self.protectionDeclarations = []
    # Read and mangle the file
    self.filebuf = self.readFile(file, mode)
    self.splitSource()
    # Note: ast = abstract syntax tree (python internal thingy),
    # generated by the imported 'parser'.
    self.filebuf = self.filebuf.encode(self.encoding)
    self.ast = parser.suite(self.filebuf)
    self.code = self.ast.compile()
    # The next two filter out the classes and the top-level
    # functions in the sourcefile and the protected
    # sections. Beware that the rest is left out!
    self.findClassesAndFunctions()
    self.findProtectedSections()
    self.findProtectionDeclarations()
def get_field_names_for_question_set(questions, respondent_set):
    fields = OrderedDict()
    for qu in questions:
        field_names = CsvFieldGenerator.get_field_names_for_question(qu, respondent_set)
        if field_names is not None:
            fields = OrderedDict(fields.items() + field_names.items())
    return fields
def test_delitem(self):
    pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
    od = OrderedDict(pairs)
    del od['a']
    self.assert_('a' not in od)
    self.assertRaises(KeyError, od.__delitem__, 'a')
    self.assertEqual(list(od.items()), pairs[:2] + pairs[3:])
def test_clear(self):
    pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
    shuffle(pairs)
    od = OrderedDict(pairs)
    self.assertEqual(len(od), len(pairs))
    od.clear()
    self.assertEqual(len(od), 0)
def scan_file(file_name, subfile):
    # start analysis with basic info
    pe = pefile.PE(file_name)
    machine = pe.FILE_HEADER.Machine
    all_results = OrderedDict([
        ('MD5', get_hash(file_name, 'md5')),
        ('SHA1', get_hash(file_name, 'sha1')),
        ('SHA256', get_hash(file_name, 'sha256')),
        ('Type', commands.getoutput('file %s' % file_name).split(file_name + ': ')[1]),
        ('Size', (os.path.getsize(file_name)) / 1000),
        ('SSDeep', get_ssdeep(file_name)),
        #('ImpHash', pe.get_imphash()),
        ('Arch', pefile.MACHINE_TYPE[machine]),
        ('Entry Point', hex(pe.OPTIONAL_HEADER.AddressOfEntryPoint)),
        ('Compiled', datetime.datetime.fromtimestamp(pe.FILE_HEADER.TimeDateStamp)),
        ('Start Address', grep_saddress(file_name))
    ])
    print '\n[ %s ]' % file_name
    for key, value in all_results.iteritems():
        if key == 'Compiled' or key == 'Entry Point' or key == 'Start Address':
            print '%s:\t\t%s' % (key, value)
        else:
            print '%s:\t\t\t%s' % (key, value)
    if subfile:
        print '\n'
        check_subfile(file_name)
def _parse(self, menu):
    di = OrderedDict()
    for value in menu:
        id = value.get('id', value.get('title'))
        extra = value.get('extra', None)
        separator = False
        if extra:
            separator = extra.get('separator', None)
        title = value.get('title')
        if isinstance(title, str):
            translated_title = translate(_p(title), context=self.request)
        else:
            translated_title = translate(value.get('title'), domain=_p,
                                         context=self.request)
        su = OrderedDict(label=translated_title,
                         action=self._action(value.get('action', '')),
                         icon=value.get('icon', ''),
                         _class=value.get('class', ''),
                         separator_before=separator,
                         _disabled=(not value.get('action', False) and separator) and True or False,
                         )
        if value.get('submenu', None):
            su.update(OrderedDict(submenu=self._parse(value.get('submenu'))))
        di[id] = su
    return di
def update_metadata(self, new_metadata):
    if not new_metadata:
        return
    has_changes = False
    for key, val in new_metadata.items():
        if self.metadata.get(key) != val:
            has_changes = True
    if not has_changes:
        return
    data = deepcopy(self.metadata)
    data.update(new_metadata)
    data = OrderedDict(sorted(data.items(), key=lambda t: t[0]))
    new_json = json.dumps(data, indent=4)
    org_content = _read_unicode_file_dammit(self.full_local_path)

    def replacer(m):
        parts = m.group(0).split('\n')
        return parts[0].strip() + '\n' + new_json + '\n' + parts[-1].strip()

    new_content = self._json_comment_re.sub(replacer, org_content)
    if new_content == org_content:
        return
    logger.debug("Writing new metadata")
    f = codecs.open(self.full_local_path, 'w', 'utf-8')
    f.write(new_content)
    f.close()
class JUnitResponder(plugins.Responder):
    """Respond to test results and write out results in format suitable for
    JUnit report writer. Only does anything if the app has
    batch_junit_format:true in its config file.
    """
    def __init__(self, optionMap, *args):
        plugins.Responder.__init__(self)
        # use the command-line name if given, else the date
        self.runId = optionMap.get("name", calculateBatchDate())
        self.allApps = OrderedDict()
        self.appData = OrderedDict()

    def useJUnitFormat(self, app):
        return app.getBatchConfigValue("batch_junit_format") == "true"

    def notifyComplete(self, test):
        if not self.useJUnitFormat(test.app):
            return
        if not self.appData.has_key(test.app):
            self._addApplication(test)
        self.appData[test.app].storeResult(test)

    def notifyAllComplete(self):
        # allApps is {appname: [app]}
        for appList in self.allApps.values():
            # appData is {app: data}
            for app in appList:
                if self.useJUnitFormat(app):
                    data = self.appData[app]
                    ReportWriter(self.runId).writeResults(app, data)

    def _addApplication(self, test):
        app = test.app
        self.appData[app] = JUnitApplicationData()
        self.allApps.setdefault(app.name, []).append(app)
class ActionRunner(BaseActionRunner):
    def __init__(self, optionMap, *args):
        BaseActionRunner.__init__(self, optionMap, logging.getLogger("Action Runner"))
        self.currentTestRunner = None
        self.previousTestRunner = None
        self.appRunners = OrderedDict()

    def addSuite(self, suite):
        plugins.log.info("Using " + suite.app.description(includeCheckout=True))
        appRunner = ApplicationRunner(suite, self.diag)
        self.appRunners[suite.app] = appRunner

    def notifyAllReadAndNotified(self):
        # kicks off processing. Don't use notifyAllRead as we end up running
        # all the tests before everyone's been notified of the reading.
        self.runAllTests()

    def notifyRerun(self, test):
        if self.currentTestRunner and self.currentTestRunner.test is test:
            self.diag.info("Got rerun notification for " + repr(test) + ", resetting actions")
            self.currentTestRunner.resetActionSequence()

    def runTest(self, test):
        # We have the lock coming in to here...
        appRunner = self.appRunners.get(test.app)
        if appRunner:
            self.lock.acquire()
            self.currentTestRunner = TestRunner(test, appRunner, self.diag,
                                                self.exited, self.killSignal)
            self.lock.release()
            self.currentTestRunner.performActions(self.previousTestRunner)
            self.previousTestRunner = self.currentTestRunner
            self.lock.acquire()
            self.currentTestRunner = None
            self.notifyComplete(test)
            self.lock.release()

    def killTests(self):
        if self.currentTestRunner:
            self.currentTestRunner.kill(self.killSignal)

    def killOrCancel(self, test):
        if self.currentTestRunner and self.currentTestRunner.test is test:
            self.currentTestRunner.kill()
        else:
            self.cancel(test)

    def getAllActionClasses(self):
        classes = set()
        for appRunner in self.appRunners.values():
            for action in appRunner.actionSequence:
                classes.add(action.__class__)
        return classes

    def cleanup(self):
        for actionClass in self.getAllActionClasses():
            actionClass.finalise()
        for appRunner in self.appRunners.values():
            appRunner.cleanActions()
class TestIteratorMap:
    def __init__(self, dynamic, allApps):
        self.dict = OrderedDict()
        self.dynamic = dynamic
        self.parentApps = {}
        for app in allApps:
            for extra in [app] + app.extras:
                self.parentApps[extra] = app

    def getKey(self, test):
        if self.dynamic:
            return test
        elif test is not None:
            return self.parentApps.get(test.app, test.app), test.getRelPath()

    def store(self, test, iter):
        self.dict[self.getKey(test)] = iter

    def updateIterator(self, test, oldRelPath):
        # relative path of test has changed
        key = self.parentApps.get(test.app, test.app), oldRelPath
        iter = self.dict.get(key)
        if iter is not None:
            self.store(test, iter)
            del self.dict[key]
            return iter
        else:
            return self.getIterator(test)

    def getIterator(self, test):
        return self.dict.get(self.getKey(test))

    def remove(self, test):
        key = self.getKey(test)
        if self.dict.has_key(key):
            del self.dict[key]
def __init__(self, name, g):
    verifyCollectionDecls("item", g.itemColls)
    steps = [x.step for x in g.stepRels]
    verifyCollectionDecls("step", steps)
    self.name = name
    # items
    self.itemDeclarations = OrderedDict((i.collName, makeItemDecl(i)) for i in g.itemColls)
    self.concreteItems = [i for i in self.itemDeclarations.values() if not i.isVirtual]
    # item virtual mappings
    self.vms = [i for i in self.itemDeclarations.values() if i.isVirtual]
    self.inlineVms = [i for i in self.vms if i.isInline]
    self.externVms = [i for i in self.vms if not i.isInline]
    # steps / pseudo-steps
    self.stepFunctions = OrderedDict((x.step.collName, StepFunction(x)) for x in g.stepRels)
    verifyEnv(self.stepFunctions)
    self.initFunction = self.stepFunctions.pop(initNameRaw)
    self.initFunction.collName = "cncInitialize"
    self.finalizeFunction = self.stepFunctions.pop(finalizeNameRaw)
    self.finalizeFunction.collName = "cncFinalize"
    self.finalAndSteps = [self.finalizeFunction] + self.stepFunctions.values()
    # set up step attribute lookup dict
    self.stepLikes = OrderedDict(self.stepFunctions)
    self.stepLikes[self.initFunction.collName] = self.initFunction
    self.stepLikes[self.finalizeFunction.collName] = self.finalizeFunction
    # attribute tracking
    self.allAttrNames = set()
    # context
    self.ctxParams = filter(bool, map(strip, g.ctx.splitlines())) if g.ctx else []
def __init__(self, filePath=None, key=None):
    OrderedDict.__init__(self)
    if filePath != None and os.path.isfile(filePath):
        self.read(filePath, key)
    else:
        self._filename = filePath
    self.curStanza = None
def get(self, row, columns=None, column_start=None, super_column=None,
        column_finish=None, column_count=100):
    try:
        if super_column is None:
            data_columns = self.data[row]
        else:
            data_columns = self.data[row][super_column]
        results = OrderedDict()
        count = 0
        if columns is not None:
            for c in columns:
                results[c] = data_columns[c]
        else:
            for c in sorted(data_columns.keys()):
                if count > column_count:
                    break
                if column_start and cmp(c, column_start) < 0:
                    continue
                if column_finish and cmp(c, column_finish) > 0:
                    break
                results[c] = data_columns[c]
                count += 1
        if not len(results):
            raise NotFoundException
        for key, value in results.items():
            if isinstance(value, dict) and len(value) == 0:
                del(results[key])
            if value is None:
                del(results[key])
        return results
    except KeyError:
        raise NotFoundException
def kmeans_get_cluster_values(data, clusters):
    values = OrderedDict()
    for dataKey, cluster in clusters.items():
        if not values.has_key(cluster):
            values[cluster] = []
        values[cluster].append(data[dataKey])
    return values
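# Hypothetical usage sketch for kmeans_get_cluster_values(): data values are
# looked up by the same keys the cluster mapping uses (all names invented here;
# an OrderedDict keeps the iteration, and hence output, order deterministic):
data = {'p1': 1.0, 'p2': 1.2, 'p3': 9.8}
clusters = OrderedDict([('p1', 0), ('p2', 0), ('p3', 1)])
print kmeans_get_cluster_values(data, clusters)
# OrderedDict([(0, [1.0, 1.2]), (1, [9.8])])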
def test_form(req):
    # get_var_info = {'len': len(args)}
    var_info = OrderedDict((
        ('form_keys', req.form.keys()),
        ('form_values', ', '.join(tuple(uni(req.form.getvalue(key)) for key in req.form.keys()))),
        ('form_getfirst', '%s,%s' % (req.form.getfirst('pname'), req.form.getfirst('px'))),
        ('form_getlist', '%s,%s' % (req.form.getlist('pname'), req.form.getlist('px'))),
        ('', ''),
        ('args_keys', req.args.keys()),
        ('args_values', ', '.join(tuple(uni(req.args[key]) for key in req.args.keys()))),
        ('args_getfirst', '%s,%s' % (req.args.getfirst('gname'), req.args.getfirst('gx'))),
        ('args_getlist', '%s,%s' % (req.args.getlist('gname'), req.args.getlist('gx'))),
    ))

    buff = get_header("HTTP Form args test") + \
        (get_crumbnav(req),
         "<h2>Get Form</h2>",
         '<form method="get">',
         '<input type="text" name="gname" value="Ondřej"><br/>',
         '<input type="text" name="gsurname" value="Tůma"><br/>',
         '<input type="text" name="gx" value="1">'
         '<input type="text" name="gx" value="2">'
         '<input type="text" name="gx" value="3"><br/>',
         '<input type="submit" name="btn" value="Send">'
         '</form>',
         "<h2>Post Form</h2>",
         '<form method="post">',
         '<input type="text" name="pname" value="Ondřej"><br/>',
         '<input type="text" name="psurname" value="Tůma"><br/>',
         '<input type="text" name="px" value="8">'
         '<input type="text" name="px" value="7">'
         '<input type="text" name="px" value="6"><br/>',
         '<input type="submit" name="btn" value="Send">'
         '</form>',
         "<h2>Variables</h2>",
         "<table>") + \
        tuple("<tr><td>%s:</td><td>%s</td></tr>" % (key, html(val))
              for key, val in var_info.items()) + \
        ("</table>", "<h2>Browser Headers</h2>", "<table>") + \
        tuple("<tr><td>%s:</td><td>%s</td></tr>" % (key, val)
              for key, val in req.headers_in.items()) + \
        ("</table>", "<h2>Request Variables </h2>", "<table>") + \
        tuple("<tr><td>%s:</td><td>%s</td></tr>" % (key, val)
              for key, val in get_variables(req)) + \
        ("</table>",) + \
        get_footer()

    for line in buff:
        req.write(line + '\n')
    return state.OK
def parse(tokens):
    if tokens[0] == "{":
        ret = OrderedDict()
        tokens = tokens[1:]
        while tokens[0] != "}":
            key = tokens[0]
            tokens = tokens[1:]
            tokens = tokens[1:]  # :
            value, tokens = parse(tokens)
            if tokens[0] == ",":
                tokens = tokens[1:]
            ret[key] = value
        tokens = tokens[1:]
        return ret, tokens
    elif tokens[0] == "[":
        ret = []
        tokens = tokens[1:]
        while tokens[0] != "]":
            value, tokens = parse(tokens)
            if tokens[0] == ",":
                tokens = tokens[1:]
            ret.append(value)
        tokens = tokens[1:]
        return ret, tokens
    else:
        return tokens[0], tokens[1:]
def addContents(self):
    dirToProperties = OrderedDict()
    props = self.getAllProperties()
    for prop in props:
        dirToProperties.setdefault(prop.dir, []).append(prop)
    vbox = self.createVBox(dirToProperties)
    self.dialog.vbox.pack_start(vbox, expand=True, fill=True)
def all_plugins():
    """Return a dict of plugin name -> Plugin for all plugins, including core.

    Plugins are registered via the ``dxr.plugins`` setuptools entry point,
    which may point to either a module (in which case a Plugin will be
    constructed based on the contents of the module namespace) or a Plugin
    object (which will be returned directly). The entry point name is what
    the user types into the config file under ``enabled_plugins``.

    The core plugin, which provides many of DXR's cross-language, built-in
    features, is always the first plugin when iterating over the returned
    dict. This lets other plugins override bits of its elasticsearch mappings
    and analyzers when we're building up the schema.
    """
    global _plugin_cache

    def name_and_plugin(entry_point):
        """Return the name of an entry point and the Plugin it points to."""
        object = entry_point.load()
        plugin = (object if isinstance(object, Plugin) else
                  Plugin.from_namespace(object.__dict__))
        plugin.name = entry_point.name
        return entry_point.name, plugin

    if _plugin_cache is None:
        # Iterating over entrypoints could be kind of expensive, with the FS
        # reads and all.
        _plugin_cache = OrderedDict([('core', core_plugin())])
        _plugin_cache.update(name_and_plugin(point)
                             for point in iter_entry_points('dxr.plugins'))
    return _plugin_cache
def read_all(self):
    """read all available sheets"""
    result = OrderedDict()
    for sheet in self.native_book.sheets:
        data_dict = self.read_sheet(sheet)
        result.update(data_dict)
    return result
def __init__(self, optionMap, allApps):
    BaseActionRunner.__init__(self, optionMap, logging.getLogger("Queue System Submit"))
    # queue for putting tests when we couldn't reuse the originals
    self.reuseFailureQueue = Queue()
    self.testCount = 0
    self.testsSubmitted = 0
    self.maxCapacity = 100000  # infinity, sort of
    self.allApps = allApps
    for app in allApps:
        currCap = app.getConfigValue("queue_system_max_capacity")
        if currCap is not None and currCap < self.maxCapacity:
            self.maxCapacity = currCap

    self.jobs = OrderedDict()
    self.submissionRules = {}
    self.killedJobs = {}
    self.queueSystems = {}
    self.reuseOnly = False
    self.submitAddress = None
    self.slaveLogDirs = set()
    self.delayedTestsForAdd = []
    self.remainingForApp = OrderedDict()
    capacityPerSuite = self.maxCapacity / len(allApps)
    for app in allApps:
        self.remainingForApp[app.name] = capacityPerSuite
        self.getQueueSystem(app)  # populate cache
    QueueSystemServer.instance = self
def gen_mapping(infile):
    """Generate a mapping file based on infile csv """
    fh = open(infile)
    cont = fh.read()
    fh.close()
    row = cont.split(LINE_END)[0]

    from ordereddict import OrderedDict
    out = OrderedDict()
    for x in row.split(","):
        #y = space_word(x)
        out.update({x: x})

    ret = """# coding: utf-8
'''
mapping from Google Contact standard field name to our customized fields name.
Edit it to make it fit your fields.

About Mapping:
-----------------

MAPPING = {
    "Google_Field" : "Our Field",
    ...
}

So we need to check and edit the left side of the dict (i.e. Key) to make the
right side (i.e. Value) match the left as much as possible.
'''
"""
    from django.utils import simplejson
    ret += "MAPPING = " + simplejson.dumps(out, indent=4)
    outfile = open(OUT_FILE_NAME, "w")
    outfile.write(ret)
    outfile.close()
def get_items_for_config_file_output(self, source_to_settings, parsed_namespace):
    """Converts the given settings back to a dictionary that can be passed
    to ConfigFormatParser.serialize(..).

    Args:
        source_to_settings: the dictionary described in parse_known_args()
        parsed_namespace: namespace object created within parse_known_args()

    Returns:
        an OrderedDict where keys are strings and values are either strings
        or lists
    """
    config_file_items = OrderedDict()
    for source, settings in source_to_settings.items():
        if source == _COMMAND_LINE_SOURCE_KEY:
            _, existing_command_line_args = settings['']
            for action in self._actions:
                config_file_keys = self.get_possible_config_keys(action)
                if config_file_keys and not action.is_positional_arg and \
                        already_on_command_line(existing_command_line_args,
                                                action.option_strings):
                    value = getattr(parsed_namespace, action.dest, None)
                    if value is not None:
                        if isinstance(value, bool):
                            value = str(value).lower()
                        elif callable(action.type):
                            found = [i for i in range(0, len(existing_command_line_args) - 1)
                                     if existing_command_line_args[i] in config_file_keys]
                            if found:
                                value = existing_command_line_args[found[-1] + 1]
                        config_file_items[config_file_keys[0]] = value
        elif source == _ENV_VAR_SOURCE_KEY:
            for key, (action, value) in settings.items():
                config_file_keys = self.get_possible_config_keys(action)
                if config_file_keys:
                    value = getattr(parsed_namespace, action.dest, None)
                    if value is not None:
                        config_file_items[config_file_keys[0]] = value
        elif source.startswith(_CONFIG_FILE_SOURCE_KEY):
            for key, (action, value) in settings.items():
                config_file_items[key] = value
        elif source == _DEFAULTS_SOURCE_KEY:
            for key, (action, value) in settings.items():
                config_file_keys = self.get_possible_config_keys(action)
                if config_file_keys:
                    value = getattr(parsed_namespace, action.dest, None)
                    if value is not None:
                        config_file_items[config_file_keys[0]] = value
    return config_file_items
def __init__(self):
    # This gets set in the callback
    _parent = None
    self.param_names = []
    self.function_names = []
    self.gradient = {}
    self.hessian = {}
    # Stores the set of edges for which we need derivatives. Dictionary
    # key is the scope's pathname, since we need to store this for each
    # assembly recursion level.
    self.edge_dicts = OrderedDict()
def __repr__(self):
    ls_result = []
    for i in self.data:
        ls_file_stat = OrderedDict()
        ls_file_stat['permissions'] = '%s%s' % (
            self._get_file_type(i['file_type']),
            self._get_file_permission_as_str(i['permission']))
        ls_file_stat['block_replication'] = i['block_replication']
        ls_file_stat['owner'] = i['owner']
        ls_file_stat['group'] = i['group']
        ls_file_stat['size'] = i['length']
        ls_file_stat['last_modification'] = self._get_formatted_time(i['modification_time'])
        ls_file_stat['path'] = i['path']
        ls_result.append(ls_file_stat)
    return tabulate(ls_result, headers="keys")
def to_display_object_literal(self, total=None, query_params=None, partial=False):
    wrapper = OrderedDict()
    wrapper[self.model_name] = self.to_object_literal(partial=partial)
    if query_params is not None:
        wrapper['QueryParams'] = query_params
    elif self.query_params is not None:
        wrapper['QueryParams'] = self.query_params
    if total is not None:
        wrapper['TotalCount'] = total
    elif self.total is not None:
        wrapper['TotalCount'] = self.total
    return wrapper
class TestIteratorMap:
    def __init__(self, dynamic, allApps):
        self.dict = OrderedDict()
        self.dynamic = dynamic
        self.parentApps = {}
        for app in allApps:
            for extra in [app] + app.extras:
                self.parentApps[extra] = app

    def getKey(self, test):
        if self.dynamic:
            return test
        elif test is not None:
            return self.parentApps.get(test.app, test.app), test.getRelPath()

    def store(self, test, iter):
        self.dict[self.getKey(test)] = iter

    def updateIterator(self, test, oldRelPath):
        # relative path of test has changed
        key = self.parentApps.get(test.app, test.app), oldRelPath
        iter = self.dict.get(key)
        if iter is not None:
            self.store(test, iter)
            del self.dict[key]
            return iter
        else:
            return self.getIterator(test)

    def getIterator(self, test):
        return self.dict.get(self.getKey(test))

    def remove(self, test):
        key = self.getKey(test)
        if self.dict.has_key(key):
            del self.dict[key]
def parse_raw_package_data(raw):
    try:
        var = re.split('\|+|\:', raw)
        fields = OrderedDict({
            'codename': var[0],
            'component': var[1],
            'architecture': var[2]
        })
        fields['package'], fields['version'] = var[3].split()
    except Exception as e:
        logging.warn(e)
        logging.warn('unscriptable package => %s' % raw)
        fields = {}
    return fields
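# Hypothetical input for parse_raw_package_data(), with the format inferred
# from the split pattern above (pipes separate codename/component/architecture,
# and a colon introduces "package version"); note the first three keys come
# from a plain dict literal, so their relative order is not guaranteed:
print parse_raw_package_data('wheezy|main|amd64:bash 4.2')
# => fields with codename 'wheezy', component 'main', architecture 'amd64',
#    package 'bash', version '4.2'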
def vendor_resource_type_frequency_csv(request):
    form = APIFilterForm(request.GET)
    if not form.is_valid():
        return HttpResponseBadRequest(json.dumps(form.errors))
    rows, vendor_count = _vendor_resource_type_frequency(**form.cleaned_data)
    response = _create_csv_response('vendor_resource_frequency.csv')
    field_names = OrderedDict((('answer_text', 'Fish Family'),
                               ('count', 'Count'),
                               ('percent', 'Percent')))
    writer = SlugCSVWriter(response, field_names)
    writer.writeheader()
    for row in rows:
        writer.writerow(row)
    return response
def test_box():
    title = 'V8 benchmark results'
    data = OrderedDict()
    data['Chrome'] = [6395, 8212, 7520, 7218, 12464, 1660, 2123, 8607]
    data['Firefox'] = [7473, 8099, 11700, 2651, 6361, 1044, 3797, 9450]
    data['Opera'] = [3472, 2933, 4203, 5229, 5810, 1828, 9013, 4669]
    data['IE'] = [43, 41, 59, 79, 144, 136, 34, 102]
    pe.save_as(
        adict=data,
        dest_title=title,
        dest_chart_type='box',
        dest_file_name='box.svg',
        dest_no_prefix=True
    )
    _validate_and_remove('box.svg')
def import_data():
    options = get_cli_options()
    excute_str = excutemysqlstr(options.to_dsn, options.dbtype, options.sid)
    query_str = query_data(options.from_dsn, options.dbtype, options.sid, options.where)
    result = []
    count = 0
    batch = 10000
    cnt = 5
    print "start................"
    selectsql = query_str.query_db()
    tocursor = excute_str.gen_cursor()
    fromcursor = query_str.gen_cursor().cursor()
    fromcursor.execute(selectsql)
    result = fromcursor.fetchmany(batch)
    columns = [i[0] for i in fromcursor.description]
    result = [OrderedDict(zip(columns, row)) for row in result]
    while result:
        lg = len(result)
        try:
            tocursor.insert_many(result, ordered=False)
            count += 1
            dt = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
            if lg == batch and (count % cnt) == 0:
                print "%s committed %s" % (count * batch, dt)
            elif lg != batch:
                print "%s committed %s" % ((count - 1) * batch + lg, dt)
            else:
                pass
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception, e:
            print Exception, ":", e
        result = fromcursor.fetchmany(batch)
        columns = [i[0] for i in fromcursor.description]
        result = [OrderedDict(zip(columns, row)) for row in result]
def scan_file(file_name, subfile):
    # start analysis with basic info
    try:
        pe = pefile.PE(file_name)
        machine = pe.FILE_HEADER.Machine
    except Exception as err:
        print '[warn] file is not a PE. skipping some checks: %s' % err
        pe = False
    all_results = OrderedDict([
        ('MD5', get_hash(file_name, 'md5')),
        ('SHA1', get_hash(file_name, 'sha1')),
        ('SHA256', get_hash(file_name, 'sha256')),
        ('Type', commands.getoutput('file %s' % file_name).split(file_name + ': ')[1]),
        ('Size', (os.path.getsize(file_name)) / 1000),
        ('SSDeep', get_ssdeep(file_name)),
        #('ImpHash', pe.get_imphash()),
        ('Arch', pefile.MACHINE_TYPE[machine] if pe is not False else 'NA'),
        ('Entry Point', hex(pe.OPTIONAL_HEADER.AddressOfEntryPoint) if pe is not False else 'NA'),
        ('Compiled', datetime.datetime.fromtimestamp(pe.FILE_HEADER.TimeDateStamp) if pe is not False else 'NA'),
        ('Start Address', grep_saddress(file_name) if pe is not False else 'NA')
    ])
    print '\n[ %s ]' % file_name
    for key, value in all_results.iteritems():
        if key == 'Compiled' or key == 'Entry Point' or key == 'Start Address':
            print '%s:\t\t%s' % (key, value)
        else:
            print '%s:\t\t\t%s' % (key, value)
    if subfile:
        print '\n'
        check_subfile(file_name)
def json_representation(notification, render_json=True):
    document = OrderedDict()
    document['id'] = notification.pk
    document['notification_type'] = notification.notification_type
    try:
        document['notified_user'] = \
            notification.notified_user.userprofile.get_restful_link_metadata()
    except UserProfile.DoesNotExist:
        document['notified_user'] = None
    document['info'] = notification.make_info_dict()
    if render_json:
        return render_as_json(document)
    else:
        return document
def __init__(self, status, title, description=None, headers=None,
             href=None, href_text=None, code=None):
    """Initialize with information that can be reported to the client.

    Falcon will catch instances of HTTPError (and subclasses), then use
    the associated information to generate a nice response for the client.

    Args:
        status: HTTP status code and text, such as "400 Bad Request"
        title: Human-friendly error title. Set to None if you wish Falcon
            to return an empty response body (all remaining args will be
            ignored except for headers.) Do this only when you don't wish
            to disclose sensitive information about why a request was
            refused, or if the status and headers are self-descriptive.
        description: Human-friendly description of the error, along with
            a helpful suggestion or two (default None).
        headers: A dictionary of extra headers to return in the response
            to the client (default None).
        href: A URL someone can visit to find out more information
            (default None). Unicode characters are percent-encoded.
        href_text: If href is given, use this as the friendly
            title/description for the link (defaults to "API documentation
            for this error").
        code: An internal code that customers can reference in their
            support request or to help them when searching for knowledge
            base articles related to this error.
    """
    self.status = status
    self.title = title
    self.description = description
    self.headers = headers
    self.code = code
    if href:
        link = self.link = OrderedDict()
        link['text'] = (href_text or 'API documentation for this error')
        link['href'] = util.percent_escape(href)
        link['rel'] = 'help'
    else:
        self.link = None
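# Hedged usage sketch (values invented; assumes util.percent_escape as used in
# the initializer above):
#   err = HTTPError('400 Bad Request', 'Missing token',
#                   href='http://example.com/docs/errors#token')
#   err.link['text']  =>  'API documentation for this error'
#   err.link['rel']   =>  'help'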
def getbody(body):
    sources = OrderedDict([
        ('celestrak-stations', TLESource('http://celestrak.com/NORAD/elements/stations.txt')),
        ('celestrak-recent', TLESource('http://celestrak.com/NORAD/elements/tle-new.txt')),
        # ('celestrak-gps', TLESource('http://celestrak.com/NORAD/elements/supplemental/gps.txt')),
        # ('celestrak-visual', TLESource('http://celestrak.com/NORAD/elements/visual.txt')),
        # ('celestrak-weather', TLESource('http://celestrak.com/NORAD/elements/weather.txt')),
        ('kepler', TLESource('http://mstl.atl.calpoly.edu/~ops/keps/kepler.txt')),
        # ('zarya', ZaryaSource()),
        ('horizons', HorizonsSource()),
    ])
    aliases = {
        'iss': 'ISS (ZARYA)',
        'dragon': '39680',
        'kicksat': '39685',
    }
    aliases = dict((k.lower(), v) for k, v in aliases.items())
    body = aliases.get(body.lower(), body)
    known_sources = {
        'ISS (ZARYA)': 'celestrak-stations',
    }
    known_sources = dict((k.lower(), v) for k, v in known_sources.items())
    sourcelist = list(sources)
    # TODO: check epoch if there are multiple, and take the one we're nearest to
    # TODO: maybe split this up into identifying the catalog numbers, and then
    # downloading TLEs
    # http://www.celestrak.com/NORAD/elements/master.asp
    if body.isdigit():
        # Assume it's a catalog number
        # Space Track is slow, but good data
        sourcelist.insert(0, 'spacetrack')
        sources['spacetrack'] = SpaceTrackSource()
    else:
        try:
            source = known_sources[body]
            sourcelist.remove(source)
            sourcelist.insert(0, source)
        except KeyError, e:
            pass
def run(self):
    goodSuites = []
    rejectionInfo = OrderedDict()
    self.notify("StartRead")
    for suite in self.suites:
        try:
            self.readTestSuiteContents(suite)
            self.diag.info("SUCCESS: Created test suite of size " + str(suite.size()))
            if suite.size() > 0 or self.allowEmpty:
                goodSuites.append(suite)
                suite.notify("Add", initial=True)
            else:
                rejectionInfo[suite.app] = "no tests matching the selection criteria found."
        except plugins.TextTestError, e:
            rejectionInfo[suite.app] = str(e)
def get(self, request, *args, **kwargs):
    notification_id = kwargs.get('notification_id', 0)
    try:
        notification = Notification.objects.get(pk=notification_id)
        json_document = NotificationResource.json_representation(notification)
        return HttpResponse(json_document)
    except Notification.DoesNotExist:
        error = OrderedDict()
        message = unicode(strings.NON_EXISTANT_NOTIFICATION)
        error['message'] = message
        error_list = [error]
        return HttpResponseNotFound(
            content=json_error_document(error_list),
            content_type='application/json')
def next(self):
    """ Parses the next line of output from the VCF file into a dictionary
    as described in class description.

    Returns:
        dict: dictionary containing parsed VCF data
    """
    line = self._file_object.next()
    line = line.strip()
    columns = line.split(VCF_DELIMITER)
    vcf_dict = OrderedDict(zip(self.columns, columns))
    vcf_dict['INFO'], wierd_lovd_tag = self._get_info_dict(vcf_dict['INFO'])
    return VCFLine(vcf_dict), wierd_lovd_tag
def ordered(self):
    """
    Returns an OrderedDict of sorted lists of names, keyed on the first
    letter of each row's sortkey. Rows come from self.rows, an iterable
    of (name, sortkey) pairs parsed from the CSV credits data.

    :return: OrderedDict
    """
    ordered_names = OrderedDict()
    # sort by sortkey so both the keys and the per-letter lists come out
    # in order (the companion test expects sorted output)
    for name, sortkey in sorted(self.rows, key=lambda row: row[1]):
        letter = sortkey[0]
        if letter not in ordered_names:
            ordered_names[letter] = []
        ordered_names[letter].append(name)
    return ordered_names
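# Hedged sketch of ordered: with self.rows yielding
# [('The Dude', 'Dude'), ('Uli Kunkel', 'Kunkel')], the property returns
# OrderedDict([('D', ['The Dude']), ('K', ['Uli Kunkel'])]).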
def test_stringify_files_creates_correct_form_content(self):
    request = {
        'data': OrderedDict([('id', 42), ('name', 'test')]),
        'headers': {
            'content-type': swaggerpy.http_client.APP_FORM
        }
    }
    with patch('swaggerpy.async_http_client.StringIO',
               return_value='foo') as mock_stringIO:
        with patch('swaggerpy.async_http_client.FileBodyProducer') as mock_fbp:
            swaggerpy.async_http_client.stringify_body(request)
            expected_contents = 'id=42&name=test'
            mock_stringIO.assert_called_once_with(expected_contents)
            mock_fbp.assert_called_once_with('foo')
def test_pie():
    title = 'Browser usage in February 2012 (in %)'
    data = OrderedDict()
    data['IE'] = [19.5]
    data['Firefox'] = [36.6]
    data['Chrome'] = [36.3]
    data['Safari'] = [4.5]
    data['Opera'] = [2.3]
    pe.save_as(
        adict=data,
        dest_title=title,
        dest_chart_type='pie',
        dest_file_name='pie.svg',
        dest_no_prefix=True
    )
    _validate_and_remove('pie.svg')
class MainApp(object):
    def __init__(self, name, cmds, config='app.cfg', host='127.0.0.1',
                 port='5000', script_url=SCRIPT_URL):
        self.name = name
        self.cmds = OrderedDict([(c.name, c) for c in cmds])
        self.app = Flask(__name__)
        self.config = os.path.abspath(config)  # Not being used!
        self.app.config.from_pyfile(self.config)
        # Directories with contents displayed in the page
        self.dirs = []
        self.host = host
        self.port = port
        # Create the url_rules for the Forms
        for i, cmd in enumerate(self.cmds.values()):
            self.app.add_url_rule(
                SCRIPT_URL + (cmd.name if i > 0 else '')
                , cmd.name
                , partial(self.form, cmd.name)
                , methods=['GET', 'POST'])
        # Create the url_rules for serving Form's files directories
        for c in cmds:
            for d in c.dirs:
                self.app.add_url_rule(
                    "{}{}/<path:filename>".format(SCRIPT_URL, d)
                    , "{}-{}".format(c.name, d)  # c.name, not the leftover loop var cmd
                    , partial(self.serve_files, d)
                    , methods=['GET'])
                self.dirs.append(DirContents(d))

    def run(self):
        self.app.run(debug=True, host=self.host)

    def serve_files(self, dir, filename):
        return send_from_directory(
            os.path.abspath(os.path.join(
                os.path.dirname(__file__), '..', '{}')).format(dir),
            filename)

    def form(self, cmd_name):
        f = self.cmds[cmd_name]
        self.active = cmd_name
        f.stdout = ''
        if request.method == 'POST':
            f.process(request.form)
            if f.form.validate():
                f.run()
        return render_template('form.html', form=f.fields_list(),
                               desc=Markup(f.desc), dirs=self.dirs,
                               output_type=f.output_type,
                               output=f.stdout, app=self)
def full_data_dump_csv(request, survey_slug):
    survey = Survey.objects.get(slug=survey_slug)
    response = _create_csv_response('full_dump_{0}.csv'.format(
        datetime.date.today().strftime('%d-%m-%Y')))
    fields = OrderedDict(Respondant.get_field_names().items() +
                         survey.generate_field_names().items())
    writer = SlugCSVWriter(response, fields)
    writer.writeheader()
    for resp in survey.respondant_set.filter(complete=True):
        # very basic removal of some characters that were causing issues
        # in writing rows
        # row_string = resp.csv_row.json_data.replace('\u2019', '\"')
        # row_string = row_string.replace('\u2026', '\"')
        row_ascii = resp.csv_row.json_data.encode('ascii', errors='ignore')
        writer.writerow(json.loads(row_ascii))
    return response
def publication_types(self):
    path = self.context.absolute_url_path()
    query = {
        'path': {'query': path, 'depth': 1},
        'sort_on': 'getObjPositionInParent',
        'object_provides': IPossiblePublicationContainer.__identifier__,
    }
    top_level_publication_folders = self.pc(query)
    pub_types = OrderedDict()
    for pub_folder in top_level_publication_folders:
        pub_types[pub_folder.getId] = {
            'title': pub_folder.Title,
            'description': pub_folder.Description,
        }
    return pub_types
def convertQuery(self, query):
    query = query.split('-')
    low, high = float(query[0]), float(query[1])
    hunit, lunit = 'b', 'b'
    temp = [(1073741824, 'Gb'), (1048576, 'Mb'), (1024, 'Kb')]
    sizes = OrderedDict(temp)
    for item in sizes:
        if high > item and hunit == 'b':
            high = high / item
            hunit = sizes[item]
        if low > item and lunit == 'b':
            low = low / item
            lunit = sizes[item]
    low, high = round(low, 2), round(high, 2)
    return '> ' + str(low) + lunit + '\n to \n' + str(high) + hunit
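# Worked example for convertQuery() (self is an instance of the surrounding
# class; the input is a "low-high" byte range as a string):
#   convertQuery('2048-3145728')  =>  '> 2.0Kb\n to \n3.0Mb'
# 2048 bytes reduce to 2.0Kb and 3145728 bytes to 3.0Mb; each value takes the
# unit of the first threshold it exceeds, largest thresholds checked first.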
def run(self, request, *args, **kwargs):
    form = LatLngValidationForm(request.GET)
    # Bad Request
    if not form.is_valid():
        error_list = error_list_from_form(form, prefix_with_fields=False)
        return HttpResponseBadRequest(
            content=json_error_document(error_list),
            content_type='application/json')
    # FIXME: Implementation pending
    # Build response
    document = OrderedDict()
    return HttpResponse(content=render_as_json(document),
                        content_type='application/json')
def single_select_count_csv(request, question_slug):
    form = APIFilterForm(request.GET)
    if not form.is_valid():
        return HttpResponseBadRequest(json.dumps(form.errors))
    rows, labels = _single_select_count(question_slug, **form.cleaned_data)
    response = _create_csv_response('vendor_resource_frequency.csv')
    field_names = OrderedDict((
        ('answer', question_slug),
        ('count', 'Count'),
    ))
    writer = SlugCSVWriter(response, field_names)
    writer.writeheader()
    for row in rows:
        writer.writerow(row)
    return response
def testComments(self):
    """
    This method tests :class:`foundations.parsers.SectionsFileParser`
    class comments consistencies.
    """
    for type, file in STANDARD_FILES.iteritems():
        sectionsFileParser = SectionsFileParser(file)
        sectionsFileParser.read() and sectionsFileParser.parse(
            rawSections=STANDARD_FILES_RAW_SECTIONS[type])
        self.assertEqual(sectionsFileParser.comments, OrderedDict())
        sectionsFileParser.parse(
            rawSections=STANDARD_FILES_RAW_SECTIONS[type],
            stripComments=False)
        for comment, value in RANDOM_COMMENTS[type].iteritems():
            self.assertIn(comment, sectionsFileParser.comments)
            self.assertEqual(value["id"],
                             sectionsFileParser.comments[comment]["id"])
def assign_initial_positions(data, k):
    """
    Creates the initial positions for the given number of clusters and data.

    @param array data
    @param int k
    @return array
    """
    small = min(data)
    big = max(data)
    num = ceil((abs(big - small) * 1.0) / k)
    cPositions = OrderedDict()
    while k > 0:
        k -= 1
        cPositions[k] = small + num * k
    return cPositions
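# Usage sketch for assign_initial_positions(): with data spanning 0..9 and
# k=3, the step is ceil(9/3) = 3.0, so the starting centroids are spaced
# three apart (keys are assigned from k-1 down to 0):
print assign_initial_positions(range(10), 3)
# OrderedDict([(2, 6.0), (1, 3.0), (0, 0.0)])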
def get_items_for_config_file_output(self, source_to_settings, parsed_namespace):
    """Does the inverse of config parsing by taking parsed values and
    converting them back to a string representing config file contents.

    Args:
        source_to_settings: the dictionary created within parse_known_args()
        parsed_namespace: namespace object created within parse_known_args()

    Returns:
        an OrderedDict with the items to be written to the config file
    """
    config_file_items = OrderedDict()
    for source, settings in source_to_settings.items():
        if source == _COMMAND_LINE_SOURCE_KEY:
            _, existing_command_line_args = settings['']
            for action in self._actions:
                config_file_keys = self.get_possible_config_keys(action)
                if config_file_keys and not action.is_positional_arg and \
                        already_on_command_line(existing_command_line_args,
                                                action.option_strings):
                    value = getattr(parsed_namespace, action.dest, None)
                    if value is not None:
                        if type(value) is bool:
                            value = str(value).lower()
                        elif type(value) is list:
                            value = "[" + ", ".join(map(str, value)) + "]"
                        config_file_items[config_file_keys[0]] = value
        elif source == _ENV_VAR_SOURCE_KEY:
            for key, (action, value) in settings.items():
                config_file_keys = self.get_possible_config_keys(action)
                if config_file_keys:
                    value = getattr(parsed_namespace, action.dest, None)
                    if value is not None:
                        config_file_items[config_file_keys[0]] = value
        elif source.startswith(_CONFIG_FILE_SOURCE_KEY):
            for key, (action, value) in settings.items():
                config_file_items[key] = value
        elif source == _DEFAULTS_SOURCE_KEY:
            for key, (action, value) in settings.items():
                config_file_keys = self.get_possible_config_keys(action)
                if config_file_keys:
                    value = getattr(parsed_namespace, action.dest, None)
                    if value is not None:
                        config_file_items[config_file_keys[0]] = value
    return config_file_items
def test_credits_ordered(self):
    """Should give an ordered dict of ordered lists keyed on first letter
    of sortkey."""
    self.credits_file.readlines = Mock(return_value=[
        'Bunny Lebowski,Lebowski Bunny',
        'Maude Lebowski,Lebowski Maude',
        'Jeffrey Lebowski,Lebowski Jeffrey',
        'Uli Kunkel,Kunkel',
        'The Dude,Dude',
        'Walter Sobchak,Sobchak',
        'Theodore Donald Kerabatsos,Kerabatsos',
    ])
    good_names = OrderedDict()
    good_names['D'] = ['The Dude']
    good_names['K'] = ['Theodore Donald Kerabatsos', 'Uli Kunkel']
    good_names['L'] = ['Bunny Lebowski', 'Jeffrey Lebowski', 'Maude Lebowski']
    good_names['S'] = ['Walter Sobchak']
    self.assertEqual(self.credits_file.ordered, good_names)
def process_exclude_option(exclude_option, excattrs):
    # collect exclude configuration information
    Exc = KgenConfigParser(allow_no_value=True)
    #Exc.optionxform = str
    Exc.read(exclude_option)
    for section in Exc.sections():
        lsection = section.lower().strip()
        if lsection == 'common':
            print 'ERROR: a section of "common" is discarded in INI file for exclusion. Please use "namepath" section instead'
            sys.exit(-1)
        excattrs[lsection] = OrderedDict()
        for option in Exc.options(section):
            loption = option.lower().strip()
            excattrs[lsection][loption] = Exc.get(section, option).strip().split('=')
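# A hypothetical INI file consumed by process_exclude_option(); every section
# other than "common" is treated as a namepath, and each option value is split
# on '=' into a list (section and option names invented for illustration):
#   [mymodule.mysubroutine]
#   skip_intrinsic = true
# which would yield excattrs['mymodule.mysubroutine']['skip_intrinsic'] == ['true']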