def testLoadData(self):
    """
    _testLoadData_

    Test loading the JobGroup, its meta data and any data associated with
    its output fileset and jobs from the database.
    """
    testJobGroupA = self.createTestJobGroup()

    # Load a second instance by id only and pull everything back from the DB
    testJobGroupB = JobGroup(id=testJobGroupA.id)
    testJobGroupB.loadData()

    self.assertEqual(testJobGroupB.subscription["id"],
                     testJobGroupA.subscription["id"],
                     "Job group did not load subscription correctly")

    # Build a tuple with important job information for each job group
    goldenJobs = [(job['id'], job['name'],
                   sorted([inp_file['lfn'] for inp_file in job['input_files']]))
                  for job in testJobGroupA.getJobs(type="list")]
    loadedJobs = [(job['id'], job['name'],
                   sorted([inp_file['lfn'] for inp_file in job['input_files']]))
                  for job in testJobGroupB.getJobs(type="list")]

    # Make sure each job from one group is in the other list; removing the
    # matched entries lets the emptiness check below catch missing jobs
    for job in loadedJobs:
        self.assertIn(job, goldenJobs,
                      "JobGroup loaded an unknown job: \n%s \nis not in \n%s"
                      % (pretty(job), pretty(goldenJobs)))
        goldenJobs.remove(job)

    self.assertFalse(goldenJobs, "JobGroup didn't load all jobs")

    self.assertEqual(testJobGroupB.output.id, testJobGroupA.output.id,
                     "Output fileset didn't load properly")
    return
def _fetch(self, inject=None):
    """ Fetch tag data from the server """
    # Shared bookkeeping (cache timestamp) in the parent class
    TCMS._fetch(self, inject)
    # Directly fetch from the initial object dict
    if inject is not None:
        log.debug("Initializing Tag ID#{0}".format(inject["id"]))
        log.data(pretty(inject))
        self._id = inject["id"]
        self._name = inject["name"]
    # Search by tag id
    elif self._id is not TCMSNone:
        try:
            log.info("Fetching tag " + self.identifier)
            # get_tags() returns a list of matching tag dicts
            inject = self._server.Tag.get_tags({'ids': [self.id]})
            log.debug("Initializing tag " + self.identifier)
            log.data(pretty(inject))
            self._inject = inject
            self._name = inject[0]["name"]
        except IndexError:
            # Empty result list ---> no such tag id on the server
            raise TCMSError("Cannot find tag for {0}".format(
                    self.identifier))
    # Search by tag name
    else:
        try:
            log.info("Fetching tag '{0}'".format(self.name))
            inject = self._server.Tag.get_tags({'names': [self.name]})
            log.debug("Initializing tag '{0}'".format(self.name))
            log.data(pretty(inject))
            self._inject = inject
            self._id = inject[0]["id"]
        except IndexError:
            raise TCMSError("Cannot find tag '{0}'".format(self.name))
    # Index the fetched object into cache
    self._index(self.name)
def get_all_teams(self, season: int = 2020, printData: bool = False):
    """
    Return a list of team objects for the given season.

    Arguments:
        season (int) : season number, 2020 (current season) by default.
        printData (bool) : pretty-print each returned team document.
    """
    # Pull every team document of the requested season from MongoDB
    results = self.db["teams"].find({"season": season})
    # No cursor at all ---> nothing found on the database
    if results is None:
        return None
    teams = []
    for document in results:
        # Optionally show the raw document
        if printData:
            pretty(document)
        teams.append(Team(
            team_name=document["team_name"],
            season=document["season"],
            offensive_rating=document["offensive_rating"],
            defensive_rating=document["defensive_rating"],
            pace=document["pace"],
            wins=document["wins"],
            loss=document["loss"],
        ))
    return teams
def _create(self, name, product, version, type, **kwargs):
    """ Create a new test plan """
    # Payload for the XMLRPC TestPlan.create() call
    hash = {}
    # Name (mandatory)
    if name is None:
        raise TCMSError("Name required for creating new test plan")
    hash["name"] = name
    # Product (an id or name is first turned into a Product object)
    if product is None:
        raise TCMSError("Product required for creating new test plan")
    elif isinstance(product, (int, str)):
        product = Product(product)
    hash["product"] = product.id
    # Version (an int is treated as a version id, a string as a name)
    if version is None:
        raise TCMSError("Version required for creating new test plan")
    elif isinstance(version, int):
        version = Version(version)
    elif isinstance(version, str):
        version = Version(name=version, product=product)
    hash["default_product_version"] = version.id
    # Type
    if type is None:
        raise TCMSError("Type required for creating new test plan")
    elif isinstance(type, (int, str)):
        type = PlanType(type)
    hash["type"] = type.id
    # Parent (optional)
    parent = kwargs.get("parent")
    if parent is not None:
        if isinstance(parent, int):
            parent = TestPlan(parent)
        hash["parent"] = parent.id
    # Document - if not explicitly specified, put empty text
    hash["text"] = kwargs.get("text", " ")
    # Workaround for BZ#725995
    hash["is_active"] = "1"
    # Submit
    log.info("Creating a new test plan")
    log.data(pretty(hash))
    inject = self._server.TestPlan.create(hash)
    log.data(pretty(inject))
    try:
        self._id = inject["plan_id"]
    except TypeError:
        # Server returned something other than the created plan dict
        log.debug("Failed to create a new test plan")
        log.data(pretty(hash))
        log.data(pretty(inject))
        raise TCMSError("Failed to create test plan")
    # Initialize attributes from the returned data and report success
    self._fetch(inject)
    log.info("Successfully created {0}".format(self))
def get_players(self, player_name: str, season: int):
    """
    Return player objects whose name matches the given (partial) name.

    The name is matched as a case-insensitive regular expression, so e.g.
    searching for "Antetokounmpo" returns all 3 players with that last name.
    """
    pattern = re.compile(player_name, re.IGNORECASE)
    players = self.db["players"].find({
        "player_name": pattern,
        "season": season
    })
    if players is None:
        return None
    matched = []
    # A player can appear several times in a single season -- this
    # happens when he gets traded mid-season
    for document in players:
        pretty(document)
        matched.append(Player(
            player_name=document["player_name"],
            position=document["position"],
            season=document["season"],
            per=document["PER"],
            true_shooting=document["true_shooting"],
            defensive_win_shares=document["defensive_win_shares"],
            offensive_win_shares=document["offensive_win_shares"],
            points=document["points"],
            rebounds=document["rebounds"],
            assists=document["assists"],
            offensive_rating=document["offensive_rating"],
            defensive_rating=document["defensive_rating"],
        ))
    return matched
def _fetch(self, inject=None):
    """
    Initialize / refresh test plan data.

    Either fetch them from the server or use provided hash.
    """
    TCMS._fetch(self, inject)
    # Fetch the data hash from the server unless provided
    if inject is None:
        log.info("Fetching test plan " + self.identifier)
        try:
            inject = self._server.TestPlan.filter({'pk': self.id})[0]
        except IndexError as error:
            log.debug(error)
            raise TCMSError(
                    "Failed to fetch test plan TP#{0}".format(self.id))
        self._inject = inject
    # Otherwise just initialize the id from inject
    else:
        self._id = inject["plan_id"]
    log.debug("Initializing test plan " + self.identifier)
    log.data(pretty(inject))
    # Sanity check -- a valid inject must carry the plan id
    if "plan_id" not in inject:
        log.data(pretty(inject))
        raise TCMSError("Failed to initialize " + self.identifier)

    # Set up attributes
    self._author = User(inject["author_id"])
    if inject["owner_id"] is not None:
        self._owner = User(inject["owner_id"])
    else:
        self._owner = None
    self._name = inject["name"]
    self._product = Product({
        "id": inject["product_id"],
        "name": inject["product"]})
    self._version = Version({
        "id": inject["product_version_id"],
        "value": inject["product_version"],
        "product_id": inject["product_id"]})
    self._type = PlanType(inject["type_id"])
    # 'is_active' may arrive as a string or a bool
    self._status = PlanStatus(inject["is_active"] in ["True", True])
    if inject["parent_id"] is not None:
        self._parent = TestPlan(inject["parent_id"])
    else:
        self._parent = None

    # Initialize containers
    self._testcases = PlanCases(self)
    self._testruns = PlanRuns(self)
    self._children = ChildPlans(self)
    # If all tags are cached, initialize them directly from the inject
    if "tag" in inject and Tag._is_cached(inject["tag"]):
        self._tags = PlanTags(
            self, inset=[Tag(tag) for tag in inject["tag"]])
    else:
        self._tags = PlanTags(self)

    # Index the fetched object into cache
    self._index()
def _fetch(self, inject=None):
    """ Fetch product data from the server """
    # Shared bookkeeping (cache timestamp) in the parent class
    TCMS._fetch(self, inject)
    # Directly fetch from the initial object dict
    if inject is not None:
        log.debug("Initializing Product ID#{0}".format(inject["id"]))
        log.data(pretty(inject))
        self._id = inject["id"]
        self._name = inject["name"]
    # Search by product id
    elif self._id is not TCMSNone:
        try:
            log.info("Fetching product " + self.identifier)
            inject = self._server.Product.filter({'id': self.id})[0]
            log.debug("Initializing product " + self.identifier)
            log.data(pretty(inject))
            self._inject = inject
            self._name = inject["name"]
        except IndexError:
            # Empty result list ---> no such product id on the server
            raise TCMSError("Cannot find product for " + self.identifier)
    # Search by product name
    else:
        try:
            log.info("Fetching product '{0}'".format(self.name))
            inject = self._server.Product.filter({'name': self.name})[0]
            log.debug("Initializing product '{0}'".format(self.name))
            log.data(pretty(inject))
            self._inject = inject
            self._id = inject["id"]
        except IndexError:
            raise TCMSError("Cannot find product for '{0}'".format(
                    self.name))
    # Index the fetched object into cache
    self._index(self.name)
def dictionary_comprehension_2():
    """ Demonstrate dict comprehensions with conditions. """
    # Map each python file's absolute path to its size in bytes
    file_sizes = {}
    for source in glob.glob('*.py'):
        file_sizes[os.path.realpath(source)] = os.stat(source).st_size
    # Primes below 101
    prime = list(filter(is_prime, range(101)))
    # For each prime below 20: its square mapped to the square's divisors
    prime_square_divisors = {}
    for candidate in range(20):
        if is_prime(candidate):
            prime_square_divisors[candidate * candidate] = (
                1, candidate, candidate * candidate)
    print(prime)
    pretty(prime_square_divisors)
def dictionary_comprehension_1():
    """ Invert a country->capital mapping with a dict comprehension. """
    country_to_capital = {
        'United Kingdom': 'London',
        'Brazil': 'Brasilia',
        'Morocco': 'Rabat',
        'Sweden': 'Stockholm'
    }
    # Swap keys and values to map each capital back to its country
    capital_to_country = dict(
        (capital, country)
        for country, capital in country_to_capital.items())
    pretty(capital_to_country)
def _update(self):
    """ Save test case data to server """
    # Translate the automated/manual flags into the server's
    # single 'is_automated' field (0 manual, 1 automated, 2 both)
    if self.automated and self.manual:
        is_automated = 2
    elif self.automated:
        is_automated = 1
    else:
        is_automated = 0
    # Assemble the update payload
    payload = {
        "arguments": self.arguments,
        "case_status": self.status.id,
        "category": self.category.id,
        "estimated_time": self.time,
        "is_automated": is_automated,
        "is_automated_proposed": self.autoproposed,
        "extra_link": self.link,
        "notes": self.notes,
        "priority": self.priority.id,
        "product": self.category.product.id,
        "requirement": self.requirement,
        "script": self.script,
        "summary": self.summary,
    }
    if self.tester:
        payload["default_tester"] = self.tester.login
    log.info("Updating test case " + self.identifier)
    log.data(pretty(payload))
    self._server.TestCase.update(self.id, payload)
def _fetch(self, inject=None):
    """ Get the missing test plan type data """
    # Shared bookkeeping (cache timestamp) in the parent class
    TCMS._fetch(self, inject)
    # Directly fetch from the initial object dict
    if inject is not None:
        log.info("Processing PlanType ID#{0} inject".format(inject["id"]))
    # Search by test plan type id
    elif self._id is not TCMSNone:
        try:
            log.info("Fetching test plan type " + self.identifier)
            inject = self._server.TestPlan.get_plan_type(self.id)
        except xmlrpc.client.Fault as error:
            log.debug(error)
            raise TCMSError("Cannot find test plan type for " +
                    self.identifier)
    # Search by test plan type name
    else:
        try:
            log.info("Fetching test plan type '{0}'".format(self.name))
            inject = self._server.TestPlan.check_plan_type(self.name)
        except xmlrpc.client.Fault as error:
            log.debug(error)
            raise TCMSError("PlanType '{0}' not found".format(self.name))
    # Initialize data from the inject and index into cache
    log.debug("Initializing PlanType ID#{0}".format(inject["id"]))
    log.data(pretty(inject))
    self._inject = inject
    self._id = inject["id"]
    self._name = inject["name"]
    self._index(self.name)
def show(self, brief=False, formatting=None, values=None):
    """
    Show metadata

    brief ........ return just the colored node name
    formatting ... custom format string applied to the evaluated 'values'
    values ....... expressions evaluated against this node (may reference
                   the locals 'name', 'data' and 'root' bound below)

    Returns the formatted text, or None when the node has no data.
    """
    # Show nothing if there's nothing
    if not self.data:
        return None

    # Custom formatting
    if formatting is not None:
        # Turn literal "\n" sequences in the format string into newlines
        formatting = re.sub("\\\\n", "\n", formatting)
        # Bound for use inside the eval'd expressions below
        name = self.name
        data = self.data
        root = self.root
        evaluated = []
        # SECURITY: eval() executes arbitrary code -- 'values' must come
        # from a trusted source (e.g. the local user's command line)
        for value in (values or []):
            evaluated.append(eval(value))
        return formatting.format(*evaluated)

    # Show the name
    output = utils.color(self.name, 'red')
    if brief:
        return output

    # List available attributes
    for key, value in sorted(self.data.items()):
        output += "\n{0}: ".format(utils.color(key, 'yellow'))
        if isinstance(value, type("")):
            output += value
        elif isinstance(value, list) and all(
                [isinstance(item, type("")) for item in value]):
            output += utils.listed(value)
        else:
            output += pretty(value)
    return output + "\n"
def update(self, data): """ Update metadata, handle virtual hierarchy """ # Nothing to do if no data if data is None: return for key, value in sorted(data.items()): # Ensure there are no 'None' keys if key is None: raise utils.FormatError("Invalid key 'None'.") # Handle child attributes if key.startswith('/'): name = key.lstrip('/') # Handle deeper nesting (e.g. keys like /one/two/three) by # extracting only the first level of the hierarchy as name match = re.search("([^/]+)(/.*)", name) if match: name = match.groups()[0] value = {match.groups()[1]: value} # Update existing child or create a new one self.child(name, value) # Update regular attributes else: self.data[key] = value log.debug("Data for '{0}' updated.".format(self)) log.data(pretty(self.data))
def search(**query):
    """ Search for test cases """
    # Special handling for automated & manual attributes -- pop them
    # out of the query, they are not server-side filter fields
    automated = query.pop("automated", None)
    manual = query.pop("manual", None)
    # Map to appropriate value of 'is_automated' attribute
    if manual is not None or automated is not None:
        if automated is False and manual is False:
            raise TCMSError("Invalid search "
                    "('manual' and 'automated' cannot be both False)")
        elif automated is False:
            query["is_automated"] = 0
        elif manual is False:
            query["is_automated"] = 1
        elif automated is True and manual is True:
            query["is_automated"] = 2
        elif automated is True:
            query["is_automated__in"] = [1, 2]
        elif manual is True:
            query["is_automated__in"] = [0, 2]
    log.debug("Searching for test cases")
    log.data(pretty(query))
    return [TestCase(inject)
            for inject in TCMS()._server.TestCase.filter(dict(query))]
def grow(self, path):
    """
    Grow the metadata tree for the given directory path

    Note: For each path, grow() should be run only once. Growing the tree
    from the same path multiple times with attribute adding using the "+"
    sign leads to adding the value more than once!
    """
    if path is None:
        return
    path = path.rstrip("/")
    log.info("Walking through directory {0}".format(os.path.abspath(path)))
    # Only the top level of the directory is inspected here; child
    # directories are grown recursively below
    dirpath, dirnames, filenames = next(os.walk(path))
    # Investigate main.fmf as the first file (for correct inheritance)
    filenames = sorted(
        [filename for filename in filenames if filename.endswith(SUFFIX)])
    try:
        filenames.insert(0, filenames.pop(filenames.index(MAIN)))
    except ValueError:
        pass
    # Check every metadata file and load data (ignore hidden)
    for filename in filenames:
        if filename.startswith("."):
            continue
        fullpath = os.path.abspath(os.path.join(dirpath, filename))
        log.info("Checking file {0}".format(fullpath))
        try:
            with open(fullpath) as datafile:
                # Use the safe loader -- metadata files are plain YAML
                # and must not construct arbitrary python objects
                data = yaml.load(datafile, Loader=yaml.SafeLoader)
        except yaml.scanner.ScannerError as error:
            raise utils.FileError("Failed to parse '{0}'\n{1}".format(
                    fullpath, error))
        log.data(pretty(data))
        # Handle main.fmf as data for self
        if filename == MAIN:
            self.sources.append(fullpath)
            self.update(data)
        # Handle other *.fmf files as children
        else:
            self.child(os.path.splitext(filename)[0], data, fullpath)
    # Explore every child directory (ignore hidden dirs and subtrees)
    for dirname in sorted(dirnames):
        if dirname.startswith("."):
            continue
        # Ignore metadata subtrees
        if os.path.isdir(os.path.join(path, dirname, SUFFIX)):
            log.debug("Ignoring metadata tree '{0}'.".format(dirname))
            continue
        self.child(dirname, os.path.join(path, dirname))
    # Remove empty children (ignore directories without metadata)
    for name in list(self.children.keys()):
        child = self.children[name]
        if not child.data and not child.children:
            del self.children[name]
            log.debug("Empty tree '{0}' removed.".format(child.name))
    # Apply inheritance when all scattered data are gathered.
    # This is done only once, from the top parent object.
    if self.parent is None:
        self.inherit()
def _fetch(self, inset=None):
    """ Fetch test runs from the server """
    # Initialized directly from the inset ---> nothing else to do
    if Container._fetch(self, inset):
        return
    log.info("Fetching testruns for {0}".format(self._identifier))
    injects = self._server.TestRun.filter({'plan': self.id})
    log.data(pretty(injects))
    # Keep a separate original copy for later change detection
    self._current = {TestRun(inject) for inject in injects}
    self._original = set(self._current)
def _create(self, testcase, testrun, **kwargs):
    """ Create a new case run """
    # Payload for the XMLRPC TestCaseRun.create() call
    hash = {}
    # TestCase (a string id is turned into a TestCase object first)
    if testcase is None:
        raise TCMSError("Case ID required for new case run")
    elif isinstance(testcase, str):
        testcase = TestCase(testcase)
    hash["case"] = testcase.id
    # TestRun (a string id is turned into a TestRun object first)
    if testrun is None:
        raise TCMSError("Run ID required for new case run")
    elif isinstance(testrun, str):
        testrun = TestRun(testrun)
    hash["run"] = testrun.id
    # Build is required by XMLRPC
    build = testrun.build
    hash["build"] = build.id
    # Submit
    log.info("Creating new case run")
    log.data(pretty(hash))
    inject = self._server.TestCaseRun.create(hash)
    log.data(pretty(inject))
    try:
        self._id = inject["case_run_id"]
    except TypeError:
        # Server returned something other than the created case run dict
        log.debug("Failed to create new case run")
        log.data(pretty(hash))
        log.data(pretty(inject))
        raise TCMSError("Failed to create case run")
    self._fetch(inject)
    log.info("Successfully created {0}".format(self))
    # And finally add to testcases and caseruns containers
    self.testrun.testcases._fetch(
        [self.testcase] + list(self.testrun.testcases))
    self.testrun.caseruns._fetch(
        [self] + list(self.testrun.caseruns))
def _fetch(self, inset=None):
    """ Fetch currently attached tags from the server """
    # Initialized directly from the inset ---> nothing else to do
    if Container._fetch(self, inset):
        return
    log.info("Fetching tags for {0}".format(self._identifier))
    injects = self._server.Tag.filter({'case': self.id})
    log.debug(pretty(injects))
    # Keep a separate original copy for later change detection
    self._current = {Tag(inject) for inject in injects}
    self._original = set(self._current)
def inherit(self):
    """ Apply inheritance """
    # Keep a copy of the pre-merge data around -- custom inheritance
    # extensions need access to the original values
    self.original_data = self.data
    self.merge()
    log.debug("Data for '{0}' inherited.".format(self))
    log.data(pretty(self.data))
    # Recurse into every child node
    for node in self.children.values():
        node.inherit()
def csv_to_prb(csv_path, prb_path):
    """
    Convert a channel-map csv into a .prb probe file.

    csv_path ... object with a 'name' attribute of the input csv
                 (first column: 1-based channel id, second: description)
    prb_path ... object with a 'name' attribute of the output prb file
    """
    # read the file
    with open(csv_path.name) as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        # skip the header row
        next(reader)
        rows = list(reader)
    # Convert ids to 0-based; any non-'tetrode' channel counts as dead
    ids = [int(row[0]) - 1 for row in rows]
    descs = [row[1].strip() for row in rows]
    dead = sorted(
        [ids[n] for n in range(len(ids)) if descs[n] not in ['tetrode']])
    for cg in range(len(ids) // 4):
        print('Group {}:'.format(cg), ids[cg * 4:(cg + 1) * 4])
    print(len(dead), 'dead channels:', dead)
    # Build channel group dictionaries (4 consecutive channels per group)
    groups = {}
    for cg in range(len(ids) // 4):
        channels = ids[cg * 4:(cg + 1) * 4]
        # Fully connected adjacency graph within the tetrode
        graph = list(combinations(channels, r=2))
        # NOTE(review): 'geometries' is defined elsewhere in the module --
        # assumes it holds a coordinate per channel position; confirm
        geometry = {ch: geometries[n] for n, ch in enumerate(channels)}
        description = {
            ch: desc
            for ch, desc in list(zip(ids, descs))[cg * 4:(cg + 1) * 4]
        }
        groups[cg] = {
            'channels': channels,
            'graph': graph,
            'geometry': geometry,
            'description': description
        }
    # Write fields out to prb file
    with open(prb_path.name, 'w') as prb:
        prb.write('# Dead channels contain all non-tetrode channels!\n\n')
        prb.write('dead_channels = {}\n'.format(pretty(dead)))
        prb.write('channel_groups = {}'.format(pretty(groups)))
def _fetch(self, inject=None):
    """
    Initialize / refresh test run data.

    Either fetch them from the server or use the provided hash.
    """
    TCMS._fetch(self, inject)
    # Fetch the data hash from the server unless provided
    if inject is None:
        log.info("Fetching test run {0}".format(self.identifier))
        try:
            inject = self._server.TestRun.filter({'pk': self.id})[0]
        except IndexError as error:
            log.debug(error)
            raise TCMSError(
                    "Failed to fetch test run TR#{0}".format(self.id))
        self._inject = inject
    else:
        self._id = inject["run_id"]
    log.debug("Initializing test run {0}".format(self.identifier))
    log.data(pretty(inject))

    # Set up attributes
    self._build = Build(inject["build_id"])
    self._manager = User(inject["manager_id"])
    self._notes = inject["notes"]
    # A run with a stop date is considered finished
    self._status = RunStatus(inject["stop_date"])
    self._old_status = self._status
    self._summary = inject["summary"]
    self._tester = User(inject["default_tester_id"])
    self._testplan = TestPlan(inject["plan_id"])
    self._time = inject["estimated_time"]
    # Dates may be None ---> strptime raises TypeError in that case
    try:
        self._started = datetime.datetime.strptime(
                inject["start_date"], "%Y-%m-%d %H:%M:%S")
    except TypeError:
        self._started = None
    try:
        self._finished = datetime.datetime.strptime(
                inject["stop_date"], "%Y-%m-%d %H:%M:%S")
    except TypeError:
        self._finished = None

    # Initialize containers
    self._caseruns = RunCaseRuns(self)
    self._testcases = RunCases(self)
    self._tags = RunTags(self)

    # Index the fetched object into cache
    self._index()
def _fetch(self, inject=None):
    """ Fetch user data from the server """
    TCMS._fetch(self, inject)
    if inject is None:
        # Search by id
        if self._id is not TCMSNone:
            try:
                log.info("Fetching user " + self.identifier)
                inject = self._server.User.filter({"id": self.id})[0]
            except IndexError:
                raise TCMSError("Cannot find user for " + self.identifier)
        # Search by login
        elif self._login is not TCMSNone:
            try:
                log.info("Fetching user for login '{0}'".format(
                        self.login))
                inject = self._server.User.filter(
                        {"username": self.login})[0]
            except IndexError:
                raise TCMSError("No user found for login '{0}'".format(
                        self.login))
        # Search by email
        elif self._email is not TCMSNone:
            try:
                log.info("Fetching user for email '{0}'".format(
                        self.email))
                inject = self._server.User.filter({"email": self.email})[0]
            except IndexError:
                raise TCMSError("No user found for email '{0}'".format(
                        self.email))
        # Otherwise initialize to the current user
        else:
            log.info("Fetching the current user")
            inject = self._server.User.get()
            # Extra cache key so the current user is found without an id
            self._index("i-am-current-user")

    # Initialize data from the inject and index into cache
    log.debug("Initializing user UID#{0}".format(inject["id"]))
    log.data(pretty(inject))
    self._inject = inject
    self._id = inject["id"]
    self._login = inject["username"]
    self._email = inject["email"]
    # Full name only when both first and last name are present
    if inject["first_name"] and inject["last_name"]:
        self._name = inject["first_name"] + " " + inject["last_name"]
    else:
        self._name = None
    self._index(self.login, self.email)
def _fetch(self, inset=None):
    """ Save cache timestamp and initialize from inset if given """
    TCMS._fetch(self)
    # Create copies of the initial set (if given)
    if inset is not None:
        log.debug("Initializing {0} for {1} from the inset".format(
                self.__class__.__name__, self._identifier))
        log.debug(pretty(inset))
        # Two separate copies so later modifications can be diffed
        # against the original state
        self._current = set(inset)
        self._original = set(inset)
        # cache into container class
        if config.get_cache_level() >= config.CACHE_OBJECTS:
            self.__class__._cache[self._id] = self
    # Return True if the data are already initialized
    return inset is not None
def _update(self):
    """ Save test case run data to the server """
    # Assemble the update payload
    payload = {
        "build": self.build.id,
        "assignee": self.assignee.id,
        "case_run_status": self.status.id,
        # Work around BZ#715596 -- the server does not accept None notes
        "notes": "" if self.notes is None else self.notes,
        "sortkey": self.sortkey,
    }
    log.info("Updating case run " + self.identifier)
    log.data(pretty(payload))
    self._server.TestCaseRun.update(self.id, payload)
def _fetch(self, inject=None):
    """ Get the missing build data """
    TCMS._fetch(self, inject)
    # Directly fetch from the initial object dict
    if inject is not None:
        log.info("Processing build ID#{0} inject".format(
                inject["build_id"]))
    # Search by build id
    elif self._id is not TCMSNone:
        try:
            log.info("Fetching build " + self.identifier)
            inject = self._server.Build.filter({'pk': self.id})[0]
        except IndexError as error:
            log.debug(error)
            raise TCMSError("Cannot find build for " + self.identifier)
    # Search by build name and product
    else:
        try:
            log.info("Fetching build '{0}' of '{1}'".format(
                    self.name, self.product.name))
            inject = self._server.Build.filter({
                'name': self.name,
                'product': self.product.id
            })[0]
            self._id = inject["build_id"]
        except IndexError as error:
            log.debug(error)
            raise TCMSError("Build '{0}' not found in '{1}'".format(
                    self.name, self.product.name))
        except KeyError:
            # Server may return an error structure instead of a build
            if "args" in inject:
                log.debug(inject["args"])
            raise TCMSError("Build '{0}' not found in '{1}'".format(
                    self.name, self.product.name))
    # Initialize data from the inject and index into cache
    log.debug("Initializing Build ID#{0}".format(inject["build_id"]))
    log.data(pretty(inject))
    self._inject = inject
    self._id = inject["build_id"]
    self._name = inject["name"]
    self._product = Product({
        "id": inject["product_id"],
        "name": inject["product"]
    })
    # Cache key combines build and product names
    self._index("{0}---in---{1}".format(self.name, self.product.name))
def _update(self):
    """ Save test run data to the server """
    # Assemble the update payload
    payload = {
        "build": self.build.id,
        "default_tester": self.tester.id,
        "estimated_time": self.time,
        "manager": self.manager.id,
        "notes": self.notes,
        # This is required until BZ#731982 is fixed
        "product": self.build.product.id,
        "summary": self.summary,
    }
    log.info("Updating test run " + self.identifier)
    log.data(pretty(payload))
    self._server.TestRun.update(self.id, payload)
def show(self, brief=False): """ Show metadata """ # Show the name output = utils.color(self.name, 'red') if brief: return output # List available attributes for key, value in sorted(self.data.iteritems()): output += "\n{0}: ".format(utils.color(key, 'yellow')) if isinstance(value, basestring): output += value elif isinstance(value, list) and all( [isinstance(item, basestring) for item in value]): output += utils.listed(value) else: output += pretty(value) output return output
def _update(self):
    """ Save test plan data to the server """
    # Assemble the update payload
    payload = {
        "name": self.name,
        "product": self.product.id,
        "type": self.type.id,
        # Plan status id 1 means active
        "is_active": self.status.id == 1,
    }
    if self.parent is not None:
        payload["parent"] = self.parent.id
    payload["default_product_version"] = self.version.id
    if self.owner is not None:
        payload["owner"] = self.owner.id
    log.info("Updating test plan " + self.identifier)
    log.data(pretty(payload))
    self._server.TestPlan.update(self.id, payload)
def _fetch(self, inject=None):
    """ Fetch version data from the server """
    TCMS._fetch(self, inject)
    # Directly fetch from the initial object dict
    if inject is not None:
        log.debug("Processing Version ID#{0} inject".format(inject["id"]))
    # Search by version id
    elif self._id is not TCMSNone:
        try:
            log.info("Fetching version {0}".format(self.identifier))
            inject = self._server.Product.filter_versions(
                    {'id': self.id})[0]
        except IndexError:
            raise TCMSError("Cannot find version for {0}".format(
                    self.identifier))
    # Search by product and name
    else:
        try:
            log.info("Fetching version '{0}' of '{1}'".format(
                    self.name, self.product.name))
            inject = self._server.Product.filter_versions({
                'product': self.product.id,
                'value': self.name
            })[0]
        except IndexError:
            raise TCMSError("Cannot find version for '{0}'".format(
                    self.name))
    # Initialize data from the inject and index into cache
    log.debug("Initializing Version ID#{0}".format(inject["id"]))
    log.data(pretty(inject))
    self._inject = inject
    self._id = inject["id"]
    self._name = inject["value"]
    self._product = Product(inject["product_id"])
    # Index by product name & version name (if product is cached)
    if self.product._name is not TCMSNone:
        self._index("{0}---in---{1}".format(self.name, self.product.name))
    # Otherwise index by id only
    else:
        self._index()
def grow(self, path):
    """
    Grow the metadata tree for the given directory path

    Note: For each path, grow() should be run only once. Growing the tree
    from the same path multiple times with attribute adding using the "+"
    sign leads to adding the value more than once!
    """
    if path is None:
        return
    path = path.rstrip("/")
    log.info("Walking through directory {0}".format(path))
    # Inspect just the top level -- list(os.walk(path)) would recurse
    # through the whole tree only to throw most of the result away
    try:
        dirpath, dirnames, filenames = next(os.walk(path))
    except StopIteration:
        raise utils.FileError(
                "Unable to walk through the '{0}' directory.".format(path))
    # Investigate main.fmf as the first file (for correct inheritance)
    filenames = sorted(
        [filename for filename in filenames if filename.endswith(SUFFIX)])
    try:
        filenames.insert(0, filenames.pop(filenames.index(MAIN)))
    except ValueError:
        pass
    # Check every metadata file and load data (ignore hidden)
    for filename in filenames:
        if filename.startswith("."):
            continue
        fullpath = os.path.join(dirpath, filename)
        log.info("Checking file {0}".format(fullpath))
        with open(fullpath) as datafile:
            # Use the safe loader -- metadata files are plain YAML and
            # must not be able to construct arbitrary python objects
            data = yaml.load(datafile, Loader=yaml.SafeLoader)
        log.data(pretty(data))
        # main.fmf provides data for this node, other files are children
        if filename == MAIN:
            self.update(data)
        else:
            self.child(os.path.splitext(filename)[0], data)
    # Explore every child directory (ignore hidden)
    for dirname in sorted(dirnames):
        if dirname.startswith("."):
            continue
        self.child(dirname, os.path.join(path, dirname))
GlobalConsensus.init(inittxn) #initialize txnstream pause("Welcome to bitsim", False) m, time, gt20, btwn = mkMerkleWill(signatories[0].pubKey, signatories[30].pubKey,signatories[5].pubKey) top, bot = normal(signatories[0].pubKey) proof = bot.generateFullProofUpward(top.hash()) print "CONTRACT 1" print """ def normal(key): m = mkM('#NORMAL TXN') return m, m.addBr("if (signed([%r], sig)): ret = Valid(args[-1])"%key) """ pretty(proof[::-1]) print print print "".join(proof[1][::-1]) print print args = [[(m.hash(), 100)],proof] sig = crypto.hash("".join(map(str, args))) args.append(SignedHash(sig, signatories[0].pubKey)) print "How to spend?" pretty(args) print pause("CONTRACT 2") proof2 = gt20.generateFullProofUpward(m.hash()) parts = toScript(proof2, m.hash())
def get_pairs():
    """ Fetch the 'pairs' data and convert it to nested tuples. """
    raw = _get_data('pairs')
    return _convert_tuple_of_tuples(raw)


def _convert_tuple_of_tuples(pairs):
    """
    Convert the list-of-lists under key 'pairs' into a tuple of tuples.

    Each segment looks like ['string', ['l', 'i', 's', 't']]; embedded
    lists become tuples, each segment becomes a tuple, and so does the
    whole sequence. Unlike the previous implementation, the input
    dictionary is left untouched instead of being mutated in place.
    """
    segments = pairs.get('pairs')  # the lists of lists under key 'pairs'
    return tuple(
        tuple(tuple(item) if isinstance(item, list) else item
              for item in segment)
        for segment in segments
    )


if __name__ == '__main__':
    print('REFLECTIONS:')
    pretty(get_reflections())
    # The original bare 'print' was a python-2 leftover (a no-op
    # expression under python 3) -- print an empty line explicitly
    print()
    print('PAIRS:')
    pretty(get_pairs())
else: print %s """ N= 100000 if __name__ == "__main__": code = list(ex%x for x in xrange(N)) m = mast.Mast("compile", "print 1") bot = m.batch_addBr(code)[-1] print print "Generating Hash" mh = m.hash() print "Hash is", enc(mh) print "Generating Proof" proof = bot.generateFullProofUpward(mh) print "proof is:" pretty(proof) code2, _script = mast.toScript(proof, m.hash()) together = enc("".join(map(chr,code2+_script))) print "Blockchain compatible Proof is:" print together c = crypto.hashable(ex%60) dec.getcontext().prec = 30 compressed_size = dec.Decimal(len(str(proof))) initial_size = dec.Decimal(N*sum(map(len,code))) print "Compression achieved for %d branches"%N rate = dec.Decimal(1) - (compressed_size / initial_size) print rate
# --- Reddit analysis demo script ---

def comments_by_submission(s):
    """Returns flat list of all comments in a submission"""
    return s.all_comments


def authors_by_submission(s):
    """Return a flat list of all authors in a submission"""
    return {comment.author for comment in s.all_comments
            if comment.author is not None}


thing_limit = 100

# our bot
user_agent = "Weitz Reddit Analysis Team of Excellence (WRATE) v0.0.1"
r = praw.Reddit(user_agent=user_agent)

# a submission about fish eating pigeons
pigeon_fish_submission = r.get_submission(submission_id="14dkff")
# Helpers are defined above so this call works in linear script order
# (the original defined them after this point)
authors = authors_by_submission(pigeon_fish_submission)

# lets focus in on the author with the most "comment karma",
# and find out what subreddits they read
author = argmax_f(authors, lambda a: a.comment_karma)
author_comments = comments_by_author(author)
subreddit_counts = Counter(comment.submission.subreddit.display_name
                           for comment in author_comments)
pretty(dict(subreddit_counts))

# lets now go to the first comment's subreddit and find out what other
# comments there are. What are people saying?
# BUG FIX: the original bound the subreddit to 'subreddit_comments' and
# then called 'subreddit.get_comments' on an undefined name
subreddit = author_comments[0].subreddit
subreddit_comments = take(thing_limit,
                          subreddit.get_comments(limit=thing_limit))
pretty([comment.body for comment in subreddit_comments])