def test_slots_being_used():
    """
    The class is really using __slots__.
    """
    non_slot_instance = C1(x=1, y="test")
    slot_instance = C1Slots(x=1, y="test")

    assert "__dict__" not in dir(slot_instance)
    assert "__slots__" in dir(slot_instance)

    assert "__dict__" in dir(non_slot_instance)
    assert "__slots__" not in dir(non_slot_instance)

    assert set(["x", "y"]) == set(slot_instance.__slots__)

    if has_pympler:
        assert asizeof(slot_instance) < asizeof(non_slot_instance)

    non_slot_instance.t = "test"
    with pytest.raises(AttributeError):
        slot_instance.t = "test"

    assert 1 == non_slot_instance.method()
    assert 1 == slot_instance.method()

    assert attr.fields(C1Slots) == attr.fields(C1)
    assert attr.asdict(slot_instance) == attr.asdict(non_slot_instance)
def union1(p: Union[A, B]):
    attr.fields(<warning descr="'attr.fields' method should be called on attrs types">p</warning>)
    attr.fields_dict(<warning descr="'attr.fields_dict' method should be called on attrs types">p</warning>)
    attr.asdict(p)
    attr.astuple(p)
    attr.assoc(p)
    attr.evolve(p)
def union2(p: Union[Type[A], Type[B]]):
    attr.fields(p)
    attr.fields_dict(p)
    attr.asdict(<warning descr="'attr.asdict' method should be called on attrs instances">p</warning>)
    attr.astuple(<warning descr="'attr.astuple' method should be called on attrs instances">p</warning>)
    attr.assoc(<warning descr="'attr.assoc' method should be called on attrs instances">p</warning>)
    attr.evolve(<warning descr="'attr.evolve' method should be called on attrs instances">p</warning>)
def _test_replace(self, incident: Incident, name: str, value: Any) -> None:
    mod = {name: value}
    new = incident.replace(**mod)
    expected = asdict(incident, recurse=False)
    expected.update(mod)
    self.assertEqual(asdict(new, recurse=False), expected)
def structural(p):
    print(len(p))
    attr.fields(p)
    attr.fields_dict(p)
    attr.asdict(p)
    attr.astuple(p)
    attr.assoc(p)
    attr.evolve(p)
def asDict(self):
    """
    Return only keys which are different from the defaults.
    """
    defaults = attr.asdict(self.__class__())
    data = attr.asdict(self)
    for key, value in defaults.items():
        if data[key] == value:
            del data[key]
    return data
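# A minimal usage sketch for asDict() above, assuming a hypothetical attrs
# class with defaults; the names here are illustrative, not from the source.
# Only fields that differ from a freshly constructed instance survive.
import attr

@attr.s
class _ExampleConfig:
    host = attr.ib(default="localhost")
    port = attr.ib(default=8080)
    asDict = asDict  # reuse the method above; illustrative only

assert _ExampleConfig(port=9090).asDict() == {"port": 9090}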
async def async_save_to_store(self) -> None:
    """Generate dynamic data to store and save it to the filesystem."""
    store_data = {'device_info': attr.asdict(self.device_info)}
    for comp_type, infos in self.info.items():
        store_data[comp_type] = [attr.asdict(info) for info in infos.values()]
    await self.store.async_save(store_data)
def to_dict(self):
    """Copy the values of a Service into a new dict"""
    srv_dict = attr.asdict(self)
    # convert SimpleNamespace to dict
    props = srv_dict['properties']
    srv_dict['properties'] = {key: getattr(props, key) for key in props.__dict__.keys()}
    return srv_dict
def test_load_modifiers(self):
    # Given
    yaml = six.StringIO(textwrap.dedent("""\
        packages:
          - MKL 10.3-1
        modifiers:
          allow_newer: [MKL]
          allow_older:
            - numpy
          allow_any:
            - pyzmq
            - pandas
        request:
          - operation: install
            requirement: numpy
    """))
    expected = {
        'allow_newer': set(['MKL']),
        'allow_older': set(['numpy']),
        'allow_any': set(['pyzmq', 'pandas']),
    }

    # When
    scenario = Scenario.from_yaml(yaml)

    # Then
    constraints = attr.asdict(scenario.request.modifiers, recurse=False)
    self.assertEqual(constraints, expected)
def get_token_and_service_catalog(self, request):
    """
    Return a service catalog consisting of all plugin endpoints and an api
    token.
    """
    try:
        content = json_from_request(request)
    except ValueError:
        pass
    else:
        for cred_type in (PasswordCredentials, APIKeyCredentials,
                          TokenCredentials):
            if cred_type.type_key in content['auth']:
                try:
                    cred = cred_type.from_json(content)
                except (KeyError, TypeError):
                    pass
                else:
                    registry = self.registry_collection.registry_by_event(
                        authentication)
                    behavior = registry.behavior_for_attributes(
                        attr.asdict(cred))
                    return behavior(self.core, request, cred)

    request.setResponseCode(400)
    return json.dumps(invalid_resource("Invalid JSON request body"))
def test_correct_connect(self):
    """
    The most basic possible connect -- MQTT 3.1.1, no QoS/username/password
    and compliant with the spec.
    """
    events = []
    p = MQTTParser()

    good = b"\x10\x13\x00\x04MQTT\x04\x02\x00x\x00\x07test123"

    for x in iterbytes(good):
        events.extend(p.data_received(x))

    self.assertEqual(len(events), 1)
    self.assertEqual(
        attr.asdict(events[0]),
        {
            'username': None,
            'password': None,
            'will_message': None,
            'will_topic': None,
            'client_id': u"test123",
            'keep_alive': 120,
            'flags': {
                'username': False,
                'password': False,
                'will': False,
                'will_qos': 0,
                'will_retain': False,
                'clean_session': True,
                'reserved': False
            }
        }
    )
def line_part(self):
    return " and ".join(
        [
            "{0} {1}".format(k, v)
            for k, v in attr.asdict(self, filter=filter_none).items()
        ]
    )
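# A hedged sketch of the pieces line_part() relies on: filter_none is assumed
# to be an attrs filter that drops None-valued fields, and _Clause is a
# hypothetical attrs class used only to show the joined output.
import attr

def filter_none(attribute, value):
    # attr.asdict calls this once per field; keep only non-None values.
    return value is not None

@attr.s
class _Clause:
    status = attr.ib(default="active")
    owner = attr.ib(default=None)
    line_part = line_part  # reuse the method above; illustrative only

assert _Clause().line_part() == "status active"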
def save(self, app_data):
    """
    Commit an app_data to the database
    :param sakia.data.entities.AppData app_data: the app_data to commit
    """
    with open(self._file, 'w') as outfile:
        json.dump(attr.asdict(app_data), outfile, indent=4)
def format_and_print_on(self, movie: MovieValue, stream: IO[str]):
    if self._stream is None:
        self._stream = stream
    self._array.append(asdict(movie))
    stream.write(self._create_formatted_movie())
    return self
def test_non_zero_connect_code_must_have_no_present_session(self):
    """
    A non-zero connect code in a CONNACK must be paired with no session
    present.

    Compliance statement MQTT-3.2.2-4
    """
    h = BasicHandler(self.connect_code)
    r, t, p, cp = make_test_items(h)

    data = (
        Connect(client_id=u"test123",
                flags=ConnectFlags(clean_session=False)).serialise()
    )

    for x in iterbytes(data):
        p.dataReceived(x)

    events = cp.data_received(t.value())
    self.assertEqual(len(events), 1)
    self.assertEqual(
        attr.asdict(events[0]),
        {
            'return_code': self.connect_code,
            'session_present': False,
        })
def to_json_dict(self):
    data_dict = asdict(self)
    for k in ("version", "metadata_version", "language_version"):
        data_dict[k] = str(data_dict[k])
    data_dict["platform"] = _platform_string(self.platform)
    return data_dict
def _account_data(cloud):
    """Generate the auth data JSON response."""
    from hass_nabucasa.const import STATE_DISCONNECTED

    if not cloud.is_logged_in:
        return {
            'logged_in': False,
            'cloud': STATE_DISCONNECTED,
        }

    claims = cloud.claims
    client = cloud.client
    remote = cloud.remote

    # Load remote certificate
    if remote.certificate:
        certificate = attr.asdict(remote.certificate)
    else:
        certificate = None

    return {
        'logged_in': True,
        'email': claims['email'],
        'cloud': cloud.iot.state,
        'prefs': client.prefs.as_dict(),
        'google_entities': client.google_user_config['filter'].config,
        'google_domains': list(google_const.DOMAIN_TO_GOOGLE_TYPES),
        'alexa_entities': client.alexa_config.should_expose.config,
        'alexa_domains': list(alexa_sh.ENTITY_ADAPTERS),
        'remote_domain': remote.instance_domain,
        'remote_connected': remote.is_connected,
        'remote_certificate': certificate,
    }
def test_send_email_success(self, monkeypatch):
    class FakeMailSender:
        def __init__(self):
            self.emails = []

        def send(self, recipient, msg):
            self.emails.append(
                {
                    "subject": msg.subject,
                    "body": msg.body_text,
                    "html": msg.body_html,
                    "recipient": recipient,
                }
            )

    sender = FakeMailSender()
    task = pretend.stub()
    request = pretend.stub(
        find_service=pretend.call_recorder(lambda *a, **kw: sender)
    )

    msg = EmailMessage(subject="subject", body_text="body")

    email.send_email(task, request, "recipient", attr.asdict(msg))

    assert request.find_service.calls == [pretend.call(IEmailSender)]
    assert sender.emails == [
        {
            "subject": "subject",
            "body": "body",
            "html": None,
            "recipient": "recipient",
        }
    ]
def load(self, filepath='pwm.settings'):
    """Load settings from a JSON file"""
    with open(filepath) as infile:
        file_dict = json.load(infile)
    passwd_filter = self._get_attr_filters()
    attr_fields = attr.asdict(self, filter=passwd_filter)
    try:
        for attr_key in attr_fields:
            if attr_key in file_dict:
                self.__setattr__(attr_key, file_dict[attr_key])
                try:
                    attr.validate(self)
                except TypeError:
                    # Python 2 fix
                    value = file_dict[attr_key].encode("utf-8")
                    self.__setattr__(attr_key, value)
                    attr.validate(self)
    except TypeError as err:
        # If attrs are of the wrong type then roll back
        for attr_key in attr_fields:
            self.__setattr__(attr_key, attr_fields[attr_key])
        raise TypeError(err)
def test_inheritance_from_nonslots():
    """
    Inheritance from a non-slot class works.

    Note that a slots class inheriting from an ordinary class loses most of
    the benefits of slots classes, but it should still work.
    """
    @attr.s(slots=True)
    class C2Slots(C1):
        z = attr.ib()

    c2 = C2Slots(x=1, y=2, z="test")

    assert 1 == c2.x
    assert 2 == c2.y
    assert "test" == c2.z

    c2.t = "test"  # This will work, using the base class.

    assert "test" == c2.t

    assert 1 == c2.method()
    assert "clsmethod" == c2.classmethod()
    assert "staticmethod" == c2.staticmethod()

    assert set(["z"]) == set(C2Slots.__slots__)

    c3 = C2Slots(x=1, y=3, z="test")

    assert c3 > c2

    c2_ = C2Slots(x=1, y=2, z="test")

    assert c2 == c2_

    assert "C2Slots(x=1, y=2, z='test')" == repr(c2)

    hash(c2)  # Just to assert it doesn't raise.

    assert {"x": 1, "y": 2, "z": "test"} == attr.asdict(c2)
def as_pipfile(self):
    good_keys = (
        "hashes",
        "extras",
        "markers",
        "editable",
        "version",
        "index",
    ) + VCS_LIST
    req_dict = {
        k: v
        for k, v in attr.asdict(self, recurse=False, filter=filter_none).items()
        if k in good_keys
    }
    name = self.name
    base_dict = {
        k: v
        for k, v in self.req.pipfile_part[name].items()
        if k not in ["req", "link"]
    }
    base_dict.update(req_dict)
    conflicting_keys = ("file", "path", "uri")
    if "file" in base_dict and any(k in base_dict for k in conflicting_keys[1:]):
        conflicts = [k for k in conflicting_keys[1:] if k in base_dict]
        for k in conflicts:
            base_dict.pop(k)
    if "hashes" in base_dict and len(base_dict["hashes"]) == 1:
        base_dict["hash"] = base_dict.pop("hashes")[0]
    if len(base_dict.keys()) == 1 and "version" in base_dict:
        base_dict = base_dict.get("version")
    return {name: base_dict}
def parse_url(request, url_pattern, id=None, require_id=False):
    logger.debug('url_pattern: %s', url_pattern)
    logger.debug('url: %s', request.url)
    url_kw = re.compile(url_pattern).search(request.url).groupdict()
    logger.debug('url_kw: %s', url_kw)
    if 'resource' not in url_kw:
        raise Http404
    if require_id and 'id' not in url_kw:
        raise Http404
    hostname = urlparse.urlparse(request.url).hostname
    logger.debug('hostname: %s', hostname)
    action = url_kw.pop('action', 'default')
    logger.debug('action: %s', action)
    resource_context = ResourceContext(
        hostname=hostname,
        resource=url_kw.pop('resource'),
        action=action,
        id=url_kw.pop('id', id),
    )
    logger.debug('resource_context: %s', attr.asdict(resource_context))
    return resource_context
def test_recurse_property(self, cls, dict_class):
    """
    Property tests for recursive asdict.
    """
    obj = cls()
    obj_dict = asdict(obj, dict_factory=dict_class)

    def assert_proper_dict_class(obj, obj_dict):
        assert isinstance(obj_dict, dict_class)
        for field in fields(obj.__class__):
            field_val = getattr(obj, field.name)
            if has(field_val.__class__):
                # This field holds a class, recurse the assertions.
                assert_proper_dict_class(field_val, obj_dict[field.name])
            elif isinstance(field_val, Sequence):
                dict_val = obj_dict[field.name]
                for item, item_dict in zip(field_val, dict_val):
                    if has(item.__class__):
                        assert_proper_dict_class(item, item_dict)
            elif isinstance(field_val, Mapping):
                # This field holds a dictionary.
                assert isinstance(obj_dict[field.name], dict_class)
                for key, val in field_val.items():
                    if has(val.__class__):
                        assert_proper_dict_class(val, obj_dict[field.name][key])

    assert_proper_dict_class(obj, obj_dict)
def as_fill_value(self, user_type=None, auth_mode=None):
    """Basic implementation matches instance attributes to view form attributes"""
    class_attrs = [att.name for att in self.__attrs_attrs__]
    include_attrs = [
        getattr(self.__class__, name)
        for name in self.view_class.cls_widget_names()
        if name in class_attrs
    ]
    fill = attr.asdict(self, filter=attr.filters.include(*include_attrs))
    return fill
def save(self, filepath='pwm.settings'):
    """Save settings to a JSON file"""
    passwd_filter = self._get_attr_filters()
    attr_dict = attr.asdict(self, filter=passwd_filter)
    with open(filepath, 'w') as outfile:
        json.dump(attr_dict, outfile, sort_keys=True, indent=4)
def test_asdict(self):
    """
    `attr.asdict` works.
    """
    assert {
        "x": 1,
        "y": 2,
    } == attr.asdict(C1(x=1, y=2))
def test_lists_tuples(self, container, C):
    """
    If recurse is True, also recurse into lists.
    """
    assert {
        "x": 1,
        "y": [{"x": 2, "y": 3}, {"x": 4, "y": 5}, "a"],
    } == asdict(C(1, container([C(2, 3), C(4, 5), "a"])))
def test_asdict_preserve_order(self, cls):
    """
    Field order should be preserved when dumping to OrderedDicts.
    """
    instance = cls()
    dict_instance = asdict(instance, dict_factory=OrderedDict)

    assert [a.name for a in fields(cls)] == list(dict_instance.keys())
def test_shallow(self, C, dict_factory):
    """
    Shallow asdict returns correct dict.
    """
    assert {
        "x": 1,
        "y": 2,
    } == asdict(C(x=1, y=2), False, dict_factory=dict_factory)
def update_node(self, lb_id, node_id, node_updates):
    """
    Update the weight, condition, or type of a single node.  The IP, port,
    status, and ID are immutable, and attempting to change them will cause
    a 400 response to be returned.

    All success and error behavior verified as of 2016-06-16.

    :param str lb_id: the load balancer ID
    :param str node_id: the node ID to update
    :param dict node_updates: The JSON dictionary containing node attributes
        to update

    :return: a `tuple` of (json response as a dict, http status code)
    """
    feed_summary = (
        "Node successfully updated with address: '{address}', port: '{port}', "
        "weight: '{weight}', condition: '{condition}'")

    # first, store whether address and port were provided - if they were
    # that's a validation error not a schema error
    things_wrong = {k: True for k in ("address", "port", "id")
                    if k in node_updates}
    node_updates = {k: node_updates[k] for k in node_updates
                    if k not in ("address", "port")}

    # use the Node.from_json to check the schema
    try:
        Node.from_json(dict(address="1.1.1.1", port=80, **node_updates))
    except (TypeError, ValueError):
        return invalid_json_schema()

    # handle the possible validation (as opposed to schema) errors
    if not 1 <= node_updates.get('weight', 1) <= 100:
        things_wrong["weight"] = True
    if things_wrong:
        return updating_node_validation_error(**things_wrong)

    # Now, finally, check if the LB exists and node exists
    if lb_id in self.lbs:
        self._verify_and_update_lb_state(lb_id, False, self.clock.seconds())

        if self.lbs[lb_id]["status"] != "ACTIVE":
            return considered_immutable_error(
                self.lbs[lb_id]["status"], lb_id)

        for i, node in enumerate(self.lbs[lb_id].nodes):
            if node.id == node_id:
                params = attr.asdict(node)
                params.update(node_updates)
                self.lbs[lb_id].nodes[i] = Node(**params)
                self.lbs[lb_id].nodes[i].feed_events.append(
                    (feed_summary.format(**params),
                     seconds_to_timestamp(self.clock.seconds())))
                return ("", 202)

        return node_not_found()

    return loadbalancer_not_found()
def asdict(self):
    """Convert to a dictionary."""
    return attr.asdict(self)
def to_json(self):
    """Return a JSON representation of the PdfFile."""
    json_pdf_file = json.dumps(attr.asdict(self))
    return json_pdf_file
def async_get_schedule(self, entity_id) -> dict:
    """Get an existing ScheduleEntry by id."""
    res = self.schedules.get(entity_id)
    return attr.asdict(res) if res else None
def generate_platform_rel():
    rows = [BugzillaRow(), BugzillaRow(whiteboard="[platform-rel-google]")]
    return {"bugs": [attr.asdict(row) for row in rows]}
def _parse(self, string):
    """Parse a string and return its features.

    :param string: A one-symbol string in NFD

    Notes
    -----
    Strategy is rather simple: we determine the base part of a string and
    then search left and right of this part for the additional features as
    expressed by the diacritics. Fails if a segment has more than one basic
    part.
    """
    nstring = self._norm(string)

    # check whether sound is in self.sounds
    if nstring in self.sounds:
        sound = self.sounds[nstring]
        sound.normalized = nstring != string
        sound.source = string
        return sound

    match = list(self._regex.finditer(nstring))
    # if the match has length 2, we assume that we have two sounds, so we split
    # the sound and pass it on for separate evaluation (recursive function)
    if len(match) == 2:
        sound1 = self._parse(nstring[:match[1].start()])
        sound2 = self._parse(nstring[match[1].start():])
        # if we have ANY unknown sound, we mark the whole sound as unknown, if
        # we have two known sounds of the same type (vowel or consonant), we
        # either construct a diphthong or a cluster
        if 'unknownsound' not in (sound1.type, sound2.type) and \
                sound1.type == sound2.type:
            # diphthong creation
            if sound1.type == 'vowel':
                return Diphthong.from_sounds(  # noqa: F405
                    string, sound1, sound2, self)
            elif sound1.type == 'consonant' and \
                    sound1.manner in ('stop', 'implosive', 'click', 'nasal') and \
                    sound2.manner in ('stop', 'implosive', 'affricate', 'fricative'):
                return Cluster.from_sounds(  # noqa: F405
                    string, sound1, sound2, self)
        return UnknownSound(grapheme=nstring, source=string, ts=self)  # noqa: F405

    if len(match) != 1:
        # Either no match or more than one; both is considered an error.
        return UnknownSound(grapheme=nstring, source=string, ts=self)  # noqa: F405

    pre, mid, post = nstring.partition(
        nstring[match[0].start():match[0].end()])
    base_sound = self.sounds[mid]
    if isinstance(base_sound, Marker):  # noqa: F405
        assert pre or post
        return UnknownSound(grapheme=nstring, source=string, ts=self)  # noqa: F405

    # A base sound with diacritics or a custom symbol.
    features = attr.asdict(base_sound)
    features.update(
        source=string,
        generated=True,
        normalized=nstring != string,
        base=base_sound.grapheme)

    # we construct two versions: the "normal" version and the version where
    # we search for aliases and normalize them (as our features system for
    # diacritics may well define aliases)
    grapheme, sound = '', ''
    for dia in [p + EMPTY for p in pre]:
        feature = self.diacritics[base_sound.type].get(dia, {})
        if not feature:
            return UnknownSound(  # noqa: F405
                grapheme=nstring, source=string, ts=self)
        features[self._feature_values[feature]] = feature
        # we add the unaliased version to the grapheme
        grapheme += dia[0]
        # we add the corrected version (if this is needed) to the sound
        sound += self.features[base_sound.type][feature][0]

    # add the base sound
    grapheme += base_sound.grapheme
    sound += base_sound.s

    for dia in [EMPTY + p for p in post]:
        feature = self.diacritics[base_sound.type].get(dia, {})
        # we are strict: if we don't know the feature, it's an unknown
        # sound
        if not feature:
            return UnknownSound(  # noqa: F405
                grapheme=nstring, source=string, ts=self)
        features[self._feature_values[feature]] = feature
        grapheme += dia[1]
        sound += self.features[base_sound.type][feature][1]

    features['grapheme'] = sound
    new_sound = self.sound_classes[base_sound.type](**features)

    # check whether grapheme differs from re-generated sound
    if text_type(new_sound) != sound:
        new_sound.alias = True
    if grapheme != sound:
        new_sound.alias = True
        new_sound.grapheme = grapheme
    return new_sound
def to_json(self):
    """Return a JSON-serializable dictionary representation of the data."""
    return attr.asdict(self)
def test_create_guild(self):
    test_data = {"id": 1, "options": asdict(Options()), "members": []}
    test_guild = FactoryBuilder.create_guild_from_dict(test_data)
    guild = Guild(id=1, options=Options())
    assert test_guild == guild

    test_data_two = {
        "id": 1,
        "options": asdict(Options()),
        "members": [{
            "id": 1,
            "guild_id": 2,
            "is_in_guild": True,
            "warn_count": 5,
            "kick_count": 6,
            "duplicate_count": 7,
            "duplicate_channel_counter_dict": {},
            "messages": [{
                "id": 1,
                "content": "Hello World!",
                "guild_id": 2,
                "author_id": 3,
                "channel_id": 4,
                "is_duplicate": True,
                "creation_time": "225596:21:8:3:12:5:2021",
            }],
        }],
    }
    test_guild_two = FactoryBuilder.create_guild_from_dict(test_data_two)

    time = datetime.datetime.strptime("225596:21:8:3:12:5:2021",
                                      "%f:%S:%M:%H:%d:%m:%Y")
    message = Message(
        id=1,
        content="Hello World!",
        guild_id=2,
        author_id=3,
        channel_id=4,
        is_duplicate=True,
        creation_time=time,
    )
    member_two = Member(
        id=1,
        guild_id=2,
        internal_is_in_guild=True,
        warn_count=5,
        kick_count=6,
        duplicate_counter=7,
        duplicate_channel_counter_dict={},
        messages=[message],
    )
    guild_two = Guild(id=1, options=Options())
    guild_two.members[1] = member_two

    assert test_guild_two == guild_two
def convert_to_base_types(obj, ignore_keys=tuple(), tuple_type=tuple,
                          json_safe=True):
    """Recursively convert objects into base types.

    This is used to convert some special types of objects used internally into
    base types for more friendly output via mechanisms such as JSON.  It is
    used for sending internal objects via the network and outputting test
    records.  Specifically, the conversions that are performed:

    - If an object has an as_base_types() method, immediately return the
      result without any recursion; this can be used with caching in the
      object to prevent unnecessary conversions.
    - If an object has an _asdict() method, use that to convert it to a dict
      and recursively convert its contents.
    - mutablerecords Record instances are converted to dicts that map
      attribute name to value.  Optional attributes with a value of None are
      skipped.
    - Enum instances are converted to strings via their .name attribute.
    - Real and integral numbers are converted to built-in types.
    - Byte and unicode strings are left alone (instances of six.string_types).
    - Other non-None values are converted to strings via str().

    The return value contains only the Python built-in types: dict, list,
    tuple, str, unicode, int, float, long, bool, and NoneType (unless
    tuple_type is set to something else).  If tuples should be converted to
    lists (e.g. for an encoding that does not differentiate between the two),
    pass 'tuple_type=list' as an argument.

    Args:
      obj: object to recursively convert to base types.
      ignore_keys: Iterable of str, keys that should be ignored when recursing
        on dict types.
      tuple_type: Type used for tuple objects.
      json_safe: If True, then the float 'inf', '-inf', and 'nan' values will
        be converted to strings.  This ensures that the returned dictionary
        can be passed to json.dumps to create valid JSON.  Otherwise,
        json.dumps may return values such as NaN which are not valid JSON.

    Returns:
      Version of the object composed of base types.
    """
    # Because it's *really* annoying to pass a single string accidentally.
    assert not isinstance(ignore_keys, six.string_types), 'Pass a real iterable!'

    if hasattr(obj, 'as_base_types'):
        return obj.as_base_types()
    if hasattr(obj, '_asdict'):
        try:
            obj = obj._asdict()
        except TypeError as e:
            # This happens if the object is an uninitialized class.
            logging.warning('Object %s is not initialized, got error %s', obj, e)
    elif isinstance(obj, records.RecordClass):
        new_obj = {}
        for a in type(obj).all_attribute_names:
            val = getattr(obj, a, None)
            if val is not None or a in type(obj).required_attributes:
                new_obj[a] = val
        obj = new_obj
    elif attr.has(type(obj)):
        obj = attr.asdict(obj, recurse=False)
    elif isinstance(obj, enum.Enum):
        obj = obj.name

    if type(obj) in PASSTHROUGH_TYPES:  # pylint: disable=unidiomatic-typecheck
        return obj

    # Recursively convert values in dicts, lists, and tuples.
    if isinstance(obj, dict):
        return {  # pylint: disable=g-complex-comprehension
            convert_to_base_types(k, ignore_keys, tuple_type, json_safe):
                convert_to_base_types(v, ignore_keys, tuple_type, json_safe)
            for k, v in six.iteritems(obj)
            if k not in ignore_keys
        }
    elif isinstance(obj, list):
        return [
            convert_to_base_types(val, ignore_keys, tuple_type, json_safe)
            for val in obj
        ]
    elif isinstance(obj, tuple):
        return tuple_type(
            convert_to_base_types(value, ignore_keys, tuple_type, json_safe)
            for value in obj)

    # Convert numeric types (e.g. numpy ints and floats) into built-in types.
    elif isinstance(obj, numbers.Integral):
        return int(obj)
    elif isinstance(obj, numbers.Real):
        as_float = float(obj)
        if json_safe and (math.isinf(as_float) or math.isnan(as_float)):
            return str(as_float)
        return as_float

    # Convert all other types to strings.
    try:
        return str(obj)
    except:
        logging.warning('Problem casting object of type %s to str.', type(obj))
        raise
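# A hedged usage sketch for convert_to_base_types(), assuming a hypothetical
# attrs class and enum; the names below are illustrative, not from the source.
# The call is left commented out because it depends on the module's own
# PASSTHROUGH_TYPES and records definitions.
import enum

import attr

class _Color(enum.Enum):
    RED = 1

@attr.s
class _Point:
    x = attr.ib()
    color = attr.ib()

# convert_to_base_types(_Point(x=1, color=_Color.RED))
# -> {'x': 1, 'color': 'RED'}   # attrs instance -> dict, Enum -> .name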
def asdict(self):
    return attr.asdict(self, recurse=False)
def serialize_repr_traceback(reprtraceback: ReprTraceback) -> Dict[str, Any]:
    result = attr.asdict(reprtraceback)
    result["reprentries"] = [
        serialize_repr_entry(x) for x in reprtraceback.reprentries
    ]
    return result
def to_dict(self) -> Dict[str, Any]:
    return attr.asdict(self)
def data_to_blob(data):
    as_json = json.dumps(attr.asdict(data)).encode('utf-8')
    compressed = zlib.compress(as_json)
    as_base64 = base64.b32encode(compressed)
    return as_base64.decode('ascii')
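# A minimal sketch of the inverse of data_to_blob() above; blob_to_dict is a
# hypothetical helper, not from the source. b32decode reverses b32encode and
# zlib.decompress reverses zlib.compress, so the round trip recovers the dict.
import base64
import json
import zlib

def blob_to_dict(blob: str) -> dict:
    compressed = base64.b32decode(blob.encode('ascii'))
    as_json = zlib.decompress(compressed)
    return json.loads(as_json.decode('utf-8'))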
def from_process(
    cls, process: RunningProcess, **kwargs: Union[float, str]
) -> "LocalRunningProcess":
    return cls(**dict(attr.asdict(process), **kwargs))
import os
from pprint import PrettyPrinter

import attr

from mavetools.client.client import Client
from mavetools.models.scoreset import ScoreSet

pp = PrettyPrinter(indent=2)  # for formatting output

# Check the environment for a variable named MAVEDB_BASE_URL; if it does not
# exist, fall back to an empty string.
base_url = os.getenv("MAVEDB_BASE_URL", "")

scoreset_urn = "urn:mavedb:00000001-a-1"  # the urn of the score set we want to get

# Generate a new auth_token in your profile and post it here
auth_token = "AseyaNLLhqv9jAm0joMkq2oqB0bw3GKxTclkT2NtG340RF6CfdM2UC3j8Fv4RpbQ"
# auth_token =

# If the base url exists, the client object is instantiated with that value;
# otherwise it is instantiated with the default value, which points to localhost.
client = (Client(base_url, auth_token=auth_token)
          if base_url
          else Client(auth_token=auth_token))

# GET
scoreset = client.get_model_instance(ScoreSet, scoreset_urn)
pp.pprint(attr.asdict(scoreset))
def modified_state(signed_state, new_state):
    return signed_state.to(**attr.asdict(new_state, recurse=False))
def scan_with_config(target: Union[str, List[str]],
                     general_config: GeneralConfig,
                     configured_scripts: Dict[str, ScriptConfig],
                     enabled_scripts: List[str],
                     mail_config: Optional[MailConfig] = None,
                     mailto: Optional[List[str]] = None,
                     extra_substitutions: Optional[Dict[str, str]] = None,
                     validators: Optional[Dict[str, Union[Callable[[Any], Any], str]]] = None,
                     ticketurl: Optional[str] = None,
                     noteardown: bool = False,
                     outputzip: Optional[str] = None,
                     callback: Optional[Callable[[Report], None]] = None) -> Report:
    """
    Allows customizing the configuration objects for the scan.
    """
    # Fail fast.
    if not target:
        error("no URL supplied, supply URL(s) with the 'target' parameter.")

    # Parse targets
    if isinstance(target, str):
        urls = [target]
    else:
        urls = target

    if not configured_scripts:
        error("at least one script must be configured. i.e.:\n[--nikto]\ndescription = Nikto web server scanner\ncommand = nikto -h $url")
    else:
        # TODO: use logger instead of print
        print(f"Configured scripts: {', '.join(configured_scripts.keys())}")

    enabled_scripts = _get_enabled_scripts(configured_scripts, enabled_scripts)

    # Fail fast
    if not enabled_scripts:
        error(f"at least one script must be enabled: use --<script-name>. i.e.: --{next(iter(configured_scripts))}.\n")
    else:
        print(f"Enabled scripts: {' '.join(enabled_scripts) if enabled_scripts != list(configured_scripts) else 'all'}")

    # Handle validators, provide default validator for the --url parameter
    converted_validators: Dict[str, Callable[[Any], Any]] = {
        'url': validate_url,
        'uuid': validatorslib.uuid,
    }
    if validators:
        for k, v in validators.items():
            if isinstance(v, str):
                converted_validators[k] = get_function(v)
            else:
                assert callable(v), f"{v} is not callable"
                converted_validators[k] = v

    # Init mailsender
    mailsender = None
    if mail_config:
        mailsender = MailSender(**attr.asdict(mail_config))

    # init the temp dir if it doesn't already exist
    os.makedirs(TEMP_DIR, exist_ok=True)

    # Begin the actual scanning
    process_urls_scripts_map: Dict[str, Dict[str, Process]] = {}
    for _url in urls:
        process_urls_scripts_map[_url] = {}
        url_info = ('' if len(urls) == 1 else f' - {_url}')  # empty string if we're scanning only one URL.
        # TODO use multiprocessing!
        for script in enabled_scripts:
            process = Process.new(name=script + url_info,
                                  command=configured_scripts[script].command,
                                  setup=configured_scripts[script].setup,
                                  teardown=configured_scripts[script].teardown,
                                  interpolations=dict(
                                      url=_url,
                                      uuid=str(uuid.uuid4()),
                                      **(extra_substitutions or {})),
                                  validators=converted_validators,
                                  timeout=general_config.scan_timeout,)
            process_urls_scripts_map[_url][script] = process

    report = Report(title=general_config.title,
                    url_label=urls[0] if len(urls) == 1 else "Multiple URLs",
                    datetime=datetime.datetime.now().isoformat(timespec='seconds'),
                    ticketurl=ticketurl,
                    description=general_config.description,
                    footer=general_config.footer)

    for _url, scripts_processes in process_urls_scripts_map.items():
        url_info = ('' if len(urls) == 1 else f' - {_url}')  # empty string if we're scanning only one URL.
        for tool_name, process in scripts_processes.items():
            if process:
                process.setup()
                process_result = process.run()
                if process_result.failure:
                    print(f"Script '{configured_scripts[tool_name].title}' failed.")
                else:
                    print(f"Script '{configured_scripts[tool_name].title}' succeeded.")
                report.items.append(ReportItem.new(
                    result=process_result,
                    report=report,
                    truncate_output=general_config.truncate_output,
                    title=configured_scripts[tool_name].title + url_info,
                    description=configured_scripts[tool_name].description,
                    output_files=configured_scripts[tool_name].output_files.splitlines()
                ))

    if mailto:
        if mailsender:
            print("Sending email report... ")
            mailsender.send(report, mailto)
        else:
            print("Not sending email report because no [mail] config is provided. ")

    if outputzip:
        write_report_archive(report, outputzip)

    if callback is not None:
        callback(report)

    if noteardown is not True:
        teardown_scan(report)
    else:
        import warnings
        warnings.warn("Option noteardown is error-prone because temp files might not be cleaned up. Use option callback instead.")

    return report
def copy(self):
    return type(self)(**attr.asdict(self, recurse=False))
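# Why recurse=False matters in copy() above: a hedged sketch with hypothetical
# nested attrs classes. With the default recurse=True, the nested attrs
# instance would be flattened into a plain dict, and the constructor would
# then receive a dict where it expects an _Inner instance.
import attr

@attr.s
class _Inner:
    n = attr.ib()

@attr.s
class _Outer:
    inner = attr.ib()
    copy = copy  # reuse the method above; illustrative only

o = _Outer(inner=_Inner(n=1))
assert o.copy() == o  # the _Inner instance is carried over intact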
def flash_response(msg: str, level: str = "INFO") -> web.Response:
    response = web.json_response(
        data={"data": attr.asdict(LogMessageType(msg, level)), "error": None}
    )
    return response
def test_asdict(self, cls):
    """
    `attr.asdict` works.
    """
    assert {"x": 1, "y": 2} == attr.asdict(cls(x=1, y=2))
def async_get_schedules(self) -> dict:
    """Return all ScheduleEntries, keyed by id."""
    res = {}
    for (key, val) in self.schedules.items():
        res[key] = attr.asdict(val)
    return res
def to_dict(self):
    return attr.asdict(self)
def register_options(self, parser: OptionManager) -> None:
    """Registers options for our plugin."""
    for option in self.options:
        parser.add_option(**attr.asdict(option))
def generate_bugzilla():
    rows = [
        BugzillaRow(),
    ]
    bugs = [attr.asdict(row) for row in rows]
    return {"bugs": bugs}
def _get_args(self) -> List[str]:
    return _flatten(
        self._get_arg(k, v)
        for (k, v) in attr.asdict(self, recurse=False).items()
        if v is not None and not k.startswith("_")
    )
def _main(data, glottolog):
    languoids = list(glottolog.languoids())
    lbyi = {l.iso: l for l in languoids if l.iso}

    dataset = common.Dataset(
        id='ldh',
        name='Language Description Heritage',
        publisher_name="Max Planck Institute for the Science of Human History",
        publisher_place="Jena",
        publisher_url="https://www.shh.mpg.de",
        license="https://creativecommons.org/licenses/by/4.0/",
        domain='ldh.clld.org',
        contact='*****@*****.**',
        jsondata={
            'license_icon': 'cc-by.png',
            'license_name': 'Creative Commons Attribution 4.0 International License'
        })
    DBSession.add(dataset)
    DBSession.add(
        common.Editor(
            dataset=dataset,
            contributor=common.Contributor(id='forkel', name='Robert Forkel')))

    ls = set()
    for post in iter_posts():
        if post.pure_item_id:
            item = pure.Item.from_json(post.pure_item_id)
            src = data['Description'].get(item.id)
            if not src:
                src = data.add(
                    models.Description,
                    item.id,
                    id=item.id,
                    description=item.title,
                    name=item.name,
                    bibtex_type=EntryType.get(item.bibtex_type),
                    year=item.year,
                    title=item.title,
                    address=item.publisher.get('place') if item.publisher else None,
                    publisher=item.publisher.get('publisher') if item.publisher else None,
                    author=' and '.join(item.authors),
                    editor=' and '.join(item.editors),
                    pid=item.doi or item.pid,
                    pid_type='doi' if item.doi else 'hdl',
                )
                DBSession.flush()
                for file in item.files:
                    if file.visibility == 'PUBLIC' \
                            and file.metadata["contentCategory"] == "any-fulltext" \
                            and file.storage == 'INTERNAL_MANAGED':
                        assert file.mimeType == 'application/pdf'
                        DBSession.add(
                            common.Source_files(
                                id=file.pid.replace('/', '__'),
                                name=file.name,
                                object_pk=src.pk,
                                mime_type=file.mimeType,
                                jsondata=dict(
                                    size=file.size,
                                    license=attr.asdict(file.license) if file.license else None),
                            ))
            for iso in item.isocodes:
                if iso in lbyi:
                    gl = lbyi[iso]
                    l = data['LDHLanguage'].get(iso)
                    if not l:
                        l = data.add(models.LDHLanguage, iso, id=iso, name=gl.name)
                    DBSession.flush()
                    if (item.id, iso) not in ls:
                        DBSession.add(
                            common.LanguageSource(language_pk=l.pk, source_pk=src.pk))
                        ls.add((item.id, iso))

    for item in zenodo.iter_items():
        src = data.add(
            models.Description,
            item.id,
            id=item.id,
            description=item['metadata']['title'],
            name=item.name,
            bibtex_type=EntryType.get(item.bibtex_type),
            year=item.year,
            title=item['metadata']['title'],
            publisher='Zenodo',
            author=' and '.join(a['name'] for a in item['metadata']['creators']),
            pid=item['metadata']['doi'],
            pid_type='doi',
        )
        DBSession.flush()
        for file in item['files']:
            license = licenses.find(item['metadata']['license']['id'])
            DBSession.add(
                common.Source_files(
                    id=file['checksum'].replace('md5:', ''),
                    name=file['key'],
                    object_pk=src.pk,
                    mime_type='application/' + file['type'],
                    jsondata=dict(
                        size=file['size'],
                        url=file['links']['self'],
                        license=attr.asdict(license) if license else None),
                ))
        for kw in item['metadata']['keywords']:
            if not kw.startswith('iso:'):
                continue
            iso = kw.replace('iso:', '')
            if iso in lbyi:
                gl = lbyi[iso]
                l = data['LDHLanguage'].get(iso)
                if not l:
                    l = data.add(models.LDHLanguage, iso, id=iso, name=gl.name)
                DBSession.flush()
                if (item.id, iso) not in ls:
                    DBSession.add(
                        common.LanguageSource(language_pk=l.pk, source_pk=src.pk))
                    ls.add((item.id, iso))

    load_families(data, data['LDHLanguage'].values(),
                  glottolog_repos=glottolog.repos, isolates_icon='tcccccc')
def save(self, dir_path):
    with open(os.path.join(dir_path, self._OUTPUT_FILE_NAME), 'w') as f:
        index_to_rule = [attr.asdict(rule) for rule in self._index_to_rule]
        json.dump(index_to_rule, f, indent=2, ensure_ascii=False)
def to_dict(self) -> dict:
    return attr.asdict(self)
def style_single_cell(cell: Cell, style: Style) -> None:
    for attrib, value in attr.asdict(style).items():
        if value:
            setattr(cell, attrib, value)
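# A hedged sketch of how style_single_cell() might be driven, assuming a
# hypothetical Style-like attrs class whose field names mirror Cell
# attributes; falsy (unset) fields are skipped, so existing styling survives.
import attr

@attr.s
class _Style:
    font = attr.ib(default=None)
    alignment = attr.ib(default=None)

# style_single_cell(cell, _Style(font=bold_font))  # would set only cell.font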
def render_assignment_editor(request, course, level_number, assignment_number,
                             menu, translations, version, loaded_program,
                             adventure_assignments, adventure_name):
    if os.path.isfile(f'coursedata/quiz/quiz_questions_lvl{level_number}.yaml'):
        quiz_data = utils.load_yaml(
            f'coursedata/quiz/quiz_questions_lvl{level_number}.yaml')
        quiz_data_level = quiz_data['level']
    else:
        quiz_data_level = 0

    sublevel = None
    if isinstance(level_number, str) and re.match(r'\d+-\d+', level_number):
        sublevel = int(level_number[level_number.index('-') + 1:])
        level_number = int(level_number[0:level_number.index('-')])

    assignment = course.get_assignment(level_number, assignment_number, sublevel)
    if not assignment:
        abort(404)

    arguments_dict = {}

    # Meta stuff
    arguments_dict['course'] = course
    arguments_dict['level_nr'] = str(level_number)
    arguments_dict['sublevel'] = str(sublevel) if sublevel else None
    arguments_dict['assignment_nr'] = assignment.step  # Give this a chance to be 'None'
    arguments_dict['lang'] = course.language
    arguments_dict['level'] = assignment.level
    arguments_dict['prev_level'] = int(level_number) - 1 if int(level_number) > 1 else None
    arguments_dict['next_level'] = int(level_number) + 1 if int(level_number) < course.max_level() else None
    arguments_dict['next_assignment'] = int(assignment_number) + 1 if int(assignment_number) < course.max_step(level_number) else None
    arguments_dict['menu'] = menu
    arguments_dict['latest'] = version
    arguments_dict['selected_page'] = 'code'
    arguments_dict['page_title'] = f'Level {level_number} – Hedy'
    arguments_dict['docs'] = [attr.asdict(d) for d in assignment.docs]
    arguments_dict['auth'] = translations.data[course.language]['Auth']
    arguments_dict['username'] = current_user(request)['username']
    arguments_dict['loaded_program'] = loaded_program
    arguments_dict['adventure_assignments'] = adventure_assignments
    arguments_dict['adventure_name'] = adventure_name
    arguments_dict['quiz_data_level'] = quiz_data_level
    arguments_dict['quiz_enabled'] = config['quiz-enabled'] and course.language == 'nl'

    # Translations
    arguments_dict.update(**translations.get_translations(course.language, 'ui'))

    # Actual assignment
    arguments_dict.update(**attr.asdict(assignment))

    # Add markdowns to docs
    for doc in arguments_dict['docs']:
        doc_obj = course.docs.get(int(level_number), doc['slug'])
        doc['markdown'] = doc_obj.markdown if doc_obj else ''

    return render_template("code-page.html", **arguments_dict)
def test_conda_init_install_and_detect():
    test_dir = "/tmp/niceman_conda/miniconda"
    dist = CondaDistribution(
        name="conda",
        path=test_dir,
        conda_version="4.3.31",
        python_version="2.7.14.final.0",
        platform=get_conda_platform_from_python(sys.platform) + "-64",
        environments=[
            CondaEnvironment(
                name="root",
                path=test_dir,
                packages=[
                    {
                        "name": "conda",
                        "installer": None,
                        "version": "4.3.31",
                        "build": None,
                        "channel_name": None,
                        "md5": None,
                        "size": None,
                        "url": None,
                        "files": None,
                    },
                    {
                        "name": "pip",
                        "installer": None,
                        "version": "9.0.1",
                        "build": None,
                        "channel_name": None,
                        "md5": None,
                        "size": None,
                        "url": None,
                        "files": None,
                    },
                    {
                        "name": "pytest",
                        "installer": "pip",
                        "version": "3.4.0",
                        "build": None,
                        "channel_name": None,
                        "md5": None,
                        "size": None,
                        "url": None,
                        "files": None,
                    },
                ],
                channels=[
                    {
                        "name": "conda-forge",
                        "url": "https://conda.anaconda.org/conda-forge/linux-64",
                    },
                    {
                        "name": "defaults",
                        "url": "https://repo.continuum.io/pkgs/main/linux-64",
                    },
                ],
            ),
            CondaEnvironment(
                name="mytest",
                path=os.path.join(test_dir, "envs/mytest"),
                packages=[
                    {
                        "name": "pip",
                        "installer": None,
                        "version": "9.0.1",
                        "build": None,
                        "channel_name": None,
                        "md5": None,
                        "size": None,
                        "url": None,
                        "files": None,
                    },
                    {
                        "name": "xz",
                        "installer": None,
                        "version": "5.2.3",
                        "build": "0",
                        "channel_name": "conda-forge",
                        "md5": "f4e0d30b3caf631be7973cba1cf6f601",
                        "size": "874292",
                        "url": "https://conda.anaconda.org/conda-forge/linux-64/xz-5.2.3-0.tar.bz2",
                        "files": ["bin/xz"],
                    },
                    {
                        "name": "rpaths",
                        "installer": "pip",
                        "version": "0.13",
                        "build": None,
                        "channel_name": None,
                        "md5": None,
                        "size": None,
                        "url": None,
                        "files": ["lib/python2.7/site-packages/rpaths.py"],
                    },
                ],
                channels=[
                    {
                        "name": "conda-forge",
                        "url": "https://conda.anaconda.org/conda-forge/linux-64",
                    },
                ],
            ),
        ])

    # First install the environment in /tmp/niceman_conda/miniconda
    dist.initiate(None)
    dist.install_packages()

    # Add an empty environment to test detection of them
    if not os.path.exists(os.path.join(test_dir, "envs/empty")):
        call("cd " + test_dir + "; "
             "./bin/conda create -y -n empty; ", shell=True)

    # Test that editable packages are detected
    pymod_dir = os.path.join(test_dir, "minimal_pymodule")
    if not os.path.exists(pymod_dir):
        create_pymodule(pymod_dir)
    call([os.path.join(test_dir, "envs/mytest/bin/pip"),
          "install", "-e", pymod_dir])

    # Now pick some files we know are in the conda install and detect them
    files = [
        os.path.join(test_dir, "bin/pip"),
        os.path.join(test_dir, "envs/mytest/bin/xz"),
        os.path.join(test_dir, "envs/empty/conda-meta/history"),
    ]
    tracer = CondaTracer()
    dists = list(tracer.identify_distributions(files))

    assert len(dists) == 1, "Exactly one Conda distribution expected."

    (distributions, unknown_files) = dists[0]
    # NicemanProvenance.write(sys.stdout, distributions)

    assert distributions.platform.startswith(
        get_conda_platform_from_python(sys.platform)), \
        "A conda platform is expected."

    assert len(distributions.environments) == 3, \
        "Three conda environments are expected."

    out = {
        'environments': [
            {'name': 'root',
             'packages': [{'name': 'pip'}]},
            {'name': 'mytest',
             'packages': [{'name': 'xz'},
                          {'name': 'pip'},
                          {'name': 'rpaths',
                           'installer': 'pip',
                           'editable': False},
                          {'name': 'nmtest',
                           'files': [],
                           'installer': 'pip',
                           'editable': True}]},
        ]
    }
    assert_is_subset_recur(out, attr.asdict(distributions), [dict, list])

    # conda packages are not repeated as "pip" packages.
    for envs in distributions.environments:
        for pkg in envs.packages:
            if pkg.name == "pip":
                assert pkg.installer is None