def barChart(size: typing.Tuple[float, float, int], data: typing.Mapping,
             output: typing.BinaryIO) -> None:
    d = data['x']
    ind = np.arange(len(d))
    ys = data['y']
    width = 0.60
    fig: Figure = Figure(figsize=(size[0], size[1]), dpi=size[2])  # type: ignore
    FigureCanvas(fig)  # Stores canvas on fig.canvas
    axis = fig.add_subplot(111)
    axis.grid(color='r', linestyle='dotted', linewidth=0.1, alpha=0.5)
    bottom = np.zeros(len(ys[0]['data']))
    for y in ys:
        axis.bar(ind, y['data'], width, bottom=bottom, label=y.get('label'))
        bottom += np.array(y['data'])
    axis.set_title(data.get('title', ''))
    axis.set_xlabel(data['xlabel'])
    axis.set_ylabel(data['ylabel'])
    if data.get('allTicks', True) is True:
        axis.set_xticks(ind)
    if 'xtickFnc' in data:
        axis.set_xticklabels([data['xtickFnc'](v) for v in axis.get_xticks()])
    axis.legend()
    fig.savefig(output, format='png', transparent=True)

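# A minimal usage sketch for barChart (hypothetical values; relies on the
# module-level np/Figure/FigureCanvas imports): each entry of data['y'] is
# one stacked series whose 'data' aligns with data['x'].
def _barchart_example(path: str = 'chart.png') -> None:  # hypothetical helper
    data = {
        'x': ['Q1', 'Q2', 'Q3'],
        'y': [
            {'data': [1.0, 2.0, 3.0], 'label': 'series A'},
            {'data': [2.0, 1.0, 0.5], 'label': 'series B'},  # stacked on A
        ],
        'xlabel': 'Quarter',
        'ylabel': 'Count',
        'xtickFnc': lambda v: ['Q1', 'Q2', 'Q3'][int(v)],
    }
    with open(path, 'wb') as output:
        barChart((8.0, 4.0, 100), data, output)  # 8x4 inches at 100 dpi
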
def extract_value(
        self,
        track: typing.Mapping,
        context: typing.MutableMapping,
) -> typing.Optional[T]:
    """Extract the property value from a given track."""
    for name in self.names:
        names = name.split('.')
        value = (track.get(names[0], {}).get(names[1])
                 if len(names) == 2 else track.get(name))
        if value is None:
            if self.default is None:
                continue
            value = self.default
        if isinstance(value, bytes):
            value = value.decode()
        if isinstance(value, str):
            value = value.translate(_visible_chars_table).strip()
            if _is_unknown(value):
                continue
            value = self._deduplicate(value)
        result = self.handle(value, context)
        if result is not None and not _is_unknown(result):
            return result
    return None

def lineChart(size: typing.Tuple[float, float, int], data: typing.Mapping,
              output: typing.BinaryIO) -> None:
    x = data['x']
    y = data['y']
    fig: Figure = Figure(figsize=(size[0], size[1]), dpi=size[2])  # type: ignore
    FigureCanvas(fig)  # Stores canvas on fig.canvas
    axis = fig.add_subplot(111)
    axis.grid(color='r', linestyle='dotted', linewidth=0.1, alpha=0.5)
    for i in y:
        yy = i['data']
        axis.plot(x, yy, label=i.get('label'), marker='.', color='orange')
        axis.fill_between(x, yy, 0)
    axis.set_title(data.get('title', ''))
    axis.set_xlabel(data['xlabel'])
    axis.set_ylabel(data['ylabel'])
    if data.get('allTicks', True) is True:
        axis.set_xticks(x)
    if 'xtickFnc' in data:
        axis.set_xticklabels([data['xtickFnc'](v) for v in axis.get_xticks()])
    axis.legend()
    fig.savefig(output, format='png', transparent=True)

def __init__(self, sim_spec: typing.Mapping, *, backend: str):
    """Construct from a simulation specification and the backend fixture."""
    self.domain = sim_spec.get("domain", analytical.DOMAIN)
    self.time_step = 1e-3
    self.max_time = 1e-2
    self.shape = sim_spec.get("shape", (16, 16, 16))
    self.backend_name = backend
    self.tolerance = sim_spec["tolerance"]
    dspace = numpy.array(analytical.DOMAIN, dtype=numpy.float64) / numpy.array(
        self.shape, dtype=numpy.float64)
    stencil_args = {
        "backend": self.backend_name,
        "shape": self.shape,
        "dspace": dspace,
        "time_step": self.time_step,
    }
    stencil_args.update(sim_spec.get("extra-args", {}))
    self.extra_args = sim_spec.get("extra-args", {})
    self.stencil = sim_spec["stencil"](**stencil_args)
    self.reference = sim_spec["reference"]
    storage_b = self.stencil.storage_builder().default_origin(
        self.stencil.min_origin())
    self.data = storage_b.from_array(
        numpy.fromfunction(self.get_reference, shape=self.shape))
    self.data1 = copy.deepcopy(self.data)
    self._initial_state = copy.deepcopy(self.data)
    self._expected = numpy.fromfunction(
        functools.partial(self.get_reference, time=self.max_time),
        shape=self.shape)

def update(self, headers: typing.Mapping) -> None:
    """Update current rate limits."""
    self.limit = headers.get(self.HEADER_LIMIT)
    self.left = headers.get(self.HEADER_REMAINING)
    timestamp = headers.get(self.HEADER_RESET)
    self.reset = arrow.get(int(timestamp)) if timestamp is not None else None

def surfaceChart(size: typing.Tuple[float, float, int], data: typing.Mapping,
                 output: typing.BinaryIO) -> None:
    x = data['x']
    y = data['y']
    z = data['z']
    logger.debug('X: %s', x)
    logger.debug('Y: %s', y)
    logger.debug('Z: %s', z)
    x, y = np.meshgrid(x, y)
    z = np.array(z)
    logger.debug('X\': %s', x)
    logger.debug('Y\': %s', y)
    logger.debug('Z\': %s', z)
    fig: Figure = Figure(figsize=(size[0], size[1]), dpi=size[2])  # type: ignore
    FigureCanvas(fig)  # Stores canvas on fig.canvas
    axis = fig.add_subplot(111, projection='3d')
    # axis.grid(color='r', linestyle='dotted', linewidth=0.1, alpha=0.5)
    if data.get('wireframe', False) is True:
        axis.plot_wireframe(
            x, y, z, rstride=1, cstride=1, cmap=cm.coolwarm  # type: ignore
        )
    else:
        axis.plot_surface(
            x, y, z, rstride=1, cstride=1, cmap=cm.coolwarm  # type: ignore
        )
    axis.set_title(data.get('title', ''))
    axis.set_xlabel(data['xlabel'])
    axis.set_ylabel(data['ylabel'])
    axis.set_zlabel(data['zlabel'])
    if data.get('allTicks', True) is True:
        axis.set_xticks(data['x'])
        axis.set_yticks(data['y'])
    if 'xtickFnc' in data:
        axis.set_xticklabels([data['xtickFnc'](v) for v in axis.get_xticks()])
    if 'ytickFnc' in data:
        axis.set_yticklabels([data['ytickFnc'](v) for v in axis.get_yticks()])
    fig.savefig(output, format='png', transparent=True)

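# Usage sketch for surfaceChart (hypothetical values): because x and y are
# passed through np.meshgrid, data['z'] must be a 2-D grid with shape
# (len(data['y']), len(data['x'])).
def _surfacechart_example(path: str = 'surface.png') -> None:  # hypothetical helper
    data = {
        'x': [0, 1, 2],
        'y': [0, 1],
        'z': [[0.0, 0.5, 1.0],   # one row per y value
              [1.0, 1.5, 2.0]],
        'xlabel': 'x', 'ylabel': 'y', 'zlabel': 'z',
    }
    with open(path, 'wb') as output:
        surfaceChart((6.0, 4.0, 100), data, output)  # 6x4 inches at 100 dpi
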
def build_rss(
        directory: pathlib.Path,
        abook: typing.Mapping,
        reverse_url=lambda n, *a: n,
        renderers: typing.Optional[
            typing.Mapping[str, typing.Type[XMLRenderer]]] = None,
) -> ET.Element:
    renderers = renderers or load_renderers()
    extensions = collections.OrderedDict(
        [(n, cls(reverse_url)) for n, cls in renderers.items()])
    for ext_name, ext in extensions.items():
        LOG.debug(f'Registering XML namespaces for renderer: {ext_name}')
        for ns in ext.namespaces:
            ET.register_namespace(ns.prefix, ns.uri)
    rss = ET.Element('rss', attrib={'version': RSS_VERSION})
    channel = ET.SubElement(rss, 'channel')
    for ext_name, ext in extensions.items():
        LOG.debug(f'Rendering channel elements with renderer: {ext_name}')
        for el in ext.render_channel(abook):
            channel.append(el)
    for idx, item in enumerate(abook.get('items', []), start=1):
        item_elem = ET.SubElement(channel, 'item')
        for ext_name, ext in extensions.items():
            LOG.debug(
                f'Rendering item #{idx} elements with renderer: {ext_name}')
            for elem in ext.render_item(abook, item, sequence=idx):
                item_elem.append(elem)
    return rss

def from_dict(cls, d: ty.Mapping) -> _Renderer:
    """Instantiate a new renderer from a dictionary of instructions."""
    # raise error if invalid
    _validate_renderer(d)
    pkg_manager = d["pkg_manager"]
    users = d.get("existing_users", None)
    # create new renderer object
    renderer = cls(pkg_manager=pkg_manager, users=users)
    for mapping in d["instructions"]:
        method_or_template = mapping["name"]
        kwds = mapping["kwds"]
        this_instance_method = getattr(renderer, method_or_template, None)
        # Method exists and is something like 'copy', 'env', 'run', etc.
        if this_instance_method is not None:
            try:
                this_instance_method(**kwds)
            except Exception as e:
                raise RendererError(
                    f"Error on step '{method_or_template}'. Please see the"
                    " traceback above for details.") from e
        # This is actually a template.
        else:
            try:
                renderer.add_registered_template(method_or_template, **kwds)
            except TemplateError as e:
                raise RendererError(
                    f"Error on template '{method_or_template}'. Please see above"
                    " for more information.") from e
    return renderer

def compare_all_but(dict_a: t.Mapping,
                    dict_b: t.Mapping,
                    keys_to_ignore: t.Optional[t.Iterable] = None) -> bool:
    """
    Compare two dictionaries, with the possibility to ignore some fields.

    :arg dict_a: First dictionary to compare
    :arg dict_b: Second dictionary to compare
    :kwarg keys_to_ignore: An iterable of keys whose values in the
        dictionaries will not be compared.
    :returns: True if the dictionaries have matching values for all of the
        keys which were not ignored.  False otherwise.
    """
    if keys_to_ignore is None:
        return dict_a == dict_b
    if not isinstance(keys_to_ignore, Set):
        keys_to_ignore = frozenset(keys_to_ignore)
    length_a = len(frozenset(dict_a.keys()) - keys_to_ignore)
    length_b = len(frozenset(dict_b.keys()) - keys_to_ignore)
    if length_a != length_b:
        return False
    sentinel = object()
    for key, value in ((k, v) for k, v in dict_a.items()
                       if k not in keys_to_ignore):
        if value != dict_b.get(key, sentinel):
            return False
    return True

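# A quick self-check for compare_all_but: the two dicts below differ only in
# 'updated_at', so ignoring that key makes them compare equal (hypothetical
# helper and values).
def _compare_all_but_example() -> None:
    a = {'id': 1, 'name': 'x', 'updated_at': 100}
    b = {'id': 1, 'name': 'x', 'updated_at': 200}
    assert compare_all_but(a, b, keys_to_ignore=['updated_at'])
    assert not compare_all_but(a, b)  # full comparison sees the difference
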
def _get_url_open_arg(name: str, args: typing.List, kwargs: typing.Mapping):
    arg_idx = _URL_OPEN_ARG_TO_INDEX_MAPPING.get(name)
    if arg_idx is not None:
        try:
            return args[arg_idx]
        except IndexError:
            pass
    return kwargs.get(name)

def validate(self, data: typing.Mapping):
    # validate projects (access check)
    for project in data.get("projects", []):
        if self.context["request"].user.iaso_profile.account != project.account:
            raise serializers.ValidationError(
                {"project_ids": "Invalid project ids"})
    return data

def get_serializer(headers: typing.Mapping) -> BaseSerializer:
    """Parse the headers and try to find a matching serializer."""
    serializer_name = headers.get("serializer", None)
    if serializer_name:
        if serializer_name not in SERIALIZER_NAMES:
            raise SerializerNotFound(f"Serializer `{serializer_name}` not found")
        return SERIALIZER_NAMES[serializer_name]
    serializer_type = headers.get("content-type", None)
    if serializer_type:
        if serializer_type not in SERIALIZER_TYPES:
            raise SerializerNotFound(f"Serializer for `{serializer_type}` not found")
        return SERIALIZER_TYPES[serializer_type]
    raise SerializerNotFound(
        "You must set a value for header `serializer` or `content-type`")

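# Usage sketch for get_serializer: an explicit `serializer` header takes
# precedence over `content-type`. Assumes, hypothetically, that the
# SERIALIZER_NAMES registry contains a 'json' entry.
def _get_serializer_example() -> BaseSerializer:  # hypothetical helper
    headers = {'serializer': 'json', 'content-type': 'application/msgpack'}
    return get_serializer(headers)  # resolved by name; content-type ignored
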
def find(self, q: typ.Mapping, **kwargs) -> typ.Dict:
    dlvl = q.get("dlvl")
    if dlvl == DLv.STUDY:
        collection = self.studies()
    elif dlvl == DLv.SERIES:
        collection = self.series()
    elif dlvl == DLv.INSTANCE:
        collection = self.instances()
    else:
        raise ValueError(f"No find available for q={q}")
    return collection

def process_msg(self, res: typing.Mapping) -> dict:
    """Convert the message from the photometer to a unified format."""
    # Convert whatever we read from the photometer
    # to a unified format
    payload = dict()
    payload['model'] = 'TESSv2'
    payload['cmd'] = 'r'
    payload['protocol_revision'] = 2
    payload['zero_point'] = self.calibration
    payload['name'] = self.name
    msg_zp = res.get('ZP')
    if self.calibration != msg_zp:
        msg = "Calibration values don't agree: self:{} payload:{}".format(
            self.calibration, msg_zp)
        warnings.warn(msg, RuntimeWarning)
    msg_rev = res.get('rev')
    if msg_rev != 2:
        msg = "Protocol values don't agree: self:{} payload:{}".format(
            2, msg_rev)
        warnings.warn(msg, RuntimeWarning)
    # Actual readings from the photometer
    payload['freq_sensor'] = res.get('freq')  # Actual measurement, in Hz
    payload['magnitude'] = res.get('mag')
    payload['temp_ambient'] = res.get('tamb')
    payload['temp_sky'] = res.get('tsky')
    # Add time information
    # Complete the payload with tstamp and TZ
    now = datetime.datetime.utcnow()
    payload['tstamp'] = now
    return payload

def check_required(
        params: typing.Mapping,
        required: typing.Union[typing.Sequence, typing.Set]) -> bool:
    """
    :param params: mapping of parameter names to values
    :param required: names that must be present with non-None values
    :return: True if every required name maps to a non-None value
    """
    for name in required:
        if params.get(name) is None:
            return False
    return True

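# check_required treats a key that is present but set to None as missing,
# because it tests params.get(name) is None. A small self-check
# (hypothetical helper):
def _check_required_example() -> None:
    assert check_required({'host': 'localhost', 'port': 8080}, ('host', 'port'))
    assert not check_required({'host': 'localhost', 'port': None}, ('host', 'port'))
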
def _read_q_seq(
        checker_data: t.Mapping,
        sequence: t.List[str],
) -> t.Iterable[_Query]:
    for query_name in sequence:
        q_prop = f"{query_name}-query"
        if q_prop not in checker_data:
            continue
        url_prop = f"{query_name}-data-url"
        yield _Query(
            name=query_name,
            value_expr=checker_data[q_prop],
            url_expr=checker_data.get(url_prop),
        )

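# A small illustration of the '<name>-query' / '<name>-data-url' convention
# that _read_q_seq expects (hypothetical data; assumes _Query exposes the
# fields it is constructed with):
def _read_q_seq_example() -> None:  # hypothetical helper
    checker_data = {
        'latency-query': 'avg(latency)',
        'latency-data-url': 'https://example.org/latency.json',
        'errors-query': 'sum(errors)',  # no data-url: url_expr will be None
    }
    queries = list(_read_q_seq(checker_data, ['latency', 'errors', 'absent']))
    assert [q.name for q in queries] == ['latency', 'errors']  # 'absent' skipped
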
def fetch_raw_metadata(cls, samples: typing.Mapping, libraries: typing.Mapping,
                       studies: typing.Mapping) -> SeqscapeRawMetadata:
    """
    :param samples: a dict containing: key = name of the identifier type,
        value = set of identifier values
    :param libraries: same
    :param studies: same
    :return:
    """
    raw_meta = SeqscapeRawMetadata()
    ss_connection = cls._get_connection(config.SEQSC_HOST, config.SEQSC_PORT,
                                        config.SEQSC_DB_NAME, config.SEQSC_USER)
    if samples:
        samples_fetched_by_names, samples_fetched_by_ids, samples_fetched_by_accession_nrs = \
            cls._fetch_samples(ss_connection, samples.get('name'),
                               samples.get('internal_id'),
                               samples.get('accession_number'))
        raw_meta.add_fetched_entities(samples_fetched_by_names)
        raw_meta.add_fetched_entities(samples_fetched_by_accession_nrs)
        raw_meta.add_fetched_entities(samples_fetched_by_ids)
        samples_set = raw_meta.get_entities_without_duplicates_by_entity_type(
            'sample')
        studies_for_samples = cls._fetch_studies_for_samples(
            ss_connection, samples_set)
        raw_meta.add_fetched_entities_by_association(studies_for_samples)
    if studies:
        studies_fetched_by_names, studies_fetched_by_ids, studies_fetched_by_accession_nrs = \
            cls._fetch_studies(ss_connection, studies.get('name'),
                               studies.get('internal_id'),
                               studies.get('accession_number'))
        raw_meta.add_fetched_entities(studies_fetched_by_accession_nrs)
        raw_meta.add_fetched_entities(studies_fetched_by_ids)
        raw_meta.add_fetched_entities(studies_fetched_by_names)
        # Getting the sample-study associations:
        studies_set = raw_meta.get_entities_without_duplicates_by_entity_type(
            'study')
        samples_for_study = cls._fetch_samples_for_studies(
            ss_connection, studies_set)
        raw_meta.add_fetched_entities_by_association(samples_for_study)
    if libraries:
        libraries_fetched_by_names, libraries_fetched_by_ids = \
            cls._fetch_libraries(ss_connection, libraries.get('name'),
                                 libraries.get('internal_id'))
        raw_meta.add_fetched_entities(libraries_fetched_by_names)
        raw_meta.add_fetched_entities(libraries_fetched_by_ids)
    return raw_meta

def populate(fields: typing.Mapping):
    def tuplize(what):
        return tuple(x for x in what)

    mode = CONTROL_MODE_MAPPING[fields["mode"]]
    return TaskSpecificStatusReport.Run(
        stall_count=fields["stall_count"],
        demand_factor=fields["demand_factor"],
        electrical_angular_velocity=fields["electrical_angular_velocity"],
        mechanical_angular_velocity=fields["mechanical_angular_velocity"],
        torque=fields.get("torque", 0.0),  # Not available until v0.2
        u_dq=tuplize(fields["u_dq"]),
        i_dq=tuplize(fields["i_dq"]),
        mode=mode,
        spinup_in_progress=fields["spinup_in_progress"],
        rotation_reversed=fields["rotation_reversed"],
        controller_saturated=fields["controller_saturated"],
    )

def apply(self, spec: spec.Spec, storage: typing.Mapping) -> typing.Mapping:
    setting = self.get_setting(spec)
    allow_missing = (self.opcode is OpCode.CONFIG_REM
                     or self.opcode is OpCode.CONFIG_RESET)
    value = self.coerce_value(setting, allow_missing=allow_missing)

    if self.opcode is OpCode.CONFIG_SET:
        if issubclass(setting.type, types.ConfigType):
            raise errors.InternalServerError(
                f'unexpected CONFIGURE SET on a non-primitive '
                f'configuration parameter: {self.setting_name}')
        storage = storage.set(self.setting_name, value)
    elif self.opcode is OpCode.CONFIG_RESET:
        if issubclass(setting.type, types.ConfigType):
            raise errors.InternalServerError(
                f'unexpected CONFIGURE RESET on a non-primitive '
                f'configuration parameter: {self.setting_name}')
        try:
            storage = storage.delete(self.setting_name)
        except KeyError:
            pass
    elif self.opcode is OpCode.CONFIG_ADD:
        if not issubclass(setting.type, types.ConfigType):
            raise errors.InternalServerError(
                f'unexpected CONFIGURE SET += on a primitive '
                f'configuration parameter: {self.setting_name}')
        exist_value = storage.get(self.setting_name, setting.default)
        if value in exist_value:
            props = []
            for f in dataclasses.fields(setting.type):
                if f.compare:
                    props.append(f.name)
            if len(props) > 1:
                props = f' ({", ".join(props)}) violate'
            else:
                props = f'.{props[0]} violates'
            raise errors.ConstraintViolationError(
                f'{setting.type.__name__}{props} '
                f'exclusivity constraint')
        new_value = exist_value | {value}
        storage = storage.set(self.setting_name, new_value)
    elif self.opcode is OpCode.CONFIG_REM:
        if not issubclass(setting.type, types.ConfigType):
            raise errors.InternalServerError(
                f'unexpected CONFIGURE SET -= on a primitive '
                f'configuration parameter: {self.setting_name}')
        exist_value = storage.get(self.setting_name, setting.default)
        new_value = exist_value - {value}
        storage = storage.set(self.setting_name, new_value)

    return storage

def read(self, properties: typing.Mapping) -> Entity:
    properties = self.__transforms[0](dict(properties))  # transform forward
    for field_name, field_type in self.__field_type_map.items():
        d = properties.get(self.__renames.get(field_name, field_name))
        self.__field_dict[field_name].read(d)
    return self

def data_item_created(data_item_properties: typing.Mapping) -> str:
    return data_item_properties.get("created", "1900-01-01T00:00:00.000000")

def first_or_None(dict_: typing.Mapping, key) -> typing.Optional[str]:
    v = dict_.get(key)
    if isinstance(v, list):
        return first(v)
    return None

def plot_optimization_trace_mult_exp(
        time_list: typing.List,
        performance_list: typing.List,
        name_list: typing.List[str],
        title: str = None,
        logy: bool = False,
        logx: bool = False,
        properties: typing.Mapping = None,
        y_min: float = None,
        y_max: float = None,
        x_min: float = None,
        x_max: float = None,
        ylabel: str = "Performance",
        xlabel: str = "time [sec]",
        scale_std: float = 1,
        agglomeration: str = "mean",
        step: bool = False,
):
    '''
    plot performance over time

    Arguments
    ---------
    time_list: typing.List[np.ndarray T]
        for each system (in name_list) T time stamps (on x)
    performance_list: typing.List[np.ndarray TxN]
        for each system (in name_list) an array of size T x N where N is the
        number of repeated runs of the system
    name_list: typing.List[str]
        names of all systems -- order has to be the same as in
        performance_list and time_list
    title: str
        title of the plot
    logy: bool
        y on log-scale
    logx: bool
        x on log-scale
    properties: typing.Mapping
        possible fields: "linestyles", "colors", "markers", "markersize",
        "labelfontsize", "linewidth", "titlefontsize", "gridcolor",
        "gridalpha", "dpi", "legendsize", "legendlocation", "ticklabelsize",
        "drawstyle", "incheswidth", "inchesheight", "loweryloglimit"
        > To turn off the legend, set legendlocation='None'
    y_min: float
        y min value
    y_max: float
        y max value
    x_min: float
        x min value
    x_max: float
        x max value
    ylabel: str
        y label
    xlabel: str
        x label
    scale_std: float
        scale of std (only used with agglomeration=="mean")
    agglomeration: str
        aggregation over repeated runs (either mean or median)
    step: bool
        plot as step function (True) or with linear interpolation (False)
    '''
    if scale_std != 1 and agglomeration == "median":
        raise ValueError("Can not scale_std when plotting median")

    # complete properties
    if properties is None:
        properties = dict()
    properties = plot_util.fill_with_defaults(properties)
    # print(properties)

    # Set up figure
    ratio = 5
    gs = matplotlib.gridspec.GridSpec(ratio, 1)
    fig = figure(1, dpi=int(properties['dpi']))
    fig.set_size_inches(properties["incheswidth"], properties["inchesheight"])
    ax1 = subplot(gs[0:ratio, :])
    ax1.grid(True, linestyle='-', which='major', color=properties["gridcolor"],
             alpha=float(properties["gridalpha"]))

    if title is not None:
        fig.suptitle(title, fontsize=int(properties["titlefontsize"]))

    auto_y_min = 2**64
    auto_y_max = -plottingscripts.utils.macros.MAXINT
    auto_x_min = 2**64
    auto_x_max = -2**64

    for idx, performance in enumerate(performance_list):
        performance = np.array(performance)
        color = next(properties["colors"])
        marker = next(properties["markers"])
        linestyle = next(properties["linestyles"])
        name_list[idx] = name_list[idx].replace("_", " ")

        if logx and time_list[idx][0] == 0:
            time_list[idx][0] = 10**-1

        # print("Plot %s" % agglomeration)
        if agglomeration == "mean":
            m = np.mean(performance, axis=0)
            lower = m - np.std(performance, axis=0) * scale_std
            upper = m + np.std(performance, axis=0) * scale_std
        elif agglomeration == "meanstderr":
            m = np.mean(performance, axis=0)
            lower = m - (np.std(performance, axis=0) /
                         np.sqrt(performance.shape[0]))
            upper = m + (np.std(performance, axis=0) /
                         np.sqrt(performance.shape[0]))
        elif agglomeration == "median":
            m = np.median(performance, axis=0)
            lower = np.percentile(performance, axis=0, q=25)
            upper = np.percentile(performance, axis=0, q=75)
        else:
            raise ValueError("Unknown agglomeration: %s" % agglomeration)

        if logy:
            lower[lower < properties["loweryloglimit"]] = properties["loweryloglimit"]
            upper[upper < properties["loweryloglimit"]] = properties["loweryloglimit"]
            m[m < properties["loweryloglimit"]] = properties["loweryloglimit"]

        # Plot m and fill between lower and upper
        if scale_std >= 0 and len(performance) > 1:
            ax1.fill_between(time_list[idx], lower, upper, facecolor=color,
                             alpha=0.3, edgecolor=color,
                             step="post" if step else None)
        if step:
            ax1.step(time_list[idx], m, color=color,
                     linewidth=int(properties["linewidth"]),
                     linestyle=linestyle, marker=marker,
                     markersize=int(properties["markersize"]),
                     label=name_list[idx], where="post",
                     **properties.get("plot_args", {}))
        else:
            ax1.plot(time_list[idx], m, color=color,
                     linewidth=int(properties["linewidth"]),
                     linestyle=linestyle, marker=marker,
                     markersize=int(properties["markersize"]),
                     label=name_list[idx],
                     drawstyle=properties["drawstyle"],
                     **properties.get("plot_args", {}))

        # find out show from for this time_list
        show_from = 0
        if x_min is not None:
            for t_idx, t in enumerate(time_list[idx]):
                if t > x_min:
                    show_from = t_idx
                    break

        auto_y_min = min(min(lower[show_from:]), auto_y_min)
        auto_y_max = max(max(upper[show_from:]), auto_y_max)
        auto_x_min = min(time_list[idx][0], auto_x_min)
        auto_x_max = max(time_list[idx][-1], auto_x_max)

    # Describe axes
    if logy:
        ax1.set_yscale("log")
        auto_y_min = max(0.1, auto_y_min)
    ax1.set_ylabel("%s" % ylabel, fontsize=properties["labelfontsize"])

    if logx:
        ax1.set_xscale("log")
        auto_x_min = max(0.1, auto_x_min)
    ax1.set_xlabel(xlabel, fontsize=properties["labelfontsize"])

    if properties["legendlocation"] != "None":
        leg = ax1.legend(loc=properties["legendlocation"], fancybox=True,
                         prop={'size': int(properties["legendsize"])},
                         **properties.get("legend_args", {}))
        leg.get_frame().set_alpha(0.5)

    tick_params(axis='both', which='major',
                labelsize=properties["ticklabelsize"])

    # Set axes limits
    if y_max is None and y_min is not None:
        ax1.set_ylim([y_min, auto_y_max + 0.01 * abs(auto_y_max - y_min)])
    elif y_max is not None and y_min is None:
        ax1.set_ylim([auto_y_min - 0.01 * abs(auto_y_max - auto_y_min), y_max])
    elif y_max is not None and y_min is not None and y_max > y_min:
        ax1.set_ylim([y_min, y_max])
    else:
        ax1.set_ylim([auto_y_min - 0.01 * abs(auto_y_max - auto_y_min),
                      auto_y_max + 0.01 * abs(auto_y_max - auto_y_min)])

    if x_max is None and x_min is not None:
        ax1.set_xlim([x_min - 0.1 * abs(x_min),
                      auto_x_max + 0.1 * abs(auto_x_max)])
    elif x_max is not None and x_min is None:
        ax1.set_xlim([auto_x_min - 0.1 * abs(auto_x_min),
                      x_max + 0.1 * abs(x_max)])
    elif x_max is not None and x_min is not None and x_max > x_min:
        ax1.set_xlim([x_min, x_max])
    else:
        ax1.set_xlim([auto_x_min,
                      auto_x_max + 0.1 * abs(auto_x_min - auto_x_max)])

    return fig

def first_or_empty_string(dict_: typing.Mapping, key) -> str:
    v = dict_.get(key)
    if isinstance(v, list):
        return first(v)
    return ''

def get(v: typing.Mapping, vld: "Validator", **_):
    return v.get(vkw(vld)["name"], null)

def find(self, query: typ.Mapping, **kwargs):
    val = query.get('q')
    for k, v in self.cache.items():
        if v == val:
            return k

def _convert(self, value: ty.Mapping, path: Path, *args: ty.Any,
             entity: ty.Optional[ConvertibleEntity] = None,
             **context: ty.Any) -> T:
    errors: ty.List[Error] = []
    result = self.RESULT_CLASS()

    unacceptable = []
    if entity == ConvertibleEntity.REQUEST:
        for property_name in self.read_only:
            if property_name in value:
                unacceptable.append(property_name)
        if unacceptable:
            errors.append(
                Error(path, self.messages['read_only'].format(
                    comma_delimited(unacceptable))))
    elif entity == ConvertibleEntity.RESPONSE:
        for property_name in self.write_only:
            if property_name in value:
                unacceptable.append(property_name)
        if unacceptable:
            errors.append(
                Error(path, self.messages['write_only'].format(
                    comma_delimited(unacceptable))))

    required = self.required
    if entity == ConvertibleEntity.REQUEST:
        required -= self.read_only
    elif entity == ConvertibleEntity.RESPONSE:
        required -= self.write_only

    missed = set()
    for property_name in required:
        if property_name not in value:
            missed.add(property_name)
    if missed:
        errors.append(
            Error(path, self.messages['required'].format(
                comma_delimited(missed))))

    additional_properties = set(value) - set(self.properties)
    not_matched = []
    for property_name in additional_properties:
        property_value = value[property_name]
        property_path = path / property_name
        matched = None
        for pattern, property_type in self.pattern_properties.items():
            try:
                matched = pattern.match(property_name)
            except (TypeError, ValueError):
                continue
            if not matched:
                continue
            try:
                result.pattern_properties[property_name] = property_type.convert(
                    property_value, property_path, entity=entity, **context)
            except SchemaError as e:
                errors.extend(e.errors)
            else:
                break
        if matched:
            continue
        if self.additional_properties is True:
            result.additional_properties[property_name] = property_value
            continue
        elif isinstance(self.additional_properties, AbstractConvertible):
            try:
                result.additional_properties[property_name] = \
                    self.additional_properties.convert(
                        property_value, property_path, entity=entity, **context)
            except SchemaError as e:
                errors.extend(e.errors)
            else:
                continue
        not_matched.append(property_name)
    if not_matched:
        errors.append(
            Error(path, self.messages['additional_properties'].format(
                comma_delimited(not_matched))))

    for property_name, property_type in self.properties.items():
        if property_name in missed:
            continue
        if entity == ConvertibleEntity.REQUEST and property_name in self.read_only:
            continue
        if entity == ConvertibleEntity.RESPONSE and property_name in self.write_only:
            continue
        try:
            result.properties[property_name] = property_type.convert(
                value.get(property_name, Undefined), path / property_name,
                entity=entity, **context)
        except SchemaError as e:
            errors.extend(e.errors)
            continue
        except UndefinedResultError:
            continue

    if errors:
        raise SchemaError(*errors)
    return result

def _decode_topic(self, data: typing.Mapping):
    return data.get("topic", "")