def _parse_losses(self, losses):
        """Parse the raw outputs (losses) of the network.

        Args:
            losses (dict): Raw output of the network, which usually contain
                losses and other necessary information.

        Returns:
            tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor \
                which may be a weighted sum of all losses, log_vars contains \
                all the variables to be sent to the logger.
        """
        log_vars = OrderedDict()
        for loss_name, loss_value in losses.items():
            if isinstance(loss_value, torch.Tensor):
                log_vars[loss_name] = loss_value.mean()
            elif isinstance(loss_value, list):
                log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
            else:
                raise TypeError(
                    f'{loss_name} is not a tensor or list of tensor')

        loss = sum(_value for _key, _value in log_vars.items()
                   if 'loss' in _key)

        log_vars['loss'] = loss
        for loss_name, loss_value in log_vars.items():
            if dist.is_available() and dist.is_initialized():
                loss_value = loss_value.data.clone()
                dist.all_reduce(loss_value.div_(dist.get_world_size()))
            log_vars[loss_name] = loss_value.item()

        return loss, log_vars
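# Usage sketch (assumed, not part of the original module; torch, torch.distributed
# as dist, and OrderedDict are imported in the full file):
#
#   losses = dict(loss_cls=torch.tensor([0.5, 0.3]),
#                 loss_bbox=[torch.tensor(0.2), torch.tensor(0.1)],
#                 acc=torch.tensor(91.0))
#   total, log_vars = _parse_losses(None, losses)  # `self` is never referenced
#   # total == 0.4 + 0.3 == 0.7; 'acc' shows up in log_vars but is excluded from
#   # the sum because its key does not contain the substring 'loss'.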
Example #2
def main(args):
    model_index_file = MMCLS_ROOT / 'model-index.yml'
    model_index = Config.fromfile(model_index_file)
    models = OrderedDict()
    for file in model_index.Import:
        metafile = Config.fromfile(MMCLS_ROOT / file)
        models.update({model.Name: model for model in metafile.Models})

    logger = get_root_logger(log_file='benchmark_test_image.log',
                             log_level=logging.INFO)

    if args.models:
        patterns = [re.compile(pattern) for pattern in args.models]
        filter_models = {}
        for k, v in models.items():
            if any([re.match(pattern, k) for pattern in patterns]):
                filter_models[k] = v
        if len(filter_models) == 0:
            print('No model found, please specify models in:')
            print('\n'.join(models.keys()))
            return
        models = filter_models

    summary_data = {}
    for model_name, model_info in models.items():

        config = Path(model_info.Config)
        assert config.exists(), f'{model_name}: {config} not found.'

        logger.info(f'Processing: {model_name}')

        http_prefix = 'https://download.openmmlab.com/mmclassification/'
        dataset = model_info.Results[0]['Dataset']
        if args.checkpoint_root is not None:
            root = Path(args.checkpoint_root)
            checkpoint = root / model_info.Weights[len(http_prefix):]
            checkpoint = str(checkpoint)
        else:
            checkpoint = None

        try:
            # build the model from a config file and a checkpoint file
            result = inference(MMCLS_ROOT / config, checkpoint,
                               classes_map[dataset], args)
            result['valid'] = 'PASS'
        except Exception as e:
            logger.error(f'"{config}" : {repr(e)}')
            result = {'valid': 'FAIL'}

        summary_data[model_name] = result
        # show the results
        if args.show:
            imshow_infos(args.img, result, wait_time=args.wait_time)

    show_summary(summary_data)
 def assertOrderedDictEqual(od1: OrderedDict, od2: OrderedDict):
     errors = []
     if len(od1.keys()) != len(od2.keys()):
         raise AssertionError("Number of items don't match: {} {}".format(od1, od2))
     for i, j in zip(od1.items(), od2.items()):
         if i[0] != j[0]:
             errors.append("Keys in {} and {} don't match".format(i, j))
         if i[1] != j[1]:
             errors.append("Values in {} and {} don't match".format(i, j))
     if errors:
         raise AssertionError(", ".join(errors))
     return True
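# Usage sketch (assumed; the helper above is called as a plain function here):
from collections import OrderedDict

od_a = OrderedDict([('x', 1), ('y', 2)])
od_b = OrderedDict([('x', 1), ('y', 2)])
assert assertOrderedDictEqual(od_a, od_b)
# assertOrderedDictEqual(od_a, OrderedDict([('y', 2), ('x', 1)])) would raise
# AssertionError: entries are compared position by position, so order matters.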
Example #4
class GetSetInstruction(InstructionBase):
    """Aggregates a number instructions that allow a CPU to get and set values.

    The instruction uses funct7 to identify a "Xetter" to which to delegate.
    """
    def __init__(self, xetters):
        """Constructor

        Parameters
        ----------

        xetters: mapping of register number (integer 0-127) to a subclass of Xetter.
        """
        super().__init__()
        self.xetters = OrderedDict(xetters)

    def elab(self, m):
        # By default, wire done to start to prevent hangs
        m.d.comb += self.done.eq(self.start)

        # Based on funct7, route start, done and output signals
        m.submodules['f7buf'] = f7buf = ValueBuffer(self.funct7, self.start)
        for reg, x in self.xetters.items():
            m.d.comb += [
                x.in0.eq(self.in0),
                x.in1.eq(self.in1),
            ]
            with m.If(f7buf.output == reg):
                m.d.comb += [
                    x.start.eq(self.start),
                    self.output.eq(x.output),
                    self.done.eq(x.done),
                ]
Example #5
def _walk_validator(
    instance: OrderedDict,
    validator_dict: OrderedDict,
    validator_function: Callable[[Mapping, Any, str],
                                 Iterator[ValidationError]],
    position=None,
    allow_missing_keys: bool = False,
) -> Iterator[ValidationError]:
    """Walk instance and validation dict entries in parallel and apply a validator func.

    This function can be used to recursively walk both the instance dictionary and the
    custom validation dictionary in parallel. Once a leaf dictionary entry is reached,
    the validation function is applied to the selected items.

    Parameters
    ----------
    instance:
        Tree serialization (with default dtypes) of the instance
    validator_dict:
        OrderedDict representation of the validation structure.
    validator_function:
        Custom python validator function to apply along the (nested) dictionary
    position:
        String representation of the current nested schema position
    allow_missing_keys:
        If True will skip validation if the requested key to validate does not exist.

    Yields
    ------
    asdf.ValidationError

    """
    if position is None:  # pragma: no cover
        position = []
    if isinstance(validator_dict, dict):
        for key, item in validator_dict.items():
            if isinstance(item, Mapping):
                yield from _walk_validator(
                    instance[key],
                    validator_dict[key],
                    validator_function,
                    position=position + [key],
                    allow_missing_keys=allow_missing_keys,
                )
            else:
                if key in instance:
                    yield from validator_function(instance[key], item,
                                                  position + [key])
                elif allow_missing_keys:  # pragma: no cover
                    pass
                else:  # pragma: no cover
                    pass
                    # TODO: if a property is not required the key might be missing
                    # yield ValidationError(f"Missing key {key}")

    else:
        yield from validator_function(instance, validator_dict, position)
Example #6
    def process(self, data: OrderedDict) -> OrderedDict:
        """
        Processing PyTorch model parameter.
        The data is a state_dict of a PyTorch model.
        """
        new_data = OrderedDict()
        for layer_name, layer_params in data.items():
            new_data[layer_name] = self._process_layer(layer_params)

        return new_data
def parse_launch_arguments(
        launch_arguments: List[Text]) -> List[Tuple[Text, Text]]:
    """Parse the given launch arguments from the command line, into list of tuples for launch."""
    parsed_launch_arguments = OrderedDict()  # type: ignore
    for argument in launch_arguments:
        count = argument.count(':=')
        if count == 0 or argument.startswith(':=') or (
                count == 1 and argument.endswith(':=')):
            raise RuntimeError(
                "malformed launch argument '{}', expected format '<name>:=<value>'"
                .format(argument))
        name, value = argument.split(':=', maxsplit=1)
        parsed_launch_arguments[name] = value  # last one wins is intentional
    return parsed_launch_arguments.items()
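# Usage sketch (assumed): well-formed 'name:=value' pairs become (name, value)
# tuples; a repeated name keeps its last value ("last one wins").
pairs = parse_launch_arguments(['use_sim_time:=true', 'robot:=tb3', 'robot:=tb4'])
# list(pairs) == [('use_sim_time', 'true'), ('robot', 'tb4')]
# parse_launch_arguments(['broken']) would raise RuntimeError (no ':=' present).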
Example #8
def print_test_list(test_list: typing.OrderedDict) -> None:
    """
    Prints a list of all tests and demos in test_list.
    """
    namelen = max(len(name) for name, _ in test_list.keys())

    for current_type in ['test', 'demo', 'benchmark']:
        for (name, type_), (_, lang, desc, _) in test_list.items():
            if type_ == current_type:
                print(f"[{type_} {lang:3}] {name:{namelen}}  {desc}")

    print("")
    print("To see how to run them, add --help to your invocation!")
    print("")
    print("Remember: Testing is the future, and the future starts with: You.")
    print("")
Example #9
def _conf_intervals_to_df(cis: OrderedDict) -> DataFrame:
    """Convert lmfit confidence intervals to pandas.DataFrame."""
    ncis = len(list(cis.values())[0]) // 2  # e.g. [μ - σ, μ, μ + σ]
    return DataFrame(
        ((name, *[climit for _, climit in climits])
         for name, climits in cis.items()),
        columns=(
            "name",
            *[
                f"{sign}{clevel:.3f}"
                for sign, (clevel, _) in zip(("-", ) * ncis + ("", ) +
                                             ("+", ) * ncis,
                                             list(cis.values())[0])
            ],
        ),
    )
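# Usage sketch (assumed; the dict below is hand-made in the shape of lmfit's
# conf_interval() output, param -> [(confidence_level, limit), ...], and
# OrderedDict/DataFrame are imported as in the full module):
cis = OrderedDict(
    amp=[(0.95, 9.2), (0.68, 9.6), (0.0, 10.0), (0.68, 10.4), (0.95, 10.8)])
df = _conf_intervals_to_df(cis)
# df columns: name, -0.950, -0.680, 0.000, +0.680, +0.950; one row per parameter.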
Example #10
    def get_tail_frequencies(self,
                             date: datetime) -> Tuple[int, Dict[int, float]]:
        # TODO: there is actually no need to keep this ordered here

        lower_bound, upper_bound = self._get_boundaries(date)

        self.cur.execute(r'''SELECT * FROM get_tail_count(%s, %s)''',
                         (lower_bound, upper_bound))
        rows = self.cur.fetchall()

        counts = OrderedDict({r[0]: r[1] for r in rows})
        sum_count = sum(counts.values())
        frequencies = OrderedDict(
            (tail, float(count) / sum_count) for tail, count in counts.items())

        return (sum_count, frequencies)
Example #11
def process_data(data):
    prerequisites = OrderedDict()
    list_data = []
    for line in data:
        sentence = line.split()
        first = sentence[1]
        second = sentence[7]
        list_data.append((first, second))
        if second in prerequisites:
            if first not in prerequisites[second]:
                prerequisites[second].append(first)
        else:
            prerequisites[second] = [first]
        prerequisites[second].sort()
    sorted_pairs = sorted(prerequisites.items(), key=lambda x: x[0])
    prerequisites = {x[0]: x[1] for x in sorted_pairs}
    return prerequisites, list_data
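# Usage sketch (assumed Advent-of-Code-style input matching the word positions
# read above: token 1 is the prerequisite step, token 7 the dependent step).
lines = ["Step C must be finished before step A can begin.",
         "Step C must be finished before step F can begin."]
prereqs, pairs = process_data(lines)
# prereqs == {'A': ['C'], 'F': ['C']}; pairs == [('C', 'A'), ('C', 'F')]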
Example #12
def reduce_duplicated_timestamps(ys, ts, verbose=False):
    ''' Reduce duplicated timestamps in a timeseries by averaging the values

    Parameters
    ----------
    ys : array
        Dependent variable
    ts : array
        Independent variable
    verbose : bool
        If True, will print a warning message

    Returns
    -------
    ys : array
        Dependent variable
    ts : array
        Independent variable, with duplicated timestamps reduced by averaging the values

    '''
    ys = np.asarray(ys, dtype=float)
    ts = np.asarray(ts, dtype=float)
    assert ys.size == ts.size, 'The size of time axis and data value should be equal!'

    if len(ts) != len(set(ts)):
        value = OrderedDict()
        for t, y in zip(ts, ys):
            if t not in value:
                value[t] = [y]
            else:
                value[t].append(y)

        ts = []
        ys = []
        for k, v in value.items():
            ts.append(k)
            ys.append(np.mean(v))

        ts = np.array(ts)
        ys = np.array(ys)

        if verbose:
            print(
                'Duplicate timestamps have been combined by averaging values.')
    return ys, ts
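# Usage sketch (assumed; numpy and OrderedDict are imported as in the full
# module): the two samples at t=10 are averaged into a single point.
ys_clean, ts_clean = reduce_duplicated_timestamps([1.0, 9.0, 3.0], [10, 20, 10],
                                                  verbose=True)
# ts_clean -> array([10., 20.]); ys_clean -> array([2., 9.])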
class ScopedSymbolTable:
    def __init__(self, scope_name, scope_level, enclosing_scope=None):
        self._symbols = OrderedDict()
        self.scope_name = scope_name
        self.scope_level = scope_level
        self.enclosing_scope = enclosing_scope

    def _init_builtins(self):
        self.insert(BuiltinTypeSymbol('INTEGER'))
        self.insert(BuiltinTypeSymbol('REAL'))

    def __str__(self):
        h1 = 'SCOPE (SCOPED SYMBOL TABLE)'
        lines = ['\n', h1, '=' * len(h1)]
        for header_name, header_value in (('Scope name', self.scope_name),
                                          ('Scope level', self.scope_level),
                                          ('Enclosing scope',
                                           self.enclosing_scope.scope_name
                                           if self.enclosing_scope else None)):
            lines.append('%-15s: %s' % (header_name, header_value))
        h2 = 'Scope (Scoped symbol table) contents'
        lines.extend([h2, '-' * len(h2)])
        lines.extend(
            ('%7s: %r' % (key, value)) for key, value in self._symbols.items())
        lines.append('\n')
        s = '\n'.join(lines)
        return s

    __repr__ = __str__

    def insert(self, symbol):
        print('Insert: %s' % symbol.name)
        self._symbols[symbol.name] = symbol

    def lookup(self, name):
        print('Lookup: %s. (Scope name: %s)' % (name, self.scope_name))
        # 'symbol' is either an instance of the Symbol class or None
        symbol = self._symbols.get(name)

        if symbol is not None:
            return symbol

        # recursively go up the chain looking for the name.
        if self.enclosing_scope is not None:
            return self.enclosing_scope.lookup(name)
Example #14
def ordered_dict_insert(dct: Field,
                        new_key: str,
                        new_value: Union[str, bool],
                        before_key: Optional[str] = None,
                        after_key: Optional[str] = None) -> None:
    output = OrderedDict()
    inserted: bool = False
    for key, value in dct.items():
        if not inserted and before_key is not None and key == before_key:
            output[new_key] = new_value
            inserted = True
        output[key] = value
        if not inserted and after_key is not None and key == after_key:
            output[new_key] = new_value
            inserted = True
    if not inserted:
        output[new_key] = new_value
    dct.clear()
    for key, value in output.items():
        dct[key] = value
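# Usage sketch (assumed; `dct` is mutated in place): insert 'b' before 'c'.
from collections import OrderedDict

d = OrderedDict([('a', '1'), ('c', '3')])
ordered_dict_insert(d, 'b', '2', before_key='c')
# list(d.items()) == [('a', '1'), ('b', '2'), ('c', '3')]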
def run_update_project_status(filter_string: str) -> None:
    """Run the workflow to update project status for all filtered projects."""
    logger.info("### Start update project status workflow ###")
    active_projects = get_projects(status="active")
    finished_projects = filter_projects_by_name_and_progress(
        active_projects,
        filter_string,
        progress_threshold=100,
    )

    inactive_projects = get_projects(status="inactive")
    # We sort projects by their attribute "projectNumber" to ensure that
    # always the lowest one will be set to "status=active" next.
    inactive_projects = OrderedDict(
        sorted(inactive_projects.items(),
               key=lambda x: int(x[1]["projectNumber"])))

    new_active_projects = filter_projects_by_name_and_progress(
        inactive_projects,
        filter_string,
        progress_threshold=0,
    )[0:len(finished_projects)]

    # Here we check that there is at least one inactive project
    # which can be activated in the app.
    # We do this to avoid that there is no project left in the app.
    if len(new_active_projects) > 0:
        for project_id in finished_projects:
            project_name = active_projects[project_id]["name"]
            set_status_in_firebase(project_id,
                                   project_name,
                                   new_status="finished")

        for project_id in new_active_projects:
            project_name = inactive_projects[project_id]["name"]
            set_status_in_firebase(project_id,
                                   project_name,
                                   new_status="active")
    logger.info("### Finished update project status workflow ###")
Example #16
 async def shop(self, ctx):
     shop = self.load_shop()
     embeds = []
     shop = OrderedDict(
         sorted(shop.items(), key=lambda x: getitem(x[1], "raw_price")))
     chunks = divide_chunks(list(shop.keys()), 5)
     i = 0
     for chunk in list(chunks):
         i += 1
         embed = discord.Embed(color=discord.Color.teal())
         embed.title = "The Waifu Shop"
         for item in chunk:
             v = (f" / `{shop[item]['raw_price']} erin`"
                  if shop[item]['price']['item'] != "erin" else "")
             embed.add_field(
                 name=item,
                 value=
                 f"{shop[item]['name']} {shop[item]['emoji']} | Costs `{shop[item]['price']['quantity']} {shop[item]['price']['item']}`"
                 + v,
                 inline=False,
             )
         embed.set_footer(text=f"Page {i}/{len(chunks)}",
                          icon_url=ctx.author.avatar_url)
         embeds.append(embed)
     paginator = DiscordUtils.Pagination.CustomEmbedPaginator(ctx)
     paginator.add_reaction(
         "\N{Black Left-Pointing Double Triangle with Vertical Bar}",
         "first")
     paginator.add_reaction("\N{Black Left-Pointing Double Triangle}",
                            "back")
     paginator.add_reaction("\N{CROSS MARK}", "lock")
     paginator.add_reaction("\N{Black Right-Pointing Double Triangle}",
                            "next")
     paginator.add_reaction(
         "\N{Black Right-Pointing Double Triangle with Vertical Bar}",
         "last")
     await paginator.run(embeds)
Example #17
def main():
    args = parser().parse_args()
    results = OrderedDict()
    max_results = 10
    index = 0
    home = Path(args.base) if args.base else Path().home()

    ignore_case = False
    print(args)

    search = args.search.lower() if ignore_case else args.search
    for path in home.glob('**/*'):
        name = path.name.lower() if ignore_case else path.name
        if path.is_dir() and name.endswith(search):
            results[string.ascii_lowercase[index]] = path.as_posix()
            index += 1
            if len(results) >= max_results:
                break

    if len(results) < 1:
        print(f'No such directory for <{args.search}>')
        return -1
    for index, elem in results.items():
        print(index, '--', elem)
    keys = list(results.keys())
    wrong_key = True
    while wrong_key:
        choice = input(
            f'Please enter a key between \'{keys[0]}\' and \'{keys[-1]}\'\n')
        try:
            print(f'Entering directory {results[choice]} ...')
            wrong_key = False
        except KeyError:
            print(
                f'Wrong key. Please choose a correct one between {keys[0]} and {keys[-1]}'
            )
            wrong_key = True
Example #18
def select_name_v3(voiceList: OrderedDict) -> int:
    print(*(f'{x["vVoiceName"]}: {y}' for (x, y) in voiceList.items()),
          sep='\n')
    return int(input('?'))
class ContentCache(CacheABC):
    # pylint: disable=too-many-instance-attributes

    def __init__(self,
                 cache_folder: str,
                 temporary_dir: str,
                 max_cache_size_bytes: int = 1024 * 1024 * 1024,
                 max_workers: int = 10,
                 contents_load: bool = True,
                 contents_save_interval_secs: float = 5.0,
                 url_resolver: URLResolverABC = URLResolver()):
        print(
            f'ContentCache.__init__: cache_folder={cache_folder}, temporary_dir={temporary_dir}'
        )
        self.cache_folder: str = cache_folder
        self.max_cache_size_bytes: int = max_cache_size_bytes
        self.temporary_dir = temporary_dir
        self.contents_save_interval_secs = contents_save_interval_secs
        self.url_resolver = url_resolver

        self._lock = threading.RLock()

        self._executor = ThreadPoolExecutor(max_workers)
        self._tasks: Dict[URL, Task] = {}

        self._contents: OrderedDict[str, Content] = OrderedDict()
        self._contents_size: int = 0

        self._contents_save_timer = None

        if contents_load:
            self._load_contents()

    def __del__(self):
        self._save_contents()
        self._executor.shutdown()

    def _load_contents(self):
        contents_json_path = os.path.join(self.cache_folder, 'contents.json')
        if not os.path.exists(contents_json_path):
            return

        with self._lock:
            with open(contents_json_path, 'r') as file:
                content_json = json.load(file, object_pairs_hook=OrderedDict)

            self._contents = OrderedDict({
                key: Content(id=value['id'],
                             state=Content.State[value['state']],
                             filepath=os.path.join(self.cache_folder,
                                                   value['filepath']),
                             type=value['type'],
                             length=value['length'])
                for key, value in content_json.items()
            })
            self._contents_size = sum(
                [c.length for c in self._contents.values()])

            print(
                f'_load_contents: {len(self._contents)} from {contents_json_path}'
            )

    def _save_contents(self):
        contents_json_path = os.path.join(self.cache_folder, 'contents.json')
        with self._lock:
            print(
                f'_save_contents: {len(self._contents)} to {contents_json_path}'
            )
            content_json = {
                key: {
                    'id':
                    value.id,
                    'state':
                    value.state.name,
                    'filepath':
                    os.path.basename(value.filepath) if value.filepath else '',
                    'type':
                    value.type,
                    'length':
                    value.length
                }
                for key, value in self._contents.items()
            }
            with open(contents_json_path, 'w') as file:
                json.dump(content_json, file)

    def _schedule_save_contents(self):
        if self._contents_save_timer is not None:
            self._contents_save_timer.cancel()

        self._contents_save_timer = threading.Timer(
            self.contents_save_interval_secs, self._save_contents)
        self._contents_save_timer.start()

    def _to_content_filepath(self, content_id: str) -> str:
        return os.path.join(self.cache_folder, content_id)

    def _fetch(self, task: Task):
        # pylint: disable=too-many-statements
        with self._lock:
            if task.state is not Task.State.QUEUING:
                raise ValueError(
                    f'task (={task.url}) is invalid state (={task.state})')

            task.state = Task.State.RUNNING
            content_id = task.content_id

        content_filepath = self._to_content_filepath(content_id)
        content = Content(content_id, Content.State.FETCHING)

        try:
            temp_fd, temp_path = tempfile.mkstemp(dir=self.temporary_dir)
            with os.fdopen(temp_fd, 'bw') as temp_file:
                response = self.url_resolver.resolve(task.url)
                response.raise_for_status()

                content_type = response.headers.get('Content-Type')
                content_length_text = response.headers.get('Content-Length')
                content_length = int(
                    content_length_text) if content_length_text else 0
                fetch_size = 0

                with self._lock:
                    task.content_length = content_length
                    task.fetched_size = fetch_size

                for chunk in response.iter_content(chunk_size=65536):
                    fetch_size += len(chunk)
                    with self._lock:
                        task.fetched_size = fetch_size
                        if task.state is not Task.State.RUNNING:
                            raise InterruptedError(
                                f'task (={task.url}) fetch was interrupted')
                    temp_file.write(chunk)

            os.rename(temp_path, content_filepath)

            content_length = os.path.getsize(content_filepath)
            with self._lock:
                self._contents_size += content_length
                task.state = Task.State.SUCCESS

                content.state = Content.State.CACHED
                content.filepath = content_filepath
                content.length = content_length
                content.type = content_type

        except:  # pylint: disable=bare-except
            traceback.print_exc()
            with self._lock:
                content.state = Content.State.FAILED

                if task.state is Task.State.RUNNING:
                    task.state = Task.State.FAILURE
                else:
                    pass  # keep state

            if temp_path is not None:
                os.remove(temp_path)
        finally:
            with self._lock:
                self._contents[content_id] = content
                del self._tasks[task.url]

        self._invoke_callbacks(task)
        self._schedule_save_contents()
        return task

    @staticmethod
    def _invoke_callback(callback, content):
        try:
            callback(content)
        except:  # pylint: disable=bare-except
            traceback.print_exc()

    def _invoke_callbacks(self, task: Task):
        with self._lock:
            task_callbacks_copy = list(task.callbacks)
            task.callbacks.clear()
            content = self._contents[task.content_id]

        for callback in task_callbacks_copy:
            self._invoke_callback(callback, content)

        return task

    def cancel_fetch(self, url: URL):
        with self._lock:
            task = self.try_get_task(url)
            if task is None:
                return

            if task.state != Task.State.RUNNING:
                return

            task.state = Task.State.CANCELED

    def remove_content(self, url: URL) -> bool:
        with self._lock:
            content = self.try_get_content(url)
            if content is None:
                return False

            del self._contents[content.id]

            self._schedule_save_contents()

        return True

    def try_get_content(self, url: URL) -> Union[Content, None]:
        content_id = Content.to_content_id(url)
        with self._lock:
            if content_id not in self._contents:
                return None

            content = self._contents[content_id]

            # LRU implementation
            self._contents.move_to_end(content_id)
            excess_cache_size = max(
                0, self._contents_size - self.max_cache_size_bytes)
            if excess_cache_size > 0:
                # iterate over a snapshot of the keys: entries are removed
                # from the dict while we walk it
                for evict_id in list(self._contents.keys()):
                    evicted = self._contents[evict_id]

                    if evicted.length == 0:
                        continue

                    del self._contents[evict_id]
                    self._contents_size -= evicted.length

                    if os.path.exists(evicted.filepath):
                        try:
                            os.remove(evicted.filepath)
                        except:  # pylint: disable=bare-except
                            traceback.print_exc()

                    excess_cache_size -= evicted.length
                    if excess_cache_size <= 0:
                        break

                self._schedule_save_contents()

            return content

    def try_get_task(self, url: URL) -> Union[Task, None]:
        with self._lock:
            return self._tasks[url] if url in self._tasks else None

    def async_get_content(self, url: URL, callback: Callback) -> Future:
        with self._lock:
            content = self.try_get_content(url)
            if content is not None:
                if content.state in {
                        Content.State.CACHED, Content.State.FETCHING
                }:
                    return self._executor.submit(self._invoke_callback,
                                                 callback, content)

                self.remove_content(url)

            elif url in self._tasks:
                task = self._tasks[url]

                if task.state in {Task.State.QUEUING, Task.State.RUNNING}:
                    task.callbacks.append(callback)
                    return task.future

            task = Task(url, Task.State.QUEUING, [callback])
            task.future = self._executor.submit(self._fetch, task)
            self._tasks[url] = task
            return task.future
Example #20
def make_initial_simplex_table(simplex_table: OrderedDict, constraints: list,
                               f_o: dict, big_m, m1, urs):
    """ 
    <summary>
        <args>
            simplex_table: type OrderedDict, se le ingresa vacío para utilizarlo para hacer la matriz de f_o y constraints.
            constraints: type lista de diccionarios, trae la información de los constraints.
            f_o: type dict, trae la información de la función objetivo.
        </args>
        Crea la simplex table inicial preparando para tener big m.
        Retorna la tabla inicial preparada.
    </summary>
    """
    if (m1):
        big_m = -big_m

    # Collect the relevant variables.
    variables = set()
    for i in constraints:
        for k in i.keys():
            variables.add(k)
    for k in f_o.keys():
        variables.add(k)

    f_o_var = set(variables) - set([x for x in f_o.keys()])
    for i in f_o_var:
        f_o.update({i: 0})

    index = 0
    for i in constraints:
        constraints_i = variables - set(i.keys())
        for j in constraints_i:
            constraints[index].update({j: 0})
        index += 1

    temp = {}
    temp.update({k: 0 for k, v in f_o.items()})
    for i in constraints:
        temp.update({k: 0 for k, v in i.items()})
    del temp['symbol']

    #
    # print('\n\n\n\n\n\n\n')
    # print(constraints)
    # print(temp)

    print('\n\n\n')
    # Add the actual variables.
    actual_vars = []
    for k, v in temp.items():
        if (is_actual_variable(k)):
            actual_vars.append(k)
    actual_vars.sort(key=lambda x: x[1:])
    for i in actual_vars:
        simplex_table.update({i: []})
    # Add the slack/excess variables.
    s_e_vars = []
    for k, v in temp.items():
        if (is_slack_excess_variable(k)):
            s_e_vars.append(k)
    s_e_vars.sort(key=lambda x: x[1:])
    for i in s_e_vars:
        simplex_table.update({i: []})
    # Add the artificial variables.
    a_vars = []
    for k, v in temp.items():
        if (is_artifitial_variable(k)):
            a_vars.append(k)
    a_vars.sort(key=lambda x: x[1:])
    for i in a_vars:
        simplex_table.update({i: []})

    # Add the z.
    simplex_table.update({'z': []})

    # Add the c.
    simplex_table.update({'c': []})

    # Copying the information in f_o and constraints to the simplex table:
    for k, v in simplex_table.items():
        simplex_table[k].append(f_o[k])
    for k, v in simplex_table.items():
        for i in range(len(constraints)):
            simplex_table[k].append(constraints[i][k])

    # {'X1': [-2, 0.5, 1, 1], 'X2': [-3, 0.25, 3, 1], 's1': [0, 1, 0, 0], 'e2': [0, 0, -1, 0], 'a2': [-10000, 0, 1, 0], 'a3': [-10000, 0, 0, 1], 'z': [1, 0, 0, 0], 'c': [0, 4, 20, 10], 'pivot': [0, 0, 0, 0], 'VB': [0, 0, 0, 0], 'index': [0, 1, 2, 3]}

    # Transpose.
    simplex_table_T = [{} for x in range(len(simplex_table['z']))]
    for k, v in simplex_table.items():
        for i in range(len(simplex_table_T)):
            simplex_table_T[i].update({k: v[i]})

    # Add URS vars.
    urs_variables = list(zip(actual_vars, urs))
    for i in range(len(simplex_table_T)):
        for j in urs_variables:
            if (j[1] == 1):
                simplex_table_T[i].update({
                    (str(j[0]) + "''"):
                    -simplex_table_T[i][j[0]]
                })
    # for i in simplex_table_T: print(i)
    # exit()

    # new_row:
    # print("simplex_table_T: ", simplex_table_T)
    new_row_0 = []
    for i in simplex_table_T:
        contributes_to_new_row = False
        for k, v in i.items():
            if ((k[0] == 'a') and (v != 0)):
                contributes_to_new_row = True
        if contributes_to_new_row:
            new_row_0.append(i.copy())
    if (len(new_row_0) == 0):
        new_row_0 = [simplex_table_T[0].copy()]

    # print("new_row_0", new_row_0)
    new_row = []
    for i in new_row_0:
        is_first_row = False
        for k, v in i.items():
            if ((k == 'z') and (v == 1)):
                is_first_row = True
                break

        if is_first_row:
            new_row.append(i)
        else:
            new_row.append(mult_row(i, big_m))

    # Sum all the interesting rows.
    # print(new_row)
    new_row_0 = {k: 0 for k, v in new_row[0].items()}
    for i in new_row:
        for k, v in i.items():
            new_row_0[k] += v

    # Make artificial letters 0.
    for k, v in new_row_0.items():
        if (k[0] == 'a'):
            new_row_0[k] = 0

    # Making the new_row_0 the new zeroth row.
    simplex_table_T[0] = new_row_0

    # Adding pivot column and VB column. Adding an index.
    index = 0
    for i in range(len(simplex_table_T)):
        simplex_table_T[i].update({'pivot': 0})
        # simplex_table_T[i].update( {'VB':0} )
        simplex_table_T[i].update({'index': index})
        index += 1

    # Setting up VB.
    n = 1
    index = 0
    for i in simplex_table_T[1:]:
        if (i.get(f"s{n}") != None):
            simplex_table_T[index]['VB'] = simplex_table_T[index][
                f"s{n}"] * simplex_table_T[index]['c']
        elif (i.get(f"e{n}") != None):
            simplex_table_T[index]['VB'] = simplex_table_T[index][
                f"e{n}"] * simplex_table_T[index]['c']
        index += 1
        n += 1

    return simplex_table_T
    def _generate_daily_qst_reference(self) -> Optional[str]:
        stuff = []

        # TRPG daily report (跑团日报)
        if self.daily_qst_thread_id is not None:
            daily_qsts = self.db.get_responses_match(self.date,
                                                     self.daily_qst_thread_id,
                                                     r'^\[头条\]\s*?<br />\r?$')
            if len(daily_qsts) > 0:
                daily_qst = daily_qsts[-1]
                line1 = daily_qst[1].splitlines()[0]
                m = re.search(r'(day .+?)\s*?<br />', line1)
                if m is None:
                    issue_text = ''
                else:
                    issue = m.group(1)
                    if len(issue) > 10:
                        issue = issue[:10] + "…"
                    issue_text = f"〔{issue}〕"
                stuff.append(
                    f"跑团日报{issue_text}:>>No.{daily_qst[0]} (位于原串第{(daily_qst[2]-1)//19+1}页)"
                )

        # Daily Dove report (每日鸽报)
        if True:
            # Issues like "11.5" exist, so allow for several issues on the same day
            daily_dovess = self.db.get_responses_match(
                self.date, 36939614, r'^Daily Dove 每日鸽报.*?<br />\r?$')
            daily_dove_dict = OrderedDict()
            for daily_dove in daily_dovess:
                lines = daily_dove[1].splitlines()
                if len(lines) < 2:
                    continue
                line2 = lines[1]
                m = re.search(r'第(\S*?)期\s*?<br />', line2)
                if m is None:
                    issue = None
                else:
                    issue = m.group(1)
                daily_dove_dict[issue] = daily_dove
            for issue, daily_dove in daily_dove_dict.items():
                if issue is None:
                    issue_text = ''
                else:
                    issue_text = f"〔第{issue}期〕"
                stuff.append(
                    f"每日鸽报{issue_text}:>>No.{daily_dove[0]} (位于原串第{(daily_dove[2]-1)//19+1}页)"
                )

        # Interesting-campaign recommendation report (有趣团推荐报)
        if True:
            third_newspapers = self.db.get_responses_match(
                self.date, 37777146, r'^『.*?报.*?』.*?<br />\r?$')
            third_newspaper_name = None
            if len(third_newspapers) > 0:
                # Assume at most one issue per day
                third_newspaper = third_newspapers[-1]
                line1 = third_newspaper[1].splitlines()[0]
                m = re.search(r'『(.*?报)(.*?)』:?(.*?)<br />', line1)
                if m is not None:  # always true
                    third_newspaper_name = m.group(1)
                    issue_text = ''
                    subhead = m.group(2)
                    if len(subhead) <= 5:
                        issue_text = subhead
                    else:
                        issue_text = subhead[:5] + "…"
                    issue_date = m.group(3)
                    if len(issue_date) > 0:
                        if issue_text != "":
                            issue_text += " "
                        if len(issue_date) <= 5:
                            issue_text += issue_date
                        else:
                            issue_text += issue_date[:5] + "…"
                    if issue_text != "":
                        issue_text = f"〔{issue_text}〕"
                    stuff.append(
                        f"{third_newspaper_name}{issue_text}:>>No.{third_newspaper[0]} (位于原串第{(third_newspaper[2]-1)//19+1}页)"
                    )

        if len(stuff) == 0:
            return None
        return '\n'.join(["当日刊物:"] + stuff) + '\n'
Example #22
# Having built with `yarn build`, run `python3 src/tools/count-build.py extension/background.js`

import sys
from typing import OrderedDict
prefix ="  !*** "
suffix = " ***!"

line_counts = OrderedDict()
active_file = None
active_line_count = None

def store_line_count():
    if active_file and active_line_count:
        line_counts[active_file] = active_line_count

with open(sys.argv[1], 'r') as f:
    for line in f.readlines():
        if line.startswith(prefix):
            store_line_count()
            active_file = line[len(prefix):-len(suffix)]
            active_line_count = 0
        elif active_file:
            active_line_count += 1
    store_line_count()

print("\n".join(( " " + str(line_count).ljust(10) + file_name) for file_name, line_count in line_counts.items()))
Example #23
def main():

    print("\n Paddlepaddle version: {}\n".format(paddle.__version__))

    args = parse_args()

    # Initialize the parallel environment
    # dist.init_parallel_env()

    # Load the datasets
    train_dataset = paddle.vision.datasets.MNIST(mode='train',
                                                 transform=ToTensor())
    val_dataset = paddle.vision.datasets.MNIST(mode='test',
                                               transform=ToTensor())

    train_loader = paddle.io.DataLoader(train_dataset,
                                        batch_size=args.batch_size,
                                        shuffle=True)
    test_loader = paddle.io.DataLoader(val_dataset, batch_size=args.batch_size)

    # Build the model
    mnist = Mnist()
    paddle.summary(net=mnist, input_size=(-1, 1, 28, 28))
    # Wrap the model with paddle.DataParallel
    # mnist = paddle.DataParallel(mnist)

    optim = paddle.optimizer.Adam(parameters=mnist.parameters())
    loss_fn = paddle.nn.CrossEntropyLoss()

    start_epoch = 0
    epochs = args.epochs

    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        info = np.load('./weights/info.npy', allow_pickle=True).item()
        start_epoch = info['epoch'] + 1
        val_loss = info['loss']
        val_acc = info['acc']
        print('Epoch {}, validation loss is: {loss:.4f}, validation accuracy is {acc:.4f}\n'\
            .format(start_epoch,loss=val_loss,acc=val_acc))
        mnist_state_dict = paddle.load('./weights/mnist.pdparams')
        mnist.set_state_dict(mnist_state_dict)
        optim_state_dict = paddle.load('./weights/optim.pdopt')
        optim.set_state_dict(optim_state_dict)

    best_acc = 0.0
    for epoch in range(start_epoch, epochs):
        # Train
        mnist.train()
        loader = tqdm.tqdm(train_loader)
        for batch_id, (image, label) in enumerate(loader):
            predicts = mnist(image)
            loss = loss_fn(predicts, label)
            acc = paddle.metric.accuracy(predicts, label)
            loss.backward()
            optim.step()
            optim.clear_grad()
            description = (
                'Epoch {} (loss: {loss:.4f}, acc: {acc:.4f})'.format(
                    epoch, loss=loss.numpy().item(), acc=acc.numpy().item()))
            loader.set_description(description)

        # Evaluate
        mnist.eval()
        losses = 0.0
        accuracy = 0.0
        count = 0
        for batch_id, (image, label) in enumerate(test_loader):
            predicts = mnist(image)
            loss = loss_fn(predicts, label)
            acc = paddle.metric.accuracy(predicts, label)
            count += 1
            losses += loss.numpy().item()
            accuracy += acc.numpy().item()
        val_loss = losses / count
        val_acc = accuracy / count
        print("Testing: loss:{loss:.4f}, acc: {acc:.4f}".format(loss=val_loss,
                                                                acc=val_acc))

        # Save the evaluation results
        result = OrderedDict()
        result['timestamp'] = datetime.now()
        result['epoch'] = epoch
        result['loss'] = val_loss
        result['accuracy'] = val_acc

        result_dir = './result/'
        if not os.path.exists(result_dir) and result_dir != '':
            os.makedirs(result_dir)
        result_file = os.path.join(result_dir, 'valid_results.csv')
        write_heading = not os.path.exists(result_file)
        with open(result_file, mode='a') as out:
            if write_heading:
                out.write(",".join([str(k) for k, v in result.items()]) + '\n')
            out.write(",".join([str(v) for k, v in result.items()]) + '\n')

        # Save the parameters
        print('Saving checkpoint..')
        state = {'epoch': epoch, 'loss': val_loss, 'acc': val_acc}
        # Currently only the state_dict of a Layer or Optimizer can be saved.
        np.save('./weights/info.npy', state, allow_pickle=True)  # save related info
        paddle.save(mnist.state_dict(), './weights/mnist.pdparams')
        paddle.save(optim.state_dict(), './weights/optim.pdopt')

        # Save the model and parameters for deployment
        if val_acc > best_acc:
            best_acc = val_acc
            paddle.jit.save(
                mnist,
                './deploy/mnist',
                input_spec=[InputSpec(shape=[1, 1, 28, 28], dtype='float32')])
Example #24
def count_matches(ed1, ed2):
    m = 0
    for e1 in ed1:
        hasmatch = False
        for e2 in ed2:
            if (e1 == e2).all():
                hasmatch = True
                break
        if hasmatch:
            m += 1
    return m
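# Usage sketch (assumed; numpy is imported as np in the full script and edges
# are equal-length arrays): the two edge sets below share exactly one edge.
edges_a = [np.array([1, 0, 1, 1]), np.array([0, 0, 1, 0])]
edges_b = [np.array([1, 0, 1, 1]), np.array([1, 1, 1, 1])]
print(count_matches(edges_a, edges_b))  # -> 1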


available_matches = np.zeros((len(tiles), len(tiles)))
i = 0
for id, im in tiles.items():
    edges = get_edges(im)
    j = 0
    for id2, im2 in tiles.items():
        if id != id2:
            edges2 = get_edges(im2)
            available_matches[i, j] = count_matches(edges, edges2)
        j += 1
    i += 1

indices_with_two_matches = np.where((available_matches > 0).sum(
    axis=0) == 2)[0]
prod = 1
for index, id in enumerate(tiles.keys()):
    if index in indices_with_two_matches:
        prod *= id
Example #25
def build_model(config: Mapping, cardinalities: Mapping[str,
                                                        int]) -> keras.Model:
    """Construct model specified in the configuration.

    Also create optimizer and set the loss function.

    Args:
        config:  Dictionary representing configuration file.
        cardinalities:  Cardinalities of categorical features (needed to
            construct their embeddings).

    Return:
        Compiled model.
    """

    model_config = config['model']
    if isinstance(model_config, str):
        model = keras.models.load_model(
            model_config,
            custom_objects={'loss_fn': _create_loss(config['loss'])})

        return model

    features = Features(config['features'])
    inputs_all = []

    # Constituents of different types
    constituent_types = [
        key for key in sorted(model_config.keys())  # Ensure order
        if key not in {'head', 'load_weights'}
    ]
    outputs_constituents = []
    for constituent_type in constituent_types:
        inputs_numerical = keras.Input(
            shape=(None, len(features.numerical(constituent_type))),
            ragged=True,
            name=f'{constituent_type}_numerical')
        inputs_categorical = OrderedDict()
        for feature in features.categorical(constituent_type):
            inputs_categorical[feature] = keras.Input(shape=(None, ),
                                                      ragged=True,
                                                      name=feature)
        inputs_all.append(inputs_numerical)
        inputs_all.extend(inputs_categorical.values())

        outputs = _apply_deep_set(inputs_numerical, inputs_categorical,
                                  model_config[constituent_type],
                                  cardinalities, constituent_type)
        outputs_constituents.append(outputs)

    # Head
    inputs_global_numerical = keras.Input(shape=(len(
        features.numerical('global')), ),
                                          name='global_numerical')
    inputs_global_categorical = OrderedDict()
    for feature in features.categorical('global'):
        inputs_global_categorical[feature] = keras.Input(shape=(None, ),
                                                         name=feature)
    embeddings_global = {
        feature: Embedding(cardinalities[feature],
                           model_config['head']['embeddings'][feature],
                           name=feature + '_embeddings')(inputs)
        for feature, inputs in inputs_global_categorical.items()
    }
    inputs_all.append(inputs_global_numerical)
    inputs_all.extend(inputs_global_categorical.values())
    inputs_head = Concatenate(
        name='head_concatenate')([inputs_global_numerical] + [
            embeddings_global[feature]
            for feature in inputs_global_categorical
        ] + outputs_constituents)
    outputs = _apply_dense_from_config(inputs_head,
                                       model_config['head'],
                                       name_prefix='head_')

    outputs = Dense(1, name='head_dense_output')(outputs)  # Output unit
    model = keras.Model(inputs=inputs_all, outputs=outputs, name='full')

    model.compile(optimizer=_create_optimizer(config.get('optimizer', None)),
                  loss=_create_loss(config['loss']))
    if 'load_weights' in model_config:
        # Normally, a saved model would be loaded with
        # keras.models.load_model at the beginning of this function.
        # However, this is currently not supported for models that use
        # ragged tensors [1].  As a workaround, construct the model anew
        # and then load saved weights.  The path to weights would
        # usually be "{model_directory}/variables/variables", with the
        # ".index" file extension stripped off.  This doesn't restore
        # the state of the optimizer.
        # [1] https://github.com/tensorflow/tensorflow/issues/41034
        model.load_weights(model_config['load_weights'])
    return model
Example #26
def switch_keys_with_values(dictionary: OrderedDictType) -> OrderedDictType:
    result = OrderedDict([(v, k) for k, v in dictionary.items()])
    return result
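# Usage sketch (assumed): values become keys; if values repeat, the last key
# seen wins, so the mapping should be one-to-one for a lossless swap.
from collections import OrderedDict

print(switch_keys_with_values(OrderedDict([('a', 1), ('b', 2)])))
# OrderedDict([(1, 'a'), (2, 'b')])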
Example #27
class InventoryAllocator:
    def __init__(self, inventory):
        """
        function that initializes the warehouse.
        params:
            inventory: dictionary mapping each warehouse to its inventory
        """
        self.warehouse_stock = OrderedDict()
        self.set_warehouse(inventory)

    def set_warehouse(self, inventory):
        """
        function that sets and updates inventories.
        params:
            inventory : dict
                dictionary containing information about single/multiple warehouses and their inventory
        """

        for warehouse in inventory:
            if warehouse["name"] not in self.warehouse_stock:
                self.warehouse_stock[
                    warehouse["name"]] = warehouse["inventory"]
            else:
                for item, quantity in warehouse["inventory"].items():
                    if item not in self.warehouse_stock[warehouse["name"]]:
                        self.warehouse_stock[warehouse["name"]][
                            item] = warehouse["inventory"][item]
                    else:
                        self.warehouse_stock[warehouse["name"]][item] = (
                            quantity +
                            self.warehouse_stock[warehouse["name"]][item])

    def update_stock(self, warehouse_name, item_name, item_stock):
        """
        function that updates stock in a specific warehouse.
        params:
            warehouse_name : str
                Name of warehouse to update.

            item_name : str
                Name of item to update.
            item_stock : int
                Amount of item to update.
        """
        self.warehouse_stock[warehouse_name][item_name] = item_stock

    def create_shipment_detail(self, order):
        """
        function used to find the cheapest way to fulfill an order
        params:
            order: A dictionary that maps item needed to amount of item needed
        """

        warehouse_item_distribution_amounts = defaultdict(lambda: {})
        for item_name, amount_required in order.items():
            item_distribution_amounts = {}

            for warehouse_name, warehouse_inventory in self.warehouse_stock.items(
            ):

                if item_name not in warehouse_inventory or warehouse_inventory[
                        item_name] <= 0:
                    continue

                item_stock = warehouse_inventory[item_name]

                if amount_required <= item_stock:
                    item_distribution_amounts[warehouse_name] = amount_required
                    item_stock -= amount_required
                    amount_required = 0
                    self.update_stock(warehouse_name, item_name, item_stock)
                    break

                elif amount_required > item_stock:
                    item_distribution_amounts[warehouse_name] = item_stock
                    amount_required -= item_stock
                    item_stock = 0
                    self.update_stock(warehouse_name, item_name, item_stock)

            if amount_required > 0:
                return []

            for warehouse_name, amount in item_distribution_amounts.items():
                warehouse_item_distribution_amounts[warehouse_name][
                    item_name] = amount

        final_allocation = []

        for name, items in warehouse_item_distribution_amounts.items():
            final_allocation.append({name: items})

        return final_allocation
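# Usage sketch (assumed input shape, matching set_warehouse above; OrderedDict
# and defaultdict are imported as in the full module): an order for 5 apples is
# split across the two warehouses, the first-listed one drained first.
allocator = InventoryAllocator([
    {"name": "owd", "inventory": {"apple": 3}},
    {"name": "dm", "inventory": {"apple": 5}},
])
print(allocator.create_shipment_detail({"apple": 5}))
# [{'owd': {'apple': 3}}, {'dm': {'apple': 2}}]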
Example #28
def make_initial_simplex_table(simplex_table: OrderedDict, constraints: list,
                               f_o: dict, big_m):
    """ 
    <summary>

    </summary>
    """
    # Collect the relevant variables.
    variables = set()
    for i in constraints:
        for k in i.keys():
            variables.add(k)
    for k in f_o.keys():
        variables.add(k)

    f_o_var = set(variables) - set([x for x in f_o.keys()])
    for i in f_o_var:
        f_o.update({i: 0})

    index = 0
    for i in constraints:
        constraints_i = variables - set(i.keys())
        for j in constraints_i:
            constraints[index].update({j: 0})
        index += 1

    temp = {}
    temp.update({k: 0 for k, v in f_o.items()})
    for i in constraints:
        temp.update({k: 0 for k, v in i.items()})
    del temp['symbol']

    # Add the actual variables.
    actual_vars = []
    for k, v in temp.items():
        if (is_actual_variable(k)):
            actual_vars.append(k)
    actual_vars.sort(key=lambda x: x[1:])
    for i in actual_vars:
        simplex_table.update({i: []})
    # Add the slack/excess variables.
    s_e_vars = []
    for k, v in temp.items():
        if (is_slack_excess_variable(k)):
            s_e_vars.append(k)
    s_e_vars.sort(key=lambda x: x[1:])
    for i in s_e_vars:
        simplex_table.update({i: []})
    # Add the artificial variables.
    a_vars = []
    for k, v in temp.items():
        if (is_artifitial_variable(k)):
            a_vars.append(k)
    a_vars.sort(key=lambda x: x[1:])
    for i in a_vars:
        simplex_table.update({i: []})

    # Add the z.
    simplex_table.update({'z': []})

    # Add the c.
    simplex_table.update({'c': []})

    # Copying the information in f_o and constraints to the simplex table:
    for k, v in simplex_table.items():
        simplex_table[k].append(f_o[k])
    for k, v in simplex_table.items():
        for i in range(len(constraints)):
            simplex_table[k].append(constraints[i][k])

    # {'X1': [-2, 0.5, 1, 1], 'X2': [-3, 0.25, 3, 1], 's1': [0, 1, 0, 0], 'e2': [0, 0, -1, 0], 'a2': [-10000, 0, 1, 0], 'a3': [-10000, 0, 0, 1], 'z': [1, 0, 0, 0], 'c': [0, 4, 20, 10], 'pivot': [0, 0, 0, 0], 'VB': [0, 0, 0, 0], 'index': [0, 1, 2, 3]}

    # Transpose.
    simplex_table_T = [{} for x in range(len(simplex_table['z']))]
    for k, v in simplex_table.items():
        for i in range(len(simplex_table_T)):
            simplex_table_T[i].update({k: v[i]})

    # new_row:
    new_row_0 = []
    for i in simplex_table_T:
        contributes_to_new_row = False
        for k, v in i.items():
            if ((k[0] == 'a') and (v != 0)):
                contributes_to_new_row = True
        if contributes_to_new_row:
            new_row_0.append(i.copy())

    new_row = []
    for i in new_row_0:
        is_first_row = False
        for k, v in i.items():
            if ((k == 'z') and (v == 1)):
                is_first_row = True
                break

        if is_first_row:
            new_row.append(i)
        else:
            new_row.append(mult_row(i, big_m))

    # Sum all the interesting rows.
    new_row_0 = {k: 0 for k, v in new_row[0].items()}
    for i in new_row:
        for k, v in i.items():
            new_row_0[k] += v

    # Make artificial letters 0.
    for k, v in new_row_0.items():
        if (k[0] == 'a'):
            new_row_0[k] = 0

    # Making the new_row_0 the new zeroth row.
    simplex_table_T[0] = new_row_0

    # Adding pivot column and VB column. Adding an index.
    index = 0
    for i in range(len(simplex_table_T)):
        simplex_table_T[i].update({'pivot': 0})
        simplex_table_T[i].update({'VB': 0})
        simplex_table_T[i].update({'index': index})
        index += 1

    for i in simplex_table_T:
        print(i)

    return simplex_table_T
Example #29
class LRUCacheStrategy(MemoryCacheStrategy[K, V]):
    """strategy which enforces a size limit with LRU"""
    __slots__ = ("storage", "lock", "max_entries")

    storage: OrderedDict[K, V]

    lock: Lock  # OrderedDict is not thread safe

    max_entries: int

    def __init__(self, max_entries: int) -> None:
        self.storage = OrderedDict()
        self.lock = Lock()
        self.max_entries = max_entries

    def __eq__(self, other: object) -> bool:
        if isinstance(other, LRUCacheStrategy):
            return self.storage == other.storage \
                and self.max_entries == other.max_entries
        return NotImplemented

    def __getitem__(self, key: K) -> V:
        """get a value, setting it as the most recently used one"""
        with self.lock:
            self.storage.move_to_end(
                key, last=False)  # higher index = longer time since last use
            return self.storage[key]

    def __setitem__(self, key: K, value: V) -> None:
        """set a value, removing old ones if necessary"""
        with self.lock:
            if key not in self.storage and len(
                    self.storage) == self.max_entries:
                self.storage.popitem(
                )  # make space for new entry by removing the last element
            self.storage[key] = value

    def __delitem__(self, key: K) -> None:
        """remove a value"""
        with self.lock:
            del self.storage[key]

    def __iter__(self) -> Iterator[K]:
        return iter(self.storage)

    def __len__(self) -> int:
        return len(self.storage)

    def __contains__(self, key: object) -> bool:
        return key in self.storage

    def keys(self) -> KeysView[K]:
        return self.storage.keys()

    def values(self) -> ValuesView[V]:
        return self.storage.values()

    def items(self) -> ItemsView[K, V]:
        return self.storage.items()

    def peek(self, key: K) -> V:
        """get the value of key without triggering side effects like changing its priority"""
        with self.lock:
            return self.storage[key]

    @overload
    def pop(self, key: K) -> V:
        ...

    @overload
    def pop(self, key: K, default: Union[V, T] = ...) -> Union[V, T]:
        ...

    def pop(self,
            key: K,
            default: Union[V,
                           T] = POP_SENTINEL) -> Union[V, T]:  # type: ignore
        """remove a value and return it"""
        with self.lock:
            if default is POP_SENTINEL:
                return self.storage.pop(key)
            return self.storage.pop(key, default)

    def popitem(self) -> Tuple[K, V]:
        """remove the least recently used key-value pair and return it"""
        with self.lock:
            return self.storage.popitem()

    def clear(self) -> None:
        """remove all values"""
        with self.lock:
            self.storage.clear()
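# Usage sketch (assumed; relies on the generics, Lock and POP_SENTINEL names
# from the full module): a two-entry cache evicts its least recently used key.
cache = LRUCacheStrategy(max_entries=2)
cache['a'] = 1
cache['b'] = 2
_ = cache['a']      # marks 'a' as most recently used
cache['c'] = 3      # third insert evicts 'b', the least recently used entry
print(len(cache), sorted(cache.keys()))  # -> 2 ['a', 'c']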