def verticalTraversal(self, root: TreeNode) -> List[List[int]]:
    node_list = []

    def BFS(root):
        queue = deque()
        queue.append((root, 0, 0))
        while queue:
            node, row, column = queue.popleft()
            if node:
                node_list.append((column, row, node.val))
                queue.append((node.left, row + 1, column - 1))
                queue.append((node.right, row + 1, column + 1))

    # step 1 - construct the node list with coordinates
    BFS(root)

    # step 2 - sort the node list according to coordinates
    node_list.sort()

    # step 3 - retrieve sorted results partitioned by columns
    # use an ordered dictionary for keys
    output = OrderedDict()
    for column, row, value in node_list:
        if column not in output:
            output[column] = [value]
        else:
            output[column].append(value)

    return list(output.values())
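
A quick worked check of the three steps above; the minimal TreeNode and the sample tree are illustrative assumptions, not part of the original snippet.

# Minimal sketch, assuming a LeetCode-style binary tree node.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

#        3
#       / \
#      9  20
#         / \
#        15  7
root = TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
# BFS tags each node as (column, row, value); sorting and grouping by column
# yields [[9], [3, 15], [20], [7]]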
Example #2
    @staticmethod  # assumed: the signature takes no self/cls
    def merge_interactive(other: Optional['NodeTemplate'],
                          func: Callable[[PythonNode], None],
                          inputs: List[Tuple[str, Type]],
                          outputs: List[Tuple[str, Type]],
                          name: str = None,
                          in_port_size: int = 0,
                          latency: Latency = None,
                          lvl: int = logging.ERROR,
                          tags: List[str] = None) -> BodyTemplate:
        """Create a :py:class:`BodyTemplate` for the bodies and constructor
        created using the ``@Interactive`` decorator. Associate this with an
        existing or newly created ``NodeTemplate``.
        """
        if not isinstance(inputs, list):
            raise TypeError('Please provide types of input parameters as a list')

        # Check I/O only after the type check, so len() is only called on a list
        if len(inputs) + len(outputs) == 0:
            raise data_types.DeltaIOError('Interactive node must have either an input '
                                          'or an output. Otherwise it may freeze '
                                          'the runtime simulator.')

        inputs = inputs_as_delta_types(OrderedDict(inputs))
        outputs = outputs_as_delta_types(OrderedDict(outputs))
        body_template = InteractiveBodyTemplate(name, latency, lvl, func, tags)
        return NodeTemplate._standardised_merge(other,
                                                body_template,
                                                node_key=None,
                                                in_port_size=in_port_size,
                                                inputs=inputs,
                                                outputs=outputs)
Example #3
    def _parse_losses(self, losses):
        """Parse the raw outputs (losses) of the network.

        Args:
            losses (dict): Raw output of the network, which usually contains
                losses and other necessary information.

        Returns:
            tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor \
                which may be a weighted sum of all losses, log_vars contains \
                all the variables to be sent to the logger.
        """
        log_vars = OrderedDict()
        for loss_name, loss_value in losses.items():
            if isinstance(loss_value, torch.Tensor):
                log_vars[loss_name] = loss_value.mean()
            elif isinstance(loss_value, list):
                log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
            else:
                raise TypeError(
                    f'{loss_name} is not a tensor or list of tensor')

        loss = sum(_value for _key, _value in log_vars.items()
                   if 'loss' in _key)

        log_vars['loss'] = loss
        for loss_name, loss_value in log_vars.items():
            if dist.is_available() and dist.is_initialized():
                loss_value = loss_value.data.clone()
                dist.all_reduce(loss_value.div_(dist.get_world_size()))
            log_vars[loss_name] = loss_value.item()

        return loss, log_vars
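
A hedged usage sketch of `_parse_losses`; the losses dict and the `model` handle are illustrative assumptions, not part of the original snippet.

import torch

# Keys containing 'loss' are summed into the total; everything else is only logged.
losses = {
    'loss_cls': torch.tensor([0.9, 1.1]),                  # mean -> 1.0
    'loss_bbox': [torch.tensor(0.2), torch.tensor(0.4)],   # sum of means -> 0.6
    'acc': torch.tensor(85.0),                             # logged, not summed
}
loss, log_vars = model._parse_losses(losses)   # 'model' assumed to own the method
# loss -> tensor(1.6); log_vars -> {'loss_cls': 1.0, 'loss_bbox': 0.6,
#                                   'acc': 85.0, 'loss': 1.6}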
Example #4
    def __init__(self, block_units, width_factor):
        super().__init__()
        width = int(64 * width_factor)
        self.width = width

        # fmt: off
        self.root = nn.Sequential(OrderedDict([
            ('conv', StdConv2d(3, width, kernel_size=7, stride=2, bias=False, padding=3)),
            ('gn', nn.GroupNorm(32, width, eps=1e-6)),
            ('relu', nn.ReLU(inplace=True)),
        ]))

        self.body = nn.Sequential(OrderedDict([
            ('block1', nn.Sequential(OrderedDict(
                [('unit1', PreActBottleneck(cin=width, cout=width*4, cmid=width))] +
                [(f'unit{i:d}', PreActBottleneck(cin=width*4, cout=width*4, cmid=width)) for i in range(2, block_units[0] + 1)],
                ))),
            ('block2', nn.Sequential(OrderedDict(
                [('unit1', PreActBottleneck(cin=width*4, cout=width*8, cmid=width*2, stride=2))] +
                [(f'unit{i:d}', PreActBottleneck(cin=width*8, cout=width*8, cmid=width*2)) for i in range(2, block_units[1] + 1)],
                ))),
            ('block3', nn.Sequential(OrderedDict(
                [('unit1', PreActBottleneck(cin=width*8, cout=width*16, cmid=width*4, stride=2))] +
                [(f'unit{i:d}', PreActBottleneck(cin=width*16, cout=width*16, cmid=width*4)) for i in range(2, block_units[2] + 1)],
                ))),
        ]))
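
A hedged instantiation sketch, assuming the surrounding ResNetV2 class; the block configuration shown is the common R50-style hybrid setup.

backbone = ResNetV2(block_units=(3, 4, 9), width_factor=1)   # width = 64
# Channel progression: root 3 -> 64 (stride 2), block1 64 -> 256,
# block2 256 -> 512 (stride 2), block3 512 -> 1024 (stride 2).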
Example #5
def create_clob_data_object():
    logging.debug('------------------------------------------------------------')
    logging.debug('Inside: create_clob_data_object()')
    logging.debug("Loading the data file")

    # Phone Data
    phone01 = OrderedDict([('id', '1'), ('phoneNumber', '(605) 336-2880'), ('isInherited', 'Y')])
    phones = OrderedDict([('phone', phone01)])

    # Email Data
    email01 = OrderedDict([('id', '1'), ('address', '*****@*****.**'), ('isInherited', 'Y')])
    emails = OrderedDict([('email', email01)])

    # Education data
    education01 = OrderedDict([('type', 'LE'), ('schoolName', 'Nicholaus Copernicus'), ('graduationDate', '1952')])
    education02 = OrderedDict([('type', 'LW'), ('schoolName', 'Happy Law School'), ('graduationDate', '1991')])
    education03 = OrderedDict([('type', 'LQ'), ('schoolName', 'Law School Number 1'), ('graduationDate', '1911')])
    educations = OrderedDict([('education', [education01, education02, education03])])

    clob_object = OrderedDict([('arbitrator', OrderedDict([('profileUuid', 'Iaac53e00bd9a11de9b8c850332338889'),
                                                           ('phones', phones),
                                                           ('emails', emails),
                                                           ('educations', educations),
                                                           ('statusType', 'A')]))])

    # print(json.dumps(clob_object, indent=2))
    return clob_object
Example #6
    def messages_per_actors_per_weekday(self, chats: List[Chat]) -> None:
        """
        """
        dataframes: Dict[Chat, DataFrame] = {}
        title = 'Participation Status (Messages per Actors per Weekday)'

        bars = ['Qtd_messages']

        types = {
            'User Messages': 'messages',
            'System Messages': 'system_messages'
        }

        msg = 'Choose the message type:'
        result = select(msg, list(types.keys())).ask()

        message_type = types[result]

        for chat in chats:
            data = OrderedDict({weekday: 0 for weekday in weekdays})

            for message in getattr(chat, message_type):
                data[message.created_at.strftime('%A')] += 1

            index = list(data.keys())
            rows = list(data.values())

            dataframe = DataFrame(rows, index=index, columns=bars)
            dataframes[chat] = dataframe

        generate_chart(dataframes, bars=bars, lines=[], title=title)
Example #7
def check_netapp_api_snapvault(
    item: str,
    params: Mapping[str, Any],
    section: SectionSingleInstance,
) -> CheckResult:
    snapvault = section.get(item)
    if not snapvault:
        return

    for key in ["source-system", "destination-system", "policy", "status", "state"]:
        if key in snapvault:
            yield Result(state=State.OK, summary="%s: %s" % (key.title(), snapvault[key]))

    if 'lag-time' not in snapvault:
        return

    lag_time = int(snapvault['lag-time'])

    policy_lag_time = OrderedDict(params.get('policy_lag_time', []))
    levels = policy_lag_time.get(snapvault.get('policy'))

    if not levels:
        levels = params.get('lag_time', (None, None))

    yield from check_levels(
        value=lag_time,
        levels_upper=levels,
        render_func=render.timespan,
        label='Lag time',
    )
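
An illustrative params sketch for the check above; the keys follow the code's lookups, while the concrete values are assumptions.

# Per-policy upper levels are tried first; the generic 'lag_time' levels are the fallback.
params = {
    'policy_lag_time': [('daily_backup', (86400, 172800))],  # (warn, crit) in seconds
    'lag_time': (3600, 7200),
}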
Example #8
 def load(self, config: OrderedDict) -> "IUHistory":
     """Load config data"""
     if config is None:
         config = {}
     self._history_span = timedelta(days=config.get(CONF_HISTORY_SPAN, 7))
     self._history_refresh = timedelta(seconds=config.get(CONF_HISTORY_REFRESH, 120))
     return self
Example #9
    def set_parameters(self, parameters):
        # generator
        g_par = parameters[:self.g_w_l].copy()
        params_dict = zip(self.generator.state_dict().keys(), g_par)
        g_state_dict = OrderedDict(
            {k: torch.Tensor(v) for k, v in params_dict})
        # discriminator
        d_par = parameters[self.g_w_l:int(self.g_w_l + self.d_w_l)].copy()
        params_dict = zip(self.discriminator.state_dict().keys(), d_par)
        d_state_dict = OrderedDict(
            {k: torch.Tensor(v) for k, v in params_dict})
        # encoder
        e_par = parameters[int(self.g_w_l + self.d_w_l):].copy()
        params_dict = zip(self.encoder.state_dict().keys(), e_par)
        e_state_dict = OrderedDict(
            {k: torch.Tensor(v) for k, v in params_dict})
        # checking for null weights
        g_state_dict = check_weights_dict(g_state_dict)
        d_state_dict = check_weights_dict(d_state_dict)
        e_state_dict = check_weights_dict(e_state_dict)
        # assigning weights
        self.generator.load_state_dict(g_state_dict, strict=True)
        self.discriminator.load_state_dict(d_state_dict, strict=True)
        self.encoder.load_state_dict(e_state_dict, strict=True)
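
A hedged companion sketch for the opposite direction, assuming the Flower-style convention that parameters travel as one flat list of NumPy arrays; get_parameters is not part of the original snippet.

    def get_parameters(self):
        # Flatten the three state_dicts in the same order set_parameters expects.
        g = [v.cpu().numpy() for v in self.generator.state_dict().values()]
        d = [v.cpu().numpy() for v in self.discriminator.state_dict().values()]
        e = [v.cpu().numpy() for v in self.encoder.state_dict().values()]
        return g + d + e  # sliced back via g_w_l and d_w_l in set_parameters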
Example #10
async def async_setup(hass: HomeAssistant, config):
    """Register custom view which includes request in context"""
    # Because we start after auth, we have access to store_result
    store_result = hass.data[AUTH_DOMAIN]
    # Remove old LoginFlowIndexView
    # Iterate over a copy: removing from the live list while looping over it
    # would skip entries
    for route in list(hass.http.app.router._resources):
        if route.canonical == "/auth/login_flow":
            _LOGGER.debug("Removed original login_flow route")
            hass.http.app.router._resources.remove(route)
    _LOGGER.debug("Add new login_flow route")
    hass.http.register_view(
        RequestLoginFlowIndexView(hass.auth.login_flow, store_result,
                                  config[DOMAIN]["debug"]))

    # Inject Auth-Header provider.
    providers = OrderedDict()
    provider = headers.HeaderAuthProvider(
        hass,
        hass.auth._store,
        config[DOMAIN],
    )
    providers[(provider.type, provider.id)] = provider
    providers.update(hass.auth._providers)
    hass.auth._providers = providers
    _LOGGER.debug("Injected auth_header provider")
    return True
Example #11
def conditions_expr(dataset,
                    conditions,
                    query: Query,
                    parsing_context: ParsingContext,
                    depth=0):
    """
    Return a boolean expression suitable for putting in the WHERE clause of the
    query.  The expression is constructed by ANDing groups of OR expressions.
    Expansion of columns is handled, as is replacement of columns with aliases,
    if the column has already been expanded and aliased elsewhere.
    """
    from snuba.clickhouse.columns import Array

    if not conditions:
        return ''

    if depth == 0:
        # dedupe conditions at top level, but keep them in order
        sub = OrderedDict(
            (conditions_expr(dataset, cond, query, parsing_context, depth + 1),
             None) for cond in conditions)
        return u' AND '.join(s for s in sub.keys() if s)
    elif is_condition(conditions):
        lhs, op, lit = dataset.process_condition(conditions)

        # facilitate deduping IN conditions by sorting them.
        if op in ('IN', 'NOT IN') and isinstance(lit, tuple):
            lit = tuple(sorted(lit))

        # If the LHS is a simple column name that refers to an array column
        # (and we are not arrayJoining on that column, which would make it
        # scalar again) and the RHS is a scalar value, we assume that the user
        # actually means to check if any (or all) items in the array match the
        # predicate, so we return an `any(x == value for x in array_column)`
        # type expression. We assume that operators looking for a specific value
        # (IN, =, LIKE) are looking for rows where any array value matches, and
        # exclusionary operators (NOT IN, NOT LIKE, !=) are looking for rows
        # where all elements match (eg. all NOT LIKE 'foo').
        columns = dataset.get_dataset_schemas().get_read_schema().get_columns()
        if (isinstance(lhs, str) and lhs in columns
                and isinstance(columns[lhs].type, Array)
                and columns[lhs].base_name != query.get_arrayjoin()
                and not isinstance(lit, (list, tuple))):
            any_or_all = 'arrayExists' if op in POSITIVE_OPERATORS else 'arrayAll'
            return u'{}(x -> assumeNotNull(x {} {}), {})'.format(
                any_or_all, op, escape_literal(lit),
                column_expr(dataset, lhs, query, parsing_context))
        else:
            return u'{} {} {}'.format(
                column_expr(dataset, lhs, query, parsing_context), op,
                escape_literal(lit))

    elif depth == 1:
        sub = (conditions_expr(dataset, cond, query, parsing_context,
                               depth + 1) for cond in conditions)
        sub = [s for s in sub if s]
        res = u' OR '.join(sub)
        return u'({})'.format(res) if len(sub) > 1 else res
    else:
        raise InvalidConditionException(str(conditions))
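
An illustrative sketch of the nesting the docstring describes: top-level entries are ANDed together and a nested list forms an OR group (column names and values hypothetical).

conditions = [
    ['environment', '=', 'production'],                        # simple condition
    [['platform', '=', 'python'], ['platform', '=', 'node']],  # OR group
]
# -> environment = 'production' AND (platform = 'python' OR platform = 'node')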
Example #12
    def containsNearbyAlmostDuplicate(self, nums: List[int], k: int,
                                      t: int) -> bool:
        n = len(nums)
        if n == 0 or k <= 0 or t < 0: return False

        d = OrderedDict()

        for i, e in enumerate(nums):
            pool = [num[0] for num in d.keys()]
            pool.sort()
            print(f"Keys are: {' '.join(map(str, pool))}")
            l_b = bisect.bisect_left(pool, e)

            print(f"Foud lower bound {l_b} for e={e} at i={i}")
            if l_b < len(pool) and pool[l_b] <= e + t: return True
            u_b = bisect.bisect_right(pool, e - t - 1)

            print(f"Foud upper bound {u_b} for e={e - t - 1} at i={i}")
            if u_b < len(pool) and pool[u_b] >= e - t and pool[u_b] <= e:
                return True

            if len(d) == k:
                d.popitem(last=False)

            d[(e, i)] = i

        return False
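
Two quick behaviour checks using the well-known examples for this problem (expected results shown as comments).

# containsNearbyAlmostDuplicate([1, 2, 3, 1], k=3, t=0) -> True   (two 1s, distance 3)
# containsNearbyAlmostDuplicate([1, 5, 9, 1, 5, 9], k=2, t=3) -> False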
Example #13
def format_image_link(line: OrderedDict):
    image_pattern = re.compile(r'.*"(?P<link>.*)".*')
    image: str = line.pop('image')
    res = image_pattern.search(image)
    if res is None:  # no quoted link found; keep the original value
        line['image'] = image
        return line
    line.update({'image': res.group('link')})
    return line
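
A small hedged usage sketch; the row content is hypothetical.

row = OrderedDict(image='<img src="https://example.com/a.png">')
format_image_link(row)
# row['image'] -> 'https://example.com/a.png'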
Example #14
def infer_model_type(checkpoint_weights: OrderedDict,
                     attention_mechanism: bool) -> (str, bool):
    """
    Function to infer the model type using the weights matrix.
    We first try to use the "model_type" key added by our retrain process.
    If this fails, we infer it using our knowledge of the layers' names.
    For example, BPEmb model uses an embedding network, thus, if `embedding_network.model.weight_ih_l0` is present,
    we can say that it is such a type; otherwise, it is a FastText model.
    Finally, to handle the attention model, we use a similar approach but using the
    `decoder.linear_attention_mechanism_encoder_outputs.weight` layer name to deduce the presence of
    attention mechanism.

    Args:
        checkpoint_weights (OrderedDict): The weights matrix.
        attention_mechanism (bool): Whether or not the model uses an attention mechanism.

    Return:
        A tuple where the first element is the model_type name and the second element is the attention_mechanism flag.

    """
    inferred_model_type = checkpoint_weights.get("model_type")
    if inferred_model_type is not None:
        model_type = inferred_model_type
    else:
        if "embedding_network.model.weight_ih_l0" in checkpoint_weights.keys():
            model_type = "bpemb"
        else:
            model_type = "fasttext"

    if "decoder.linear_attention_mechanism_encoder_outputs.weight" in checkpoint_weights.keys(
    ):
        attention_mechanism = True

    return model_type, attention_mechanism
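
A hedged sketch of the inference paths; the stub weight dicts below mirror the layer names checked above and are not real checkpoints.

weights = OrderedDict({"embedding_network.model.weight_ih_l0": None})
infer_model_type(weights, attention_mechanism=False)  # -> ('bpemb', False)

weights = OrderedDict({"model_type": "fasttext"})      # retrain metadata wins
infer_model_type(weights, attention_mechanism=False)  # -> ('fasttext', False)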
Example #15
    def __init__(self, fileName):
        self.fileName = fileName

        self.points = OrderedDict()
        self.pointSets = OrderedDict()
        self.updated = {}
        self.ptSetNames = []
Example #16
    def run3(self):
        logging.debug('------------------------------------------------------------')
        logging.debug('Inside: ParseDictionary::run3()')
        logging.debug("Loading the data file")

        # Phone Data
        phone01 = OrderedDict([('id', '1'), ('phoneNumber', '(605) 336-2880'), ('isInherited', 'Y')])
        phones = OrderedDict([('phone', phone01)])

        # Email Data
        email01 = OrderedDict([('id', '1'), ('address', '*****@*****.**'), ('isInherited', 'Y')])
        emails = OrderedDict([('email', email01)])

        # Education data
        education01 = OrderedDict([('type', 'LE'), ('schoolName', 'Nicholaus Copernicus'), ('graduationDate', '1952')])
        education02 = OrderedDict([('type', 'LW'), ('schoolName', 'Happy Law School'), ('graduationDate', '1991')])
        education03 = OrderedDict([('type', 'LQ'), ('schoolName', 'Law School Number 1'), ('graduationDate', '1911')])
        educations = OrderedDict([('education', [education01, education02, education03])])

        allOracle88 = OrderedDict([('arbitrator', OrderedDict([('profileUuid', 'Iaac53e00bd9a11de9b8c850332338889'),
                                                               ('phones', phones),
                                                               ('emails', emails),
                                                               ('educations', educations),
                                                               ('statusType', 'A')]))])

        print(json.dumps(allOracle88, indent=2))

        # Note - we might want to use recursion to iterate this stuff
        # We are just playing around here

        logging.debug("Start parsing ... ")

        for key in allOracle88:
            logging.debug("index: %s", key)

            value = allOracle88[key]

            # test the value of the object
            # if the value is a collections.OrderedDict we will
            # iterate on the object again - this is where the recursion can come in

            logging.debug("Determine the object type")
            logging.debug("Object type: %s", type(value))
            logging.debug("Data value: %s", value)

            if isinstance(value, collections.OrderedDict):
                # The object is a collection, so we will continue
                # to iterate

                for inner_key in value:
                    logging.debug("index: %s", inner_key)

                    # Use fresh names here: rebinding `value` inside the loop
                    # would break the lookup on the next pass
                    inner_value = value[inner_key]
                    logging.debug("Determine the object type")
                    logging.debug("Object type: %s", type(inner_value))
                    logging.debug("Data value: %s", inner_value)


        logging.debug("Done")
Example #17
    def freeze(self) -> IPInfoFrozen:
        od = OrderedDict()
        od['inbound'] = self.inbound
        od['forward'] = self.forward
        od['reverse'] = self.reverse
        od.update(self.time_window.freeze())
        # Hack: wrapping in CommentedMap prevents the !!omap annotation
        # in the YAML output
        return CommentedMap(od)
Example #18
    def _make_closure(self, kwargs: OrderedDict, optimizer: Optimizer) -> Closure:
        """Build a closure object that captures the given arguments and runs the `training_step` function and
        optionally other functions such as `backward` and `zero_grad`."""
        opt_idx = kwargs.get("optimizer_idx", 0)
        step_fn = self._make_step_fn(kwargs)
        backward_fn = self._make_backward_fn(optimizer, opt_idx)
        zero_grad_fn = self._make_zero_grad_fn(kwargs.get("batch_idx", 0), opt_idx, optimizer)
        return Closure(step_fn=step_fn, backward_fn=backward_fn, zero_grad_fn=zero_grad_fn)
Example #19
    def validate(self, data: OrderedDict) -> OrderedDict:
        # Take out fields that cannot be handled by standard serializer logic
        self.code = data.pop('code')
        self.name = data.pop('name')
        self.course_url = data.pop('course_url')
        self.teachers = data.pop('teachers')
        data = super().validate(data)
        return data
Example #20
    def __init__(self, inventory):
        """
        Function that initializes the warehouse.
        params:
            inventory: dictionary mapping warehouse to its inventory
        """
        self.warehouse_stock = OrderedDict()
        self.set_warehouse(inventory)
Example #21
    def __init__(self, source, target_source, id, title, link, abstract, details):
        self.dct = OrderedDict()
        self.dct['source'] = source
        self.dct['target_source'] = target_source
        self.dct['id'] = id
        self.dct['title'] = title
        self.dct['link'] = link
        self.dct['abstract'] = abstract
        self.dct['details'] = details
Example #22
    def __init__(self, scans, binds=True, **kwargs):
        self._scans = sorted(scans, key=lambda x: x.scan_time)
        self._binds = binds
        self._producer = None
        self._scan_id_map = dict()
        self._scan_index_map = OrderedDict()
        self._index = OrderedDict()
        self._build_indices()
        self.make_iterator()
Example #23
    def load(self, config: OrderedDict):
        """Load config data for the controller"""
        self.clear()
        self._is_enabled = config.get(CONF_ENABLED, True)
        self._name = config.get(CONF_NAME, f"Controller {self._controller_index + 1}")
        self._switch_entity_id = config.get(CONF_ENTITY_ID)
        self._preamble = wash_td(config.get(CONF_PREAMBLE))
        self._postamble = wash_td(config.get(CONF_POSTAMBLE))
        self._dirty = True
        return self
Example #24
    def __init__(self, xetters):
        """Constructor

        Parameters
        ----------

        xetters: mapping of register number (integer 0-127) to a subclass of Xetter.
        """
        super().__init__()
        self.xetters = OrderedDict(xetters)
Example #25
    def load(self, config: OrderedDict):
        """ Load zone data from the configuration"""
        self.clear()

        self._is_enabled = config.get(CONF_ENABLED, True)
        self._name = config.get(CONF_NAME, f"Zone {self.zone_index + 1}")
        self._switch_entity_id = config.get(CONF_ENTITY_ID)
        self._adjustment.load(config)
        self._dirty = True
        return self
Example #26
    def process(self, data: OrderedDict) -> OrderedDict:
        """
        Processing PyTorch model parameter.
        The data is a state_dict of a PyTorch model.
        """
        new_data = OrderedDict()
        for layer_name, layer_params in data.items():
            new_data[layer_name] = self._process_layer(layer_params)

        return new_data
Example #27
    def Serialize(self) -> OrderedDict:

        node = OrderedDict(
            {
                "nodeId": self._nodeId,
                "posX": self.pos().x(),
                "posY": self.pos().y(),
            }
        )
        node.update(self._nodeSocketManager.Serialize())
        return node
Example #28
    def get_paginated_response(self, paginator, response_schema):
        """
        :param BasePagination paginator: the paginator
        :param openapi.Schema response_schema: the response schema that must be paged.
        :rtype: openapi.Schema
        """

        return openapi.Schema(
            type=openapi.TYPE_OBJECT,
            properties=OrderedDict((
                (
                    "pagination",
                    openapi.Schema(
                        type=openapi.TYPE_OBJECT,
                        properties=OrderedDict((
                            ("next", openapi.Schema(type=openapi.TYPE_NUMBER)),
                            (
                                "previous",
                                openapi.Schema(type=openapi.TYPE_NUMBER),
                            ),
                            ("count",
                             openapi.Schema(type=openapi.TYPE_NUMBER)),
                            (
                                "current",
                                openapi.Schema(type=openapi.TYPE_NUMBER),
                            ),
                            (
                                "total_pages",
                                openapi.Schema(type=openapi.TYPE_NUMBER),
                            ),
                            (
                                "start_index",
                                openapi.Schema(type=openapi.TYPE_NUMBER),
                            ),
                            (
                                "end_index",
                                openapi.Schema(type=openapi.TYPE_NUMBER),
                            ),
                        )),
                        required=[
                            "next",
                            "previous",
                            "count",
                            "current",
                            "total_pages",
                            "start_index",
                            "end_index",
                        ],
                    ),
                ),
                ("results", response_schema),
            )),
            required=["results", "pagination"],
        )
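
For reference, the JSON shape this schema describes (all field values hypothetical):

# {
#   "pagination": {"next": 3, "previous": 1, "count": 42, "current": 2,
#                  "total_pages": 5, "start_index": 11, "end_index": 20},
#   "results": [...]
# }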
Example #29
    def _build_indices(self):
        self._scan_id_map = dict()
        self._scan_index_map = OrderedDict()
        self._index = OrderedDict()

        for scan in self._scans:
            if self._binds:
                scan.bind(self)
            self._scan_id_map[scan.id] = scan
            self._scan_index_map[scan.index] = scan
            self._index[scan.id] = scan.index
Example #30
def assertOrderedDictEqual(od1: OrderedDict, od2: OrderedDict):
    errors = []
    if len(od1.keys()) != len(od2.keys()):
        raise AssertionError("Number of items don't match: {} {}".format(od1, od2))
    for i, j in zip(od1.items(), od2.items()):
        if i[0] != j[0]:
            errors.append("Keys in {} and {} don't match".format(i, j))
        if i[1] != j[1]:
            errors.append("Values in {} and {} don't match".format(i, j))
    if errors:
        raise AssertionError(", ".join(errors))
    return True
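
A quick hedged usage check (inputs illustrative).

assertOrderedDictEqual(OrderedDict(a=1, b=2), OrderedDict(a=1, b=2))  # -> True
assertOrderedDictEqual(OrderedDict(a=1, b=2), OrderedDict(a=1, b=3))
# AssertionError: Values in ('b', 2) and ('b', 3) don't match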