def structural(p):
    print(len(p))
    dataclasses.fields(p)

    dataclasses.asdict(p)
    dataclasses.astuple(p)
    dataclasses.replace(p)
Example no. 2
    def insert(self, product: Product, *, overwrite=False):
        table = tables[product.__class__]
        c = self.connection.cursor()
        data = astuple(product)
        if len(data) > 1:
            part_2 = ", ?" * (len(data) - 1)
            part_2 = f"(?{part_2})"
        else:
            part_2 = "?"

        if not overwrite:
            part_1 = f"INSERT INTO {table} VALUES "
            order = part_1 + part_2
            c.execute(order, data)
        else:
            part_1 = f"INSERT OR REPLACE INTO {table} VALUES "
            order = part_1 + part_2
            c.execute(order, data)

        self.connection.commit()
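A minimal sketch of the same placeholder idiom, using a hypothetical `Product` dataclass and an in-memory SQLite table (all names here are illustrative, not from the original project):

import sqlite3
from dataclasses import dataclass, astuple

@dataclass
class Product:  # hypothetical stand-in for the project's product types
    sku: str
    name: str
    price: float

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE products (sku TEXT PRIMARY KEY, name TEXT, price REAL)")

item = Product("A-1", "widget", 9.99)
data = astuple(item)
placeholders = ", ".join("?" * len(data))  # "?, ?, ?" -- one slot per field
conn.execute(f"INSERT INTO products VALUES ({placeholders})", data)
conn.commit()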
Example no. 3
def copy_files(exp_params: ExperimentParameters) -> Tuple[Path, Path]:
    _, measure, strategy, weight, learning_based, auc = astuple(exp_params)
    filepath = exp_params.filepath
    path: Path = filepath.parent
    file, filename = filepath.name, filepath.stem

    if learning_based:
        new_folder_path = path / f"{filename}_{measure}_{strategy}_{weight}_{auc}"
    else:
        new_folder_path = path / f"{filename}_{measure}"

    new_folder_path.mkdir()
    new_filepath = new_folder_path / file
    copyfile(str(filepath), new_filepath)

    old_clustered_filepath = path / f"{filename}_{measure}_{strategy}_{weight}_{auc}_clustered.arff"
    new_clustered_filepath = new_folder_path / f"{filename}.clus"
    copyfile(str(old_clustered_filepath), new_clustered_filepath)
    old_clustered_filepath.unlink()
    return new_filepath, new_clustered_filepath
Example no. 4
def dataclassToJson(objectClass: dataclass,
                    objects: List[dataclass],
                    filePrefix: str,
                    sort=True) -> None:
    try:
        sort and objects.sort()
    except TypeError:  # '<' not supported between instances of 'objectClass' and 'objectClass'
        print(f'Warning: {objectClass.__name__} is not sorted.')

    json.dump([asdict(o) for o in objects],
              open(f'{filePrefix}.json', 'w'),
              indent=4)

    json.dump(
        {
            'keys': [f.name for f in fields(objectClass)],
            'values': [astuple(o) for o in objects]
        },
        open(f'{filePrefix}.min.json', 'w'),
        separators=(',', ':'))
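A hypothetical usage of the function above, assuming it is in scope; `order=True` supplies the `<` that the `objects.sort()` call needs:

from dataclasses import dataclass

@dataclass(order=True)
class Score:  # illustrative record type, not from the original project
    points: int
    player: str

dataclassToJson(Score, [Score(3, "bo"), Score(1, "al")], "scores")
# scores.json     -> pretty-printed list of dicts
# scores.min.json -> {"keys":["points","player"],"values":[[1,"al"],[3,"bo"]]}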
Example no. 5
    def __handle_single_requirement(
            self, package: PackageInfo,
            requirement: Requirement) -> Tuple[bool, str]:
        package_name, latest_version, _, current_version, _ = astuple(package)

        if requirement.ignore:
            message = Messages.IGNORED.format(package=requirement.package)
            return False, message

        if not all([latest_version, current_version]):
            message = Messages.CANNOT_FETCH.format(package=package_name,
                                                   version=requirement.version)
            return False, message

        if latest_version > current_version:
            return self._is_rotten(package)

        message = Messages.NOT_ROTTEN.format(package=requirement.package,
                                             version=str(current_version))
        return False, message
Example no. 6
    def as_path(self) -> SVGPath:
        *shape_fields, x, y, w, h, rx, ry = dataclasses.astuple(self)
        path = SVGPath()
        path.M(x + rx, y)
        path.H(x + w - rx)
        if rx > 0:
            path.A(rx, ry, x + w, y + ry)
        path.V(y + h - ry)
        if rx > 0:
            path.A(rx, ry, x + w - rx, y + h)
        path.H(x + rx)
        if rx > 0:
            path.A(rx, ry, x, y + h - ry)
        path.V(y + ry)
        if rx > 0:
            path.A(rx, ry, x + rx, y)
        path.end()
        path._copy_common_fields(*shape_fields)

        return path
Example no. 7
def CompileSpec(*args, **kwargs):
    """
    CompileSpec specifies the model information.
    Example:
    cs = CompileSpec(
            inputs=(
                TensorSpec(
                    shape=[1, 3, 224, 224],
                ),
            ),
            outputs=(
                TensorSpec(
                    shape=[1, 1000],
                ),
            ),
            backend=CoreMLComputeUnit.CPU,
            allow_low_precision=True,
    )
    """
    return astuple(_CompileSpec(*args, **kwargs))
Example no. 8
def simulate(initial_state, config, intervention=None, seed=None):
    """Simulate a run of the Zika simulator model.

    The simulation starts at initial_state at time 0, and evolves the state
    using dynamics whose parameters are specified in config.

    Parameters
    ----------
        initial_state:  `whynot.simulators.zika.State`
            Initial State object, which is used as x_{t_0} for the simulator.
        config:  `whynot.simulators.zika.Config`
            Config object that encapsulates the parameters that define the dynamics.
        intervention: `whynot.simulators.zika.Intervention`
            Intervention object that specifies what, if any, intervention to perform.
        seed: int
            Seed to set internal randomness. The simulator is deterministic, so
            the seed parameter is ignored.

    Returns
    -------
        run: `whynot.dynamics.Run`
            Rollout of the model.

    """
    # Simulator is deterministic, so seed is ignored
    # pylint: disable-msg=unused-argument
    t_eval = np.arange(
        config.start_time, config.end_time + config.delta_t, config.delta_t
    )

    solution = odeint(
        dynamics,
        y0=dataclasses.astuple(initial_state),
        t=t_eval,
        args=(config, intervention),
        rtol=config.rtol,
        atol=config.atol,
    )

    states = [initial_state] + [State(*state) for state in solution[1:]]
    return wn.dynamics.Run(states=states, times=t_eval)
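The `astuple`/unpack round trip above is the core idiom: `odeint` wants a flat numeric vector, and the dataclass is rebuilt positionally from each solution row. A minimal sketch with a hypothetical two-field `State`:

import dataclasses

@dataclasses.dataclass
class State:  # illustrative stand-in for whynot.simulators.zika.State
    infected: float = 10.0
    recovered: float = 0.0

s = State()
y0 = dataclasses.astuple(s)  # (10.0, 0.0) -- flat vector for the solver
s2 = State(*y0)              # rebuilt positionally; field order is preserved
assert s == s2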
Example no. 9
def add_to_list(
    current_list: HmrcTransactionLog,
    date_index: datetime.date,
    symbol: str,
    quantity: Decimal,
    amount: Decimal,
    fees: Decimal,
) -> None:
    """Add entry to given transaction log."""
    if date_index not in current_list:
        current_list[date_index] = {}
    if symbol not in current_list[date_index]:
        current_list[date_index][symbol] = HmrcTransactionData(
            quantity=Decimal(0), amount=Decimal(0), fees=Decimal(0))
    current_quantity, current_amount, current_fees = astuple(
        current_list[date_index][symbol])
    current_list[date_index][symbol] = HmrcTransactionData(
        quantity=current_quantity + quantity,
        amount=current_amount + amount,
        fees=current_fees + fees,
    )
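Since the fields are unpacked only to be summed and repacked, `dataclasses.replace` expresses the same update without positional unpacking. A sketch, assuming `HmrcTransactionData` is an ordinary dataclass with exactly these three fields:

import dataclasses

def add_to_entry(current, quantity, amount, fees):
    # equivalent to the astuple unpack/reassemble above
    return dataclasses.replace(
        current,
        quantity=current.quantity + quantity,
        amount=current.amount + amount,
        fees=current.fees + fees,
    )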
Example no. 10
    def get_line_segment(self, image):
        """ Returns the line segment that intersects the image. """
        # TODO: Fix for when line more closely aligns with y-axis
        m, n = image.shape[:2]
        a, b, c = astuple(self)

        if abs(a) < 0.01:
            # a ~ 0 means b*y + c = 0: constant y, i.e. parallel to the x-axis
            print("Returning line segment parallel to x-axis.")
            x0, y0 = (0, int(-c / b))
            x1, y1 = (n, int(-c / b))
            return ((x0, y0), (x1, y1))

        if abs(b) < 0.01:
            # b ~ 0 means a*x + c = 0: constant x, i.e. parallel to the y-axis
            print("Returning line segment parallel to y-axis.")
            x0, y0 = (int(-c / a), 0)
            x1, y1 = (int(-c / a), m)
            return ((x0, y0), (x1, y1))

        x0, y0 = (0, int(-c / b))
        x1, y1 = (n, int(-(a * n + c) / b))
        return ((x0, y0), (x1, y1))
Example no. 11
def insert_tile(tile: Tile) -> int:
    sql = f"""INSERT INTO tiles({','.join([*asdict(tile)])})
    VALUES ({','.join(['%s' for _ in range(len(astuple(tile)))])})
    RETURNING id"""
    conn = None
    tileId: int = -1

    try:
        conn = connect_db()
        cur = conn.cursor()
        cur.execute(sql, astuple(tile))
        tileId = cur.fetchone()[0]
        conn.commit()
        cur.close()

        return tileId
    except (Exception, psycopg2.DatabaseError) as error:
        print("insert tile: ", error)
    finally:
        if conn is not None:
            conn.close()
Example no. 12
    def step(self, op, num):
        x, y, wpdx, wpdy = dataclasses.astuple(self)
        if op in DIRS:
            dx, dy = DIRS[op]
            wpdx += dx * num
            wpdy += dy * num
        elif op == "F":
            x += wpdx * num
            y += wpdy * num
        else:
            assert op in "LR"
            assert num in [90, 180, 270]
            if num == 180:
                wpdx *= -1
                wpdy *= -1
            elif (op, num) in [("R", 90), ("L", 270)]:
                wpdx, wpdy = wpdy, -wpdx
            elif (op, num) in [("L", 90), ("R", 270)]:
                wpdx, wpdy = -wpdy, wpdx

        return self.__class__(x, y, wpdx, wpdy)
Example no. 13
    def write_results(self, prices: List):
        logging.info("Writing %s lines", len(prices))
        conn = None
        try:
            # read database configuration
            # connect to the PostgreSQL database
            conn = psycopg2.connect(host="postgres", database="fokko", user="******", password="******")
            # create a new cursor
            cur = conn.cursor()
            for price in prices:
                # execute the INSERT statement
                cur.execute(self.sql, astuple(price))
            # commit the changes to the database
            conn.commit()
            # close communication with the database
            cur.close()
        except (Exception, psycopg2.DatabaseError) as error:
            print(error)
        finally:
            if conn is not None:
                conn.close()
Example no. 14
    def step(self, op, num):
        x, y, dirx, diry = dataclasses.astuple(self)
        if op in DIRS:
            dx, dy = DIRS[op]
            x += dx * num
            y += dy * num
        elif op == "F":
            x += dirx * num
            y += diry * num
        else:
            assert op in "LR"
            assert num in [90, 180, 270]
            if num == 180:
                dirx *= -1
                diry *= -1
            elif (op, num) in [("R", 90), ("L", 270)]:
                dirx, diry = RTURNS[dirx, diry]
            elif (op, num) in [("L", 90), ("R", 270)]:
                dirx, diry = LTURNS[dirx, diry]

        return self.__class__(x, y, dirx, diry)
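`RTURNS` and `LTURNS` are not shown in this snippet. Plausible definitions, reconstructed to match the 90-degree rotations spelled out inline in Example no. 12, would be (hypothetical, including the direction deltas):

# (dx, dy) -> heading rotated 90 degrees clockwise / counter-clockwise
DIRS = {"N": (0, 1), "S": (0, -1), "E": (1, 0), "W": (-1, 0)}
RTURNS = {d: (d[1], -d[0]) for d in DIRS.values()}  # right: (x, y) -> (y, -x)
LTURNS = {d: (-d[1], d[0]) for d in DIRS.values()}  # left:  (x, y) -> (-y, x)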
Example no. 15
def merge_data(parsed: List[CountyResult],
               gdf: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
    """
    Merge together the parsed data from `parsed_data` with the GeoDataFrame
    from the Census. Note that this standardizes for the column headers the
    javascript app expects.
    """
    df = pd.DataFrame(
        [dataclasses.astuple(row) for row in parsed],
        columns=[field.name for field in dataclasses.fields(CountyResult)],
    )

    gdf = gdf.merge(df.drop(columns=["state"]), left_on="id", right_on="fips")
    gdf = gdf.rename(
        columns={
            "clinton_vote": "dem",
            "trump_vote": "gop",
            "stein_vote": "grn",
            "johnson_vote": "lib",
            "mcmullin_vote": "una",
        })
    return gdf
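The `astuple` + `fields` pairing above is a compact way to turn a list of dataclass rows into a DataFrame with matching column names. A self-contained sketch of the idiom (the dataclass and values are illustrative):

import dataclasses
import pandas as pd

@dataclasses.dataclass
class CountyResult:  # illustrative subset
    fips: str
    state: str
    clinton_vote: int
    trump_vote: int

parsed = [CountyResult("01001", "AL", 1, 2), CountyResult("01003", "AL", 3, 4)]
df = pd.DataFrame(
    [dataclasses.astuple(row) for row in parsed],
    columns=[field.name for field in dataclasses.fields(CountyResult)],
)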
Example no. 16
def encode_fsobject_memo(memo: FilesystemObjectMemo) -> bytes:
    # Return a representation of *memo* as marshal-encoded tuple.

    if not isinstance(memo, FilesystemObjectMemo):
        raise TypeError

    if memo.stat is None:  # filesystem object did not exist
        return marshal.dumps(())  # != b''

    t = dataclasses.astuple(memo.stat)
    if not all(isinstance(f, int) for f in t):
        raise TypeError

    if not stat.S_ISLNK(memo.stat.mode) and memo.symlink_target is not None:
        raise ValueError

    if stat.S_ISLNK(memo.stat.mode) and not isinstance(memo.symlink_target, str):
        raise TypeError

    return marshal.dumps((
        memo.stat.mode, memo.stat.size, memo.stat.mtime_ns, memo.stat.uid, memo.stat.gid,
        memo.symlink_target))
Example no. 17
    def normalize_point_cluster(self,
                                point_cluster: PointCluster) -> PointCluster:
        """
        normalize point-cluster such that,
            - jimage of cluster[0] is (0, 0, 0)
            - points are in ascending order
        """
        # sort sites by (site_index, jimage)
        tuple_points = sorted(astuple(p) for p in point_cluster.points)
        points = [
            DerivativeSite(site_index, jimage)
            for site_index, jimage in tuple_points
        ]

        offset = np.array(points[0].jimage)
        shifted_new_points = [
            DerivativeSite(
                p.site_index,
                tuple((np.array(p.jimage, dtype=int) - offset).tolist()))
            for p in points
        ]
        return PointCluster(shifted_new_points)
Example no. 18
    def eval_features_once(self, pool, layers, tile_size=512):
        """Computes the set of feature maps for an image."""
        img_size = np.array(self.img.shape[-2:])
        ntiles = (img_size - 1) // tile_size + 1
        tile_size = img_size // ntiles
        if np.prod(ntiles) > 1:
            print('Using %dx%d tiles of size %dx%d.' %
                  (ntiles[1], ntiles[0], tile_size[1], tile_size[0]))
        features = {}
        for layer in layers:
            scale, channels = self.layer_info(layer)
            shape = (channels, ) + tuple(np.int32(np.ceil(img_size / scale)))
            features[layer] = np.zeros(shape, dtype=np.float32)
        for y in range(ntiles[0]):
            for x in range(ntiles[1]):
                xy = np.array([y, x])
                start = xy * tile_size
                end = start + tile_size
                if y == ntiles[0] - 1:
                    end[0] = img_size[0]
                if x == ntiles[1] - 1:
                    end[1] = img_size[1]
                tile = self.img[:, start[0]:end[0], start[1]:end[1]]
                pool.ensure_healthy()
                pool.request(
                    FeatureMapRequest(start, SharedNDArray.copy(tile), layers))
        pool.reset_next_worker()
        for _ in range(np.prod(ntiles)):
            start, feats_tile = astuple(pool.resp_q.get())
            for layer, feat in feats_tile.items():
                scale, _ = self.layer_info(layer)
                start_f = start // scale
                end_f = start_f + np.array(feat.array.shape[-2:])
                features[layer][:, start_f[0]:end_f[0],
                                start_f[1]:end_f[1]] = feat.array
                feat.unlink()

        return features
Example no. 19
def _make_fundamental(obj, repeatable):
    if repeatable:
        if isinstance(obj, str):
            # https://medium.com/@bdov_/https-medium-com-bdov-python-objects-part-iii-string-interning-625d3c7319de
            # CPython implementation of marshal.dumps(): https://github.com/python/cpython/blob/master/Python/marshal.c
            return b's' + obj.encode()
        elif isinstance(obj, bytes):
            return b'b' + obj

    if is_immutable_fundamental(obj):
        return obj

    r = repeatable

    if isinstance(obj, collections.abc.Mapping):  # note: loses order of collections.OrderedDict
        if r:
            return tuple(
                sorted((_make_fundamental(k, r), _make_fundamental(v, r))
                       for k, v in obj.items()))
        return {
            _make_fundamental(k, r): _make_fundamental(v, r)
            for k, v in obj.items()
        }

    if isinstance(obj, (set, frozenset)):
        obj = frozenset(_make_fundamental(k, r) for k in obj)
        if not r:
            return obj
        return tuple(sorted(obj))

    if isinstance(obj, collections.abc.Iterable):
        return tuple(_make_fundamental(k, r) for k in obj)

    if dataclasses.is_dataclass(obj) and not isinstance(obj, type):
        return dataclasses.astuple(obj)

    raise TypeError
Example no. 20
def write_records_as_csv(records: list, outfile: str) -> None:
    """
    Write the records as a csv file

    Args:
        records: a list of dataclass objects or namedtuples
            (anything with a '_fields' attribute)
        outfile: the path to save the csv file
    """
    r0 = records[0]
    if dataclasses.is_dataclass(r0):
        column_names = [field.name for field in dataclasses.fields(r0)]
        records = [dataclasses.astuple(rec) for rec in records]
    elif hasattr(r0, "_fields"):
        column_names = r0._fields
    else:
        raise TypeError("records should be a namedtuple or a dataclass")
    f = open(outfile, 'w', newline='', encoding='utf-8')
    w = _csv.writer(f)
    w.writerow(column_names)
    for record in records:
        w.writerow(record)
    f.close()
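A hypothetical usage of the function above, assuming it is in scope; dataclasses and namedtuples both work because each yields rows the csv writer can iterate:

import dataclasses

@dataclasses.dataclass
class Row:  # illustrative record type
    name: str
    count: int

write_records_as_csv([Row("a", 1), Row("b", 2)], "rows.csv")
# rows.csv:
#   name,count
#   a,1
#   b,2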
Example no. 21
def run_worker(args):
    (
        environment_name,
        species_name,
        batch_num,
        worker_num,
        num_workers,
    ) = args

    # Go through every replay and sum up stats
    env_class = get_env_module(environment_name)
    replay_directory = find_batch_directory(environment_name, species_name, batch_num)
    ws = WorkerStats()
    for agent_replay in iter_replay_data(
        replay_directory,
        env_class.State,
        worker_num,
        num_workers,
    ):
        ws.total_mcts_considerations += agent_replay.total_mcts_considerations()
        ws.num_games += 1
        ws.num_positions += len(agent_replay.positions)
    return astuple(ws)
Example no. 22
def fetchall(conn: Any,
             table: str,
             where: Optional[Dataclass],
             columns: Union[str, List[str]] = "*") -> Optional[Generator]:
    """Run a SELECT on a table. Returns all the results as a tuple!"""
    if not any(astuple(where)):
        return None
    where_ = {
        TABLEMAP[table].get(k): v
        for k, v in asdict(where).items()
        if v is not None and k in TABLEMAP[table]
    }
    sql = ""
    if columns != "*":
        sql = f"SELECT {', '.join(columns)} FROM {table} "
    else:
        sql = f"SELECT * FROM {table} "
    if where:
        sql += "WHERE "
        sql += " AND ".join(f"{k} = " "%s" for k in where_.keys())
    with conn.cursor() as cursor:
        cursor.execute(sql, tuple(where_.values()))
        return (DATACLASSMAP[table](*record) for record in cursor.fetchall())
Example no. 23
def test_TH1Axis_determination(logging_mixin, create_hist_axis_range,
                               axis_type, axis, hist_to_test, test_root_hists):
    """ Test TH1 axis determination in the HistAxisRange object. """
    import ROOT
    axis_map = {
        "x_axis": ROOT.TH1.GetXaxis,
        "y_axis": ROOT.TH1.GetYaxis,
        "z_axis": ROOT.TH1.GetZaxis,
    }
    axis = axis_map[axis]
    # Get the HistAxisRange object
    obj, object_args = create_hist_axis_range
    # Insert the proper axis type
    obj.axis_type = axis_type
    # Determine the test hist
    hist = dataclasses.astuple(test_root_hists)[hist_to_test]

    # Check that the axis retrieved by the specified function is the same
    # as that retrieved by the HistAxisRange object.
    # NOTE: GetZaxis() (for example) is still valid for a TH1. It is a minimal axis
    #       object with 1 bin. So it is fine to check for equivalence for axes that
    #       don't really make sense in terms of a hist's dimensions.
    assert axis(hist) == obj.axis(hist)
Example no. 24
    def __getitem__(self: "CacheDict[KT, VT]", key: KT, /) -> VT:
        log.debug("get [%r] key: [%r]", ReprWrapper(self), key)
        if not isinstance(key, self.mapping.KeyType):
            raise CacheDictKeyTypeException(
                {
                    "key": key,
                    "key_type": type(key),
                    "KT": self._get_key_type_mapping(),
                },
            )
        select_stmt = self.mapping.select_statement()
        cursor = self._execute(select_stmt, dataclasses.astuple(key), op="select")
        res: typing.Optional[CacheDictRow[KT, VT]] = cursor.fetchone()
        if not res:
            raise CacheDictNoSuchKeyException(
                {"key": key, "table": self.mapping.table_ident},
            )
        elif res.value is None:
            raise CacheDictNoneReturnedException(
                {"key": key, "table": self.mapping.table_ident},
            )
        else:
            return res.value
Example no. 25
    def __call__(self, sample: Sample):
        image, landmarks = dataclasses.astuple(sample)
        h, w = image.shape[:2]
        if isinstance(self.output_size, int):
            if h > w:
                new_h, new_w = self.output_size * (h / w), self.output_size
            else:
                new_h, new_w = self.output_size, self.output_size * (w / h)
        else:
            new_h, new_w = self.output_size

        new_h, new_w = int(new_h), int(new_w)

        new_img = transform.resize(image, (new_h, new_w))

        # h and w are swapped for landmarks because for images,
        # x and y axis are axis 1 and axis 0 respectively
        new_landmarks = landmarks * [new_w / w, new_h / h]

        return Sample(
            image=new_img,
            landmarks=new_landmarks
        )
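One caveat worth noting here: `dataclasses.astuple` recurses into field values and deep-copies anything it does not recognize, so unpacking a sample this way copies the underlying arrays, while plain attribute access does not. A small sketch with a hypothetical `Sample`:

import dataclasses
import numpy as np

@dataclasses.dataclass
class Sample:  # illustrative
    image: np.ndarray
    landmarks: np.ndarray

s = Sample(np.zeros((4, 4)), np.zeros((2, 2)))
image, landmarks = dataclasses.astuple(s)  # both arrays are deep-copied
assert image is not s.image
image, landmarks = s.image, s.landmarks    # no copy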
Example no. 26
def simulate(initial_state, config, intervention=None, seed=None):
    """Simulate a run of the Chen et al. model opioid epidemic model.

    Parameters
    ----------
        initial_state: whynot.simulator.opioid.State
            Initial state of the dynamics
        config: whynot.simulator.opioid.Config
            Config object to determine simulation dynamics.
        intervention: whynot.simulator.opioid.Intervention
            (Optional) Intervention object to determine what, if any,
            intervention to perform during the rollout of the dynamics.
        seed: int
            The simulator is deterministic, so the seed parameter is ignored.

    Returns
    -------
        run: whynot.dynamics.Run
            Run object produced by running simulate for the opioid simulator

    """
    # pylint: disable-msg=unused-argument
    # pylint:disable-msg=no-member
    t_eval = np.arange(config.start_time, config.end_time + config.delta_t,
                       config.delta_t)

    solution = odeint(
        dynamics,
        y0=dataclasses.astuple(initial_state),
        t=t_eval,
        args=(config, intervention),
        rtol=1e-4,
        atol=1e-4,
    )

    states = [initial_state] + [State(*state) for state in solution[1:]]
    return wn.dynamics.Run(states=states, times=t_eval)
Example no. 27
    def _async_process_discovered_usb_device(self, device: USBDevice) -> None:
        """Process a USB discovery."""
        _LOGGER.debug("Discovered USB Device: %s", device)
        device_tuple = dataclasses.astuple(device)
        if device_tuple in self.seen:
            return
        self.seen.add(device_tuple)

        matched = [matcher for matcher in self.usb if _is_matching(device, matcher)]
        if not matched:
            return

        service_info = UsbServiceInfo(
            device=device.device,
            vid=device.vid,
            pid=device.pid,
            serial_number=device.serial_number,
            manufacturer=device.manufacturer,
            description=device.description,
        )

        sorted_by_most_targeted = sorted(matched, key=lambda item: -len(item))
        most_matched_fields = len(sorted_by_most_targeted[0])

        for matcher in sorted_by_most_targeted:
            # If there is a less targeted match, we only
            # want the most targeted match
            if len(matcher) < most_matched_fields:
                break

            discovery_flow.async_create_flow(
                self.hass,
                matcher["domain"],
                {"source": config_entries.SOURCE_USB},
                service_info,
            )
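Using `astuple` as a hashable deduplication key, as `self.seen` does above, works for any dataclass whose field values are themselves hashable. A minimal sketch with a hypothetical device record:

import dataclasses

@dataclasses.dataclass
class Device:  # illustrative
    vid: str
    pid: str

seen = set()
for dev in [Device("10c4", "ea60"), Device("10c4", "ea60")]:
    key = dataclasses.astuple(dev)
    if key in seen:
        continue  # duplicate; skip
    seen.add(key)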
Example no. 28
async def test_unordered_UTXOs():
    api = API()
    api.initialize()
    mempool = MemPool(coin, api)
    event = Event()
    async with OldTaskGroup() as group:
        await group.spawn(mempool.keep_synchronized, event)
        await event.wait()
        await group.cancel_remaining()

    # Check the default dict is handled properly
    prior_len = len(mempool.hashXs)
    assert await mempool.unordered_UTXOs(os.urandom(HASHX_LEN)) == []
    assert prior_len == len(mempool.hashXs)

    # Test all hashXs
    utxos = api.UTXOs()
    for hashX in api.hashXs:
        mempool_result = await mempool.unordered_UTXOs(hashX)
        our_result = utxos.get(hashX, [])
        assert set(our_result) == {
            dataclasses.astuple(mr)
            for mr in mempool_result
        }
Example no. 29
def voxelize_batch(b: batched_example.BatchedExample,
                   voxelization_config: configuration.VoxelizationConfig):
    """Voxelizes a batched example with the given settings."""
    with t.no_grad():
        voxel_content_fn = {
            configuration.TaskType.SEMANTIC:
            batched_example.VoxelContentSemanticLabel(b.mesh_labels),
            configuration.TaskType.FG_BG:
            batched_example.voxel_content_1
        }[voxelization_config.task_type]
        resolution = dataclasses.astuple(voxelization_config.resolution)
        res_mul = voxelization_config.voxelization_image_resolution_multiplier
        res_cons = voxelization_config.conservative_rasterization
        depth_mul = voxelization_config.voxelization_projection_depth_multiplier
        b = batched_example.voxelize(
            b,
            resolution=resolution,
            voxel_content_fn=voxel_content_fn,
            sub_grid_sampling=voxelization_config.sub_grid_sampling,
            image_resolution_multiplier=res_mul,
            conservative_rasterization=res_cons,
            projection_depth_multiplier=depth_mul)

    return b
Example no. 30
    def add_to_map(self, url: str, action: Method, fut: asyncio.Task,
                   **kwargs) -> None:
        if fut.exception():
            _logger.warning(
                f"Can't {action.value} on {url}: {fut.exception()}")
            self.hook_add_to_map_error(url=url,
                                       action=action,
                                       fut=fut,
                                       **kwargs)
            return

        sub_name = kwargs.pop("sub_name")
        if action == Method.ADD_SUB:
            vs = VirtualSubscription(**kwargs)
            self.real_map[url][sub_name] = vs
            self.name_to_subscription[url][sub_name] = fut.result()

        if action == Method.ADD_MI:
            nodes = kwargs["nodes"]
            vs = self.real_map[url][sub_name]
            vs.subscribe_data_change(nodes, *astuple(kwargs["node_attr"]))
            for node, handle in zip(nodes, fut.result()):
                if isinstance(handle, ua.StatusCode):
                    # a StatusCode is returned, the request has failed.
                    vs.unsubscribe([node])
                    _logger.info(f"Node {node} subscription failed: {handle}")
                    # The node is invalid, remove it from both maps
                    if handle.name == "BadNodeIdUnknown":
                        _logger.warning(
                            f"WARNING: Abandoning {node} because it returned {handle} from {url}"
                        )
                        real_vs = self.ha_client.ideal_map[url][sub_name]
                        real_vs.unsubscribe([node])
                    continue
                self.node_to_handle[url][node] = handle
        self.hook_add_to_map(fut=fut, url=url, action=action, **kwargs)
Example no. 31
    def save(self, cursor: _cursor):
        data = astuple(self)
        names = ",".join(['"%s"' % f.name for f in fields(self)])

        template = ",".join(["%s"] * len(data))
        values = cursor.mogrify(template, data).decode()
        sql = f"""
            insert into {self._table} ({names}) values ({values}) on conflict do nothing
              """

        if hasattr(self, 'id'):
            sql += " returning *"

        cursor.execute(sql)

        # If the entry is found, correct the auto-generated object.id with the value from database.
        if hasattr(self, 'id'):
            result = cursor.fetchone()
            # nothing was inserted
            if not result:
                sql = f"select id from {self._table} where {self._uniq_field}=%s"
                cursor.execute(sql, (getattr(self, self._uniq_field), ))
                result2 = cursor.fetchone()
                self.id = result2[0]
Example no. 32
import dataclasses
from typing import Type, Union


class A:
    pass


dataclasses.fields(<warning descr="'dataclasses.fields' method should be called on dataclass instances or types">A</warning>)
dataclasses.fields(<warning descr="'dataclasses.fields' method should be called on dataclass instances or types">A()</warning>)

dataclasses.asdict(<warning descr="'dataclasses.asdict' method should be called on dataclass instances">A()</warning>)
dataclasses.astuple(<warning descr="'dataclasses.astuple' method should be called on dataclass instances">A()</warning>)
dataclasses.replace(<warning descr="'dataclasses.replace' method should be called on dataclass instances">A()</warning>)


@dataclasses.dataclass
class B:
    pass


dataclasses.fields(B)
dataclasses.fields(B())

dataclasses.asdict(B())
dataclasses.astuple(B())
dataclasses.replace(B())

dataclasses.asdict(<warning descr="'dataclasses.asdict' method should be called on dataclass instances">B</warning>)
dataclasses.astuple(<warning descr="'dataclasses.astuple' method should be called on dataclass instances">B</warning>)
dataclasses.replace(<warning descr="'dataclasses.replace' method should be called on dataclass instances">B</warning>)
def unknown(p):
    dataclasses.fields(p)

    dataclasses.asdict(p)
    dataclasses.astuple(p)
def union1(p: Union[A, B]):
    dataclasses.fields(p)

    dataclasses.asdict(p)
    dataclasses.astuple(p)
    dataclasses.replace(p)
def union2(p: Union[Type[A], Type[B]]):
    dataclasses.fields(p)

    dataclasses.asdict(<warning descr="'dataclasses.asdict' method should be called on dataclass instances">p</warning>)
    dataclasses.astuple(<warning descr="'dataclasses.astuple' method should be called on dataclass instances">p</warning>)
    dataclasses.replace(<warning descr="'dataclasses.replace' method should be called on dataclass instances">p</warning>)
Example no. 36
    def __str__(self):
        """Format items separated by a separator."""
        items = (str(x) for x in dataclasses.astuple(self) if x is not None)
        return self._sep.join(items)
Example no. 37
    def __iter__(self):
        return iter(dataclasses.astuple(self))
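Delegating `__iter__` to `astuple` like this makes instances tuple-unpackable. A minimal sketch using the same method on a hypothetical dataclass:

import dataclasses

@dataclasses.dataclass
class Point:  # illustrative
    x: int
    y: int

    def __iter__(self):
        return iter(dataclasses.astuple(self))

x, y = Point(1, 2)  # unpacking now works
assert (x, y) == (1, 2)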