Example #1
async def discovergy_meter_read_task(
    *,
    config: Box,
    loop: asyncio.base_events.BaseEventLoop,
) -> None:
    """Async worker to poll the Discovergy API."""
    meters = power.get_meters(config)
    read_interval = timedelta(seconds=int(config.poll.discovergy))
    date_to = arrow.utcnow()
    date_from = date_to - read_interval
    log.debug(f"The Discovergy read interval is {read_interval}.")
    while loop.is_running():
        try:
            # FIXME (a8): This isn't an async call yet because we use requests. OTOH, this
            # doesn't really matter ATM: we only call Discovergy every few hours.
            power.get(config=config,
                      meters=meters,
                      date_from=date_from,
                      date_to=date_to)
        except Exception as e:
            log.warning(
                f"Error in Discovergy poller. Retrying in 15 seconds. {e}")
            await asyncio.sleep(15)
        else:
            # total_seconds() covers intervals of a day or more; .seconds would not.
            await asyncio.sleep(read_interval.total_seconds())
            date_from = date_to
            date_to = arrow.utcnow()
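A worker like this only runs while its loop does, so it is typically scheduled as a task before the loop starts. A minimal sketch of how it might be launched; the hand-built config and the main() wiring below are assumptions, not part of the original source:

import asyncio

from box import Box


def main() -> None:
    # Assumed minimal config; the project's real config loading is not shown.
    config = Box({"poll": {"discovergy": 3600}})
    loop = asyncio.get_event_loop()
    # Schedule the poller; it exits on its own once the loop stops running.
    loop.create_task(discovergy_meter_read_task(config=config, loop=loop))
    try:
        loop.run_forever()
    finally:
        loop.close()


if __name__ == "__main__":
    main()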
Example #2
    def forward(cls, ctx, dummy, experts_per_sample: List[List[RemoteExpert]],
                k_min: int, backward_k_min: int, timeout_after_k_min: float,
                forward_timeout: Optional[float],
                backward_timeout: Optional[float],
                loop: asyncio.base_events.BaseEventLoop,
                *flat_inputs: torch.Tensor) -> Tuple[torch.Tensor, ...]:
        assert not torch.is_grad_enabled()
        num_samples, max_experts = len(experts_per_sample), max(
            map(len, experts_per_sample))
        flat_inputs_per_sample: List[Tuple[torch.Tensor, ...]] = list(
            zip(*(x.split(1, dim=0) for x in flat_inputs)))
        assert len(experts_per_sample) == len(
            flat_inputs_per_sample) == num_samples

        async def _forward():
            # dispatch tasks to all remote experts, await responses
            pending_tasks = {
                asyncio.create_task(
                    cls._forward_one_expert((i, j), expert,
                                            flat_inputs_per_sample[i]))
                for i in range(num_samples)
                for j, expert in enumerate(experts_per_sample[i])
            }
            alive_grid_indices, alive_flat_outputs = await cls._wait_for_responses(
                pending_tasks, num_samples, k_min, forward_timeout,
                timeout_after_k_min)

            # assemble responses
            alive_ii, alive_jj = map(torch.as_tensor, zip(*alive_grid_indices))
            mask = torch.zeros([num_samples, max_experts],
                               dtype=torch.bool,
                               device=flat_inputs[0].device)
            mask[alive_ii, alive_jj] = True

            alive_flat_outputs_stacked = list(
                map(torch.cat, zip(*alive_flat_outputs)))
            # list of torch tensors, where i-th tensor is of shape [num_responded, *expert_outputs[i].shape]

            outputs = []
            for response_stacked in alive_flat_outputs_stacked:
                output = torch.zeros(
                    [num_samples, max_experts, *response_stacked.shape[1:]],
                    device=response_stacked.device,
                    dtype=response_stacked.dtype,
                    requires_grad=response_stacked.requires_grad)
                output[alive_ii, alive_jj] = response_stacked
                outputs.append(output)

            # save individual outputs for backward pass
            ctx.save_for_backward(alive_ii, alive_jj, *flat_inputs)
            ctx._saved_non_tensors = loop, backward_k_min, backward_timeout, timeout_after_k_min, experts_per_sample
            return (mask, ) + tuple(outputs)

        return loop.run_until_complete(_forward())
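The concurrency pattern here lives in cls._wait_for_responses, whose body is not shown: wait until at least k_min tasks finish, then give stragglers a short grace period before cancelling the rest. A hedged sketch of that pattern; wait_for_k and its signature are hypothetical, not the library's API:

import asyncio
from typing import Any, List, Set


async def wait_for_k(tasks: Set[asyncio.Task], k_min: int,
                     timeout: float, grace_after_k: float) -> List[Any]:
    # Phase 1: wait until at least k_min tasks finish or `timeout` elapses.
    loop = asyncio.get_running_loop()
    deadline = loop.time() + timeout
    done: Set[asyncio.Task] = set()
    pending = set(tasks)
    while len(done) < k_min and pending:
        remaining = deadline - loop.time()
        if remaining <= 0:
            break
        finished, pending = await asyncio.wait(
            pending, timeout=remaining, return_when=asyncio.FIRST_COMPLETED)
        done |= finished
    # Phase 2: give the remaining tasks a short grace period, then cancel.
    if pending:
        finished, pending = await asyncio.wait(pending, timeout=grace_after_k)
        done |= finished
    for task in pending:
        task.cancel()
    return [t.result() for t in done if t.exception() is None]

In the real code each task also carries its (i, j) grid index, which is how the surviving responses are scattered back into the dense mask and output tensors above.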
Example #3
async def awattar_read_task(
    *,
    config: Box,
    loop: asyncio.base_events.BaseEventLoop,
) -> None:
    """Async worker to poll the Open Weather Map API."""
    read_interval = timedelta(seconds=int(config.poll.awattar))
    log.debug(f"The Awattar read interval is {read_interval}.")
    while loop.is_running():
        try:
            await awattar.get(config=config)
        except Exception as e:
            log.warning(
                f"Error in Awattar data poller. Retrying in 15 seconds. {e}")
            await asyncio.sleep(15)
        else:
            await asyncio.sleep(read_interval.total_seconds())
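These pollers exit on their own only when the loop stops; to shut one down early, cancel its task and swallow the resulting CancelledError. A minimal sketch; stop_poller is an illustrative helper, not part of the source:

import asyncio


async def stop_poller(task: asyncio.Task) -> None:
    # Cancellation is delivered at the poller's next await point
    # (one of the asyncio.sleep calls above).
    task.cancel()
    try:
        await task
    except asyncio.CancelledError:
        pass

Here task would be the handle returned by, e.g., loop.create_task(awattar_read_task(config=config, loop=loop)).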
Example #4
async def open_weather_map_read_task(
    *,
    config: Box,
    loop: asyncio.base_events.BaseEventLoop,
) -> None:
    """Async worker to poll the Open Weather Map API."""
    read_interval = timedelta(seconds=int(config.poll.weather))
    log.debug(f"The Open Weather Map read interval is {read_interval}.")
    while loop.is_running():
        try:
            # FIXME (a8): This isn't an async call yet because we use requests.
            weather.get(config=config)
        except Exception as e:
            log.warning(
                f"Error in Open Weather Map poller. Retrying in 15 seconds. {e}")
            await asyncio.sleep(15)
        else:
            await asyncio.sleep(read_interval.total_seconds())
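The FIXME above matters more than in Example #1: weather.get uses requests, so it blocks the event loop for the whole HTTP round trip, stalling every other poller on the same loop. One conventional workaround is to push the blocking call onto the default thread-pool executor. A sketch, assuming only that weather.get takes the config keyword as shown above (poll_weather_once is a hypothetical name):

import asyncio
import functools


async def poll_weather_once(config, loop: asyncio.AbstractEventLoop) -> None:
    # Off-load the blocking requests-based call to the default thread-pool
    # executor so the event loop keeps servicing other tasks meanwhile.
    await loop.run_in_executor(
        None, functools.partial(weather.get, config=config))

run_in_executor only forwards positional arguments, hence the functools.partial wrapper for the config keyword.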
Example #5
    async def _gather(
            self, loop: asyncio.base_events.BaseEventLoop, inputs, neurons,
            mode) -> List[Tuple[torch.FloatTensor, torch.LongTensor]]:
        r""" Creates and returns the results from len(neurons) torch forward requests. Uses asyncio for concurrency.

            Args:
                loop (:obj:`asyncio.base_events.BaseEventLoop`, `required`):
                    The asyncio concurrency loop to use while making the n calls.

                inputs (:obj:`List[torch.Tensor]` of shape :obj:`(num_neurons * [shape])`, `required`):
                    List of tensors to send to corresponding neurons. Tensors are of arbitrary type and shape depending on the
                    modality.

                neurons (:obj:`List[bittensor.proto.Neuron]` of shape :obj:`(num_neurons)`, `required`):
                    List of remote neurons whose length matches that of inputs. Tensors from inputs are sent forward to these neurons.

                mode (:obj:`bittensor.proto.Modality` of shape :obj:`(1)`, `required`):
                    Bittensor forward modality type. Enum in [TEXT, IMAGE, TENSOR]

            Returns:
                results (:obj:`List[Tuple[torch.FloatTensor, torch.LongTensor]]`, `required`):
                    result tuples from the forward call on a Receptor class.
        """

        # ---- Calls to fill ----
        calls = []
        for inputs_i, neuron_i in zip(inputs, neurons):

            # ---- Find receptor or create one ----
            if neuron_i.public_key not in self._receptors:
                self._receptors[
                    neuron_i.public_key] = bittensor.receptor.Receptor(
                        neuron_i, self.config, self.wallet)
            receptor = self._receptors[neuron_i.public_key]

            # ---- Append async calls ----
            calls.append(
                loop.run_in_executor(None, receptor.forward, inputs_i, mode))

        # ---- Gather results and return ----
        results = await asyncio.gather(*calls)
        return results
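The core pattern in _gather: wrap each blocking receptor.forward call in loop.run_in_executor, then await them all with asyncio.gather so the synchronous calls overlap in the thread pool. Stripped of the bittensor specifics, the same skeleton looks like this; gather_blocking and fetch are illustrative stand-ins, not part of the library:

import asyncio
import time
from typing import Any, Callable, List, Tuple


async def gather_blocking(loop: asyncio.AbstractEventLoop,
                          fn: Callable[..., Any],
                          args_list: List[Tuple]) -> List[Any]:
    # Each call runs in the default thread-pool executor; asyncio.gather
    # returns results in submission order, matching the results list above.
    calls = [loop.run_in_executor(None, fn, *args) for args in args_list]
    return await asyncio.gather(*calls)


def fetch(x: int) -> int:
    time.sleep(0.1)  # stand-in for the blocking receptor.forward call
    return x * 2


loop = asyncio.get_event_loop()
print(loop.run_until_complete(gather_blocking(loop, fetch, [(1,), (2,), (3,)])))
# [2, 4, 6]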