Example 1
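Pads a staircase of tensors of increasing length to one common minimum length and stacks the padded result.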
    def a():
        """Pad a staircase of variable-length tensors to a common length."""
        base = 5
        stair_length = 9
        stair = [to_tensor(range(i + base)) for i in range(stair_length)]  # lengths 5..13

        pad = [min_length_pad(s, stair_length + base) for s in stair]  # pad each to length 14
        print(to_tensor(pad))
Example 2
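Draws random image pairs from a PairDataset, embeds both images with the model, and plots each pair with the ground truth, the pairwise embedding distance, and the thresholded verdict.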
def stest_many_versus_many(model, data_dir, img_size, threshold=0.5):
    """ """
    data_iterator = iter(
        DataLoader(
            PairDataset(
                data_dir,
                transform=transforms.Compose([
                    transforms.Grayscale(),
                    transforms.Resize(img_size),
                    transforms.ToTensor(),
                ]),
            ),
            num_workers=0,
            batch_size=1,
            shuffle=True,
        ))
    for i in range(10):
        x0, x1, is_diff = next(data_iterator)
        distance = (torch.pairwise_distance(*model(
            to_tensor(x0, device=global_torch_device()),
            to_tensor(x1, device=global_torch_device()),
        )).cpu().item())
        boxed_text_overlay_plot(
            torchvision.utils.make_grid(torch.cat((x0, x1), 0)),
            f"Truth: {'Different' if is_diff.cpu().item() else 'Alike'},"
            f" Dissimilarity: {distance:.2f},"
            f" Verdict: {'Different' if distance > threshold else 'Alike'}",
        )
Example 3
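Runs two flattened random batches through an MLP built with multi-dimensional input and output shapes.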
def test_multi_dim():
    pos_size = (2, 3, 2)
    a_size = (2, 4, 5)
    model = MLP(input_shape=pos_size, output_shape=a_size)

    pos_1 = to_tensor(numpy.random.rand(64, numpy.prod(pos_size[1:])), device="cpu")
    pos_2 = to_tensor(numpy.random.rand(64, numpy.prod(pos_size[1:])), device="cpu")
    print(model(pos_1, pos_2))
Example 4
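Samples from the categorical distribution a CategoricalMLP produces for a pair of input tensors.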
    def single_cat():
        s = (1, 2)
        a = (2, )
        model = CategoricalMLP(input_shape=s, output_shape=a)

        inp = to_tensor(numpy.random.rand(64, s[0]), device="cpu")
        inp2 = to_tensor(numpy.random.rand(64, s[1]), device="cpu")
        print(model(inp, inp2).sample())
Example 5
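The counterpart of Example 1: truncates the staircase tensors to a common length along the last dimension.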
    def gasdasa():
        """ """
        from draugr.torch_utilities import to_tensor

        base = 5
        stair_length = 9
        stair = [to_tensor(range(i + base)) for i in range(stair_length)]

        trunc = [last_dim_truncate(s, base) for s in stair]
        print(to_tensor(trunc))
Example 6
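Builds the networks of a SAC-style agent: twin critics with frozen target copies, an actor, their optimisers, and an optionally auto-tuned entropy coefficient; to_tensor converts the output shape and the initial alpha.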
  def __build__(
      self,
      observation_space: ObservationSpace,
      action_space: ActionSpace,
      signal_space: SignalSpace,
      metric_writer: Writer = MockWriter(),
      print_model_repr: bool = True,
      ) -> None:
    """

@param observation_space:
@param action_space:
@param signal_space:
@param metric_writer:
@param print_model_repr:
@return:
"""
    if action_space.is_discrete:
      raise ActionSpaceNotSupported(
          "discrete action space not supported in this implementation"
          )

    self._critic_arch_spec.kwargs["input_shape"] = (
        self._input_shape + self._output_shape
    )
    self._critic_arch_spec.kwargs["output_shape"] = 1

    self.critic_1 = self._critic_arch_spec().to(self._device)
    self.critic_1_target = copy.deepcopy(self.critic_1).to(self._device)
    freeze_model(self.critic_1_target, True, True)

    self.critic_2 = self._critic_arch_spec().to(self._device)
    self.critic_2_target = copy.deepcopy(self.critic_2).to(self._device)
    freeze_model(self.critic_2_target, True, True)

    self.critic_optimiser = self._critic_optimiser_spec(
        itertools.chain(self.critic_1.parameters(), self.critic_2.parameters())
        )

    self._actor_arch_spec.kwargs["input_shape"] = self._input_shape
    self._actor_arch_spec.kwargs["output_shape"] = self._output_shape
    self.actor = self._actor_arch_spec().to(self._device)
    self.actor_optimiser = self._actor_optimiser_spec(self.actor.parameters())

    if self._auto_tune_sac_alpha:
      self._target_entropy = -torch.prod(
          to_tensor(self._output_shape, device=self._device)
          ).item()
      self._log_sac_alpha = nn.Parameter(
          torch.log(to_tensor(self._sac_alpha, device=self._device)),
          requires_grad=True,
          )
      self.sac_alpha_optimiser = self._auto_tune_sac_alpha_optimiser_spec(
          [self._log_sac_alpha]
          )
Example 7
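A REINFORCE-style update: converts log-probabilities, signals, and non-terminal masks to tensors, discounts the rollout, and backpropagates the resulting policy loss.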
    def _update(self, *, metric_writer=MockWriter()) -> float:
        """

:param metric_writer:

:returns:
"""

        if len(self._memory_buffer) == 0:
            raise NoTrajectoryException

        trajectory = self._memory_buffer.retrieve_trajectory()
        self._memory_buffer.clear()

        log_probs = to_tensor(
            [
                self.get_log_prob(d, a)
                for d, a in zip(trajectory.distribution, trajectory.action)
            ],
            device=self._device,
        )

        signal = to_tensor(trajectory.signal, device=self._device)
        non_terminal = to_tensor(
            non_terminal_numerical_mask(trajectory.terminated),
            device=self._device,
        )

        discounted_signal = discount_rollout_signal_torch(
            signal,
            self._discount_factor,
            device=self._device,
            non_terminal=non_terminal,
        )

        loss = -(log_probs * discounted_signal).mean()

        self._optimiser.zero_grad()
        loss.backward()
        self.post_process_gradients(self.distributional_regressor.parameters())
        self._optimiser.step()

        if self._scheduler:
            self._scheduler.step()
            if metric_writer:
                for i, param_group in enumerate(self._optimiser.param_groups):
                    metric_writer.scalar(f"lr{i}", param_group["lr"])

        loss_cpu = loss.detach().to("cpu").numpy()
        if metric_writer:
            metric_writer.scalar("Loss", loss_cpu)

        return loss_cpu.item()
Example 8
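Forward pass through a LateConcatInputMLP, which takes a separate side input that is concatenated late in the network.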
    def stest_multi_dim_normal2321412121():
        """

      """
        s = (19,)
        s1 = (4,)
        batch_size = (100,)
        output_shape = 2
        model = LateConcatInputMLP(input_shape=s + s1, output_shape=output_shape)

        inp = to_tensor(numpy.random.random((*batch_size, *s)), device="cpu")
        late_input = to_tensor(numpy.random.random((*batch_size, *s1)), device="cpu")
        print(model.forward(inp, late_input))
Example 9
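Verifies that mixing a signal with itself at ratio 0 (mix_ratio) leaves it unchanged, then saves the mixture as a WAV file.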
    def asadsa2():
        """ """
        from draugr.torch_utilities import to_tensor
        from neodroidaudition.data.recognition.libri_speech import LibriSpeech
        from neodroidaudition.noise_generation.gaussian_noise import white_noise
        import torchaudio
        from pathlib import Path

        libri_speech = LibriSpeech(
            path=Path.home() / "Data" / "Audio" / "Speech" / "LibriSpeech"
        )
        files, sr = zip(*[(v[0].numpy(), v[1])
                          for _, v in zip(range(1), libri_speech)])
        assert all([sr[0] == s for s in sr[1:]])

        normed = files[0]
        mixed = mix_ratio(normed, normed, 0)
        mixed2 = mix_ratio(mixed, mixed, 0)
        print(normed, mixed)
        print(mixed2, mixed)
        print(root_mean_square(normed))
        print(root_mean_square(mixed))
        print(root_mean_square(mixed2))
        assert numpy.allclose(normed, mixed)
        assert numpy.allclose(mixed2, mixed)
        torchaudio.save(
            str(ensure_existence(Path.cwd() / "exclude") / "mixed_same.wav"),
            to_tensor(mixed),
            int(sr[0]),
        )
Example 10
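Samples from a MultipleCategoricalMLP fed the same input tensor twice.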
    def multi_cat():
        s = (2, 2)
        a = (2, 2)
        model = MultipleCategoricalMLP(input_shape=s, output_shape=a)

        inp = to_tensor(numpy.random.rand(64, s[0]), device="cpu")
        print(model.sample(model(inp, inp)))
Example 11
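Forward pass through a PreConcatInputMLP that takes one input per entry of its input shape.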
def test_multi_dim_normal():
    s = (10, 2, 3)
    a = (2, 10)
    model = PreConcatInputMLP(input_shape=s, output_shape=a)

    inp = [to_tensor(range(s_), device="cpu") for s_ in s]
    print(model.forward(*inp))
Example 12
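Minimal PreConcatInputMLP forward pass with a single one-dimensional input.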
def test_normal():
    s = (10, )
    a = (10, )
    model = PreConcatInputMLP(input_shape=s, output_shape=a)

    inp = to_tensor(range(s[0]), device="cpu")
    print(model.forward(inp))
Example 13
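Dataset helper that resizes an image and its colour-encoded instance mask, then returns the image together with one binary mask channel per instance.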
    def get_instanced(self, idx):
        """
        Return a separate channel target for each instance in image

        :param idx:
        :type idx:
        :return:
        :rtype:"""
        img = numpy.array(
            Image.open(self._img_path / self.imgs[idx]).convert("RGB"))
        mask = numpy.array(Image.open(self._ped_path / self.masks[idx]))

        img = cv2_resize(img, self.image_size_T)
        mask = cv2_resize(mask, self.image_size_T, InterpolationEnum.nearest)

        obj_ids = numpy.unique(
            mask)  # instances are encoded as different colors
        obj_ids = obj_ids[1:]  # first id is the background, so remove it

        # split the color-encoded mask into a set of binary masks
        masks = mask == obj_ids[:, None, None]
        zero_mask_clone = self.zero_mask.copy()
        zero_mask_clone[:masks.shape[0]] = masks

        return (
            uint_hwc_to_chw_float_tensor(to_tensor(img, dtype=torch.uint8)),
            torch.as_tensor(zero_mask_clone, dtype=torch.uint8),
        )
Example 14
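Property returning the tensor shape of the dataset's first predictor sample.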
    def predictor_shape(self) -> Tuple[int, ...]:
        """

:return:
:rtype:
"""
        return to_tensor(self.__getitem__(0)[0]).shape
Example 15
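Single-input MLP forward pass on a random batch.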
def test_single_dim():
    pos_size = (4,)
    a_size = (1,)
    model = MLP(input_shape=pos_size, output_shape=a_size)

    pos_1 = to_tensor(numpy.random.rand(64, pos_size[0]), device="cpu")
    print(model(pos_1))
Example 16
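MLP forward pass with explicitly specified hidden-layer sizes.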
def test_hidden_dim():
    pos_size = (4,)
    hidden_size = (2, 3)
    a_size = (2,)
    model = MLP(input_shape=pos_size, hidden_layers=hidden_size, output_shape=a_size)

    pos_1 = to_tensor(numpy.random.rand(64, pos_size[0]), device="cpu")
    print(model(pos_1))
Example 17
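Decoder helper that moves an encoding onto the model's device, asserts it matches the latent size, and decodes it back on the CPU.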
    def sample_from(self, *encoding) -> torch.Tensor:
        sample = to_tensor(*encoding).to(device=next(self.parameters()).device)
        assert sample.shape[-1] == self._latent_size, (
            f"sample.shape[-1]:{sample.shape[-1]} !="
            f" self._encoding_size:{self._latent_size}"
        )
        return self.decode(*sample).to("cpu")
Example 18
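Iterates cross_validation_generator over train/validation TensorDatasets built from stacked diagonal matrices.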
def asdasidoj():
    """ """
    X = to_tensor([torch.diag(torch.arange(i, i + 2)) for i in range(200)])
    x_train = TensorDataset(X[:100])
    x_val = TensorDataset(X[100:])

    for train, val in cross_validation_generator(x_train, x_val):
        print(len(train), len(val))
        print(train[0], val[0])
Example 19
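Builds babble noise by truncating the samples to a common length, peak-normalising, and summing them; the mixture can optionally be exported as a WAV file.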
def generate_babble_noise(samples: Iterable[Iterable[Sequence]],
                          sampling_rate,
                          *,
                          export_path: Path = None) -> numpy.ndarray:
    """Sum length-aligned, peak-normalised samples into babble noise."""
    samples = numpy.array(min_length_truncate_batch(samples))
    mixed = numpy.sum(samples / numpy.max(numpy.abs(samples)), 0)
    if export_path:
        torchaudio.save(str(export_path), to_tensor(mixed), sampling_rate)
    return mixed
Example 20
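A torch-only variant of Example 13 that splits the colour-encoded mask into per-instance binary masks with torch.unique.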
    def get_instanced(self, idx):
        """

:param idx:
:type idx:
:return:
:rtype:
"""
        img = to_tensor(Image.open(self._img_path / self.imgs[idx]).convert("RGB"))
        mask = to_tensor(Image.open(self._ped_path / self.masks[idx]))

        obj_ids = torch.unique(mask)  # instances are encoded as different colors
        obj_ids = obj_ids[1:]  # first id is the background, so remove it

        # split the color-encoded mask into a set of binary masks
        masks = mask == obj_ids[:, None, None]
        masks = torch.as_tensor(masks, dtype=torch.uint8)

        return img, masks
Example 21
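Runs torch_compute_gae on a synthetic rollout with injected terminal states and a negative tail signal, printing the returns and advantages.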
    def s():
        """Compute GAE returns and advantages for a synthetic rollout."""
        numpy.random.seed(23)
        size = (10, 3, 1)
        a_size = (size[0] + 1, *size[1:])
        signal = numpy.zeros(size)
        non_terminal = numpy.ones(size)
        value_estimate = numpy.random.random(a_size)
        non_terminal[3, 0] = 0
        non_terminal[8, 1] = 0
        signal[-5:, :] = -1

        signals = to_tensor(signal, device="cpu")
        non_terminals = to_tensor(non_terminal, device="cpu")
        value_estimates = to_tensor(value_estimate, device="cpu")

        r, a = torch_compute_gae(signals, non_terminals, value_estimates)
        print(r, a)
        print(size, r.shape, a.shape)
Example 22
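Greedy action selection: takes the arg-max over the value model's Q-estimates and returns it as a numpy array.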
    def _sample_model(self, state: Any) -> numpy.ndarray:
        """

@param state:
@return:
"""
        with torch.no_grad():
            max_q_action = self.value_model(
                to_tensor(state, device=self._device, dtype=self._state_type))
            return max_q_action.max(-1)[-1].unsqueeze(-1).detach().to(
                "cpu").numpy()
Example 23
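Averages 1000 actions sampled from the policy head of a CategoricalActorCriticMLP on random inputs.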
    def stest_single_dim_cat():
        pos_size = (4,)
        a_size = (2,)
        batch_size = 64
        model = CategoricalActorCriticMLP(input_shape=pos_size, output_shape=a_size)

        samples = [
            model(
                to_tensor(numpy.random.rand(batch_size, pos_size[0]), device="cpu")
            )[0].sample()
            for _ in range(1000)
        ]
        print(torch.mean(to_tensor(samples)))  # mean sampled action over 1000 draws
Example 24
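Generates speech-shaped noise, via both Welch and FFT long-term-average spectra, from three male and three female LibriSpeech speakers, saves the signals, and plots the spectra.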
    def distinct_real():
        from draugr.visualisation import ltass_plot
        from neodroidaudition.data.recognition.libri_speech import LibriSpeech

        samples = 6
        d_male = iter(
            LibriSpeech(
                path=Path.home() / "Data" / "Audio" / "Speech" / "LibriSpeech",
                custom_subset=LibriSpeech.CustomSubsets.male,
            ))
        d_female = iter(
            LibriSpeech(
                path=Path.home() / "Data" / "Audio" / "Speech" / "LibriSpeech",
                custom_subset=LibriSpeech.CustomSubsets.female,
            ))
        male_unique = {}
        while len(male_unique) < samples // 2:
            s = next(d_male)
            speaker_id = s[-3]
            if speaker_id not in male_unique:
                male_unique[speaker_id] = s

        female_unique = {}
        while len(female_unique) < samples // 2:
            s = next(d_female)
            speaker_id = s[-3]
            if speaker_id not in female_unique:
                female_unique[speaker_id] = s

        unique = (*male_unique.values(), *female_unique.values())

        files, sr = zip(*[(v[0].numpy(), v[1])
                          for _, v in zip(range(samples), unique)])
        assert all([sr[0] == s for s in sr[1:]])
        sr = sr[0]

        noise_welch = generate_speech_shaped_noise(
            files,
            sr,
            long_term_avg=True,
            export_path=Path("exclude") / "ssn_welch.wav",
        )[0]
        noise_fft = generate_speech_shaped_noise(
            files,
            sr,
            long_term_avg=False,
            export_path=Path("exclude") / "ssn_fft.wav",
        )[0]
        files = numpy.concatenate(files, -1)[0]
        torchaudio.save(str(Path("exclude") / "ssn_signal.wav"), to_tensor(files), sr)

        ltass_plot(files, sr, label="signal")
        ltass_plot(noise_welch, sr, label="noise_welch")
        ltass_plot(noise_fft, sr, label="noise_fft")

        pyplot.legend()
        pyplot.show()
Example 25
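Generates Gaussian noise of the requested type and length, optionally exporting it as a WAV file.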
def generate_noise(
        length: int,
        *,
        seed: int = None,
        noise_type: GaussianNoiseTypeEnum = GaussianNoiseTypeEnum.white,
        export_path: Path = None,
        sampling_rate: int = 16000) -> numpy.ndarray:
    normalised = noise_type(length, seed)
    if export_path:
        torchaudio.save(str(export_path), to_tensor(normalised), sampling_rate)
    return normalised
Example 26
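Dataset helper returning a resized image together with its single binary foreground mask.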
    def get_binary(self, idx):
        """

:param idx:
:type idx:
:return:
:rtype:
"""
        img = numpy.array(Image.open(self._img_path / self.imgs[idx]).convert("RGB"))
        mask = numpy.array(Image.open(self._ped_path / self.masks[idx]))

        mask[mask != 0] = 1.0

        img = cv2_resize(img, self.image_size_T)
        mask = cv2_resize(mask, self.image_size_T)

        return (
            uint_hwc_to_chw_float_tensor(to_tensor(img, dtype=torch.uint8)),
            to_tensor(mask).unsqueeze(0),
        )
Example 27
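Two related snippets: an actor sampling routine that adds exploration noise to the actor's output, and a pair-evaluation loop like Example 2 whose model returns the dissimilarity directly.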
    def _sample(self, state: Sequence) -> Any:
        """

@param state:
@param deterministic:
@return:
"""

        with torch.no_grad():
            action_out = self._actor(to_tensor(state,
                                               device=self._device)).detach()

        deterministic = False  # hard-coded: exploration noise is always added here
        if not deterministic:
            # add action-space noise for exploration; an alternative is parameter-space noise
            noise = self._random_process.sample(action_out.shape)
            action_out += to_tensor(noise * self._noise_factor,
                                    device=self._device)

        return action_out
def stest_many_versus_many2(model: Module,
                            data_dir: Path,
                            img_size: Tuple[int, int],
                            threshold=0.5):
    """

:param model:
:type model:
:param data_dir:
:type data_dir:
:param img_size:
:type img_size:
:param threshold:
:type threshold:
"""
    dataiter = iter(
        DataLoader(
            PairDataset(
                data_dir,
                transform=transforms.Compose([
                    transforms.Grayscale(),
                    transforms.Resize(img_size),
                    transforms.ToTensor(),
                ]),
            ),
            num_workers=4,
            batch_size=1,
            shuffle=True,
        ))
    for i in range(10):
        x0, x1, is_diff = next(dataiter)
        distance = (model(
            to_tensor(x0, device=global_torch_device()),
            to_tensor(x1, device=global_torch_device()),
        ).cpu().item())
        boxed_text_overlay_plot(
            torchvision.utils.make_grid(torch.cat((x0, x1), 0)),
            f"Truth: {'Different' if is_diff.cpu().item() else 'Alike'},"
            f" Dissimilarity: {distance:.2f},"
            f" Verdict: {'Different' if distance > threshold else 'Alike'}",
        )