Example No. 1
    def wrap_obs(self, obs: np.ndarray, dummy_value: float = 0.0) -> np.ndarray:
        """Pad an observation from the from_vars layout up to the to_vars
        layout by inserting dummy_value at the slots the extra variables
        would occupy. Assumes numpy is imported as np."""
        vardiff = self.to_vars - self.from_vars
        obs = list(obs)

        # insert dummy observation values
        for i in range(self.from_vars, self.to_vars):
            obs.insert(i, dummy_value)

        # insert dummy intervention values
        for i in range(self.to_vars + self.from_vars, 2 * self.to_vars):
            obs.insert(i, dummy_value)

        # insert dummy graph-state values between the original edge blocks
        acc_index = self.to_vars * 2
        for i in range(self.from_vars - 1, 1, -1):
            acc_index += i
            for _ in range(vardiff):
                obs.insert(acc_index, dummy_value)
            acc_index += vardiff

        # append dummy values until the padded layout is full:
        # 2 * to_vars value slots plus to_vars * (to_vars - 1) / 2 edge slots
        target_len = int(self.to_vars * 2 + self.to_vars * (self.to_vars - 1) / 2)
        while acc_index < target_len - 1:
            obs.append(dummy_value)
            acc_index += 1

        return np.array(obs)
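
A minimal usage sketch. It assumes wrap_obs is reachable as a plain function (it appears above as a method, so self is passed explicitly) and uses a hypothetical SimpleNamespace stand-in for the wrapper instance; the values from_vars=3 and to_vars=4 are made up for illustration.

import numpy as np
from types import SimpleNamespace

# hypothetical stand-in for the wrapper instance; wrap_obs only reads
# the from_vars and to_vars attributes
wrapper = SimpleNamespace(from_vars=3, to_vars=4)

# an observation in the from_vars=3 layout: 3 observation values,
# 3 intervention values, and 3 * (3 - 1) / 2 = 3 graph-state values
obs = np.arange(9, dtype=float)

padded = wrap_obs(wrapper, obs, dummy_value=0.0)
print(padded.shape)  # (14,) == 2 * 4 + 4 * (4 - 1) / 2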
Example No. 2
    def backpropagation(self,
                        outputs: list,
                        x: np.ndarray,
                        y: np.ndarray,
                        n_data: int = 2) -> dict:
        """Propagate the loss gradient backwards through the network and
        update each layer via the configured optimizer. outputs is the
        list of per-layer outputs from the forward pass."""
        # prepend the input so outputs[i] is the output of self.layers[i - 1]
        # and outputs[0] is the network input x
        outputs.insert(0, x)
        N = len(self.layers)
        deltas = []
        delta = None
        loss = np.nan

        # walk backwards from the output layer to the first layer
        for i in range(N, 0, -1):
            output: np.ndarray = outputs[i]
            prev_output: np.ndarray = outputs[i - 1]
            layer: Layer = self.layers[i - 1]

            if i == N:
                # output layer: differentiate the loss w.r.t. the prediction
                d1 = self.dloss(y, output)
                loss = np.sum(d1)
            else:
                # hidden layer: pull the delta back through the next layer's weights
                layer2: Layer = self.layers[i]
                w2, b2 = layer2.get_weights()
                d1 = delta.dot(w2.T)

            delta = d1 * layer.dactivation(output)
            delta_w = prev_output.T.dot(delta)
            delta_b = delta
            deltas.append((delta_w, delta_b))

        # deltas were collected output-to-input, so pair them with the
        # reversed layer list before applying the optimizer updates
        layers = self.layers[::-1]
        for i in range(len(deltas) - 1, -1, -1):
            delta_w, delta_b = deltas[i]
            layer = layers[i]

            layer.zero_grad()
            layer = self.optimizer(layer, delta_w, delta_b, n_data)
            layer.update()

        return dict(loss=loss)
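
The backward loop is the standard backprop recursion: the output-layer delta comes from the loss derivative, and each hidden delta is the next layer's delta pulled back through its weights and scaled by the activation derivative. A self-contained sketch of one hidden-layer step, assuming sigmoid activations; the Layer class, dloss, and the optimizer from the example are not reproduced, and all shapes here are made up for illustration.

import numpy as np

def dsigmoid(out):
    # sigmoid derivative written in terms of the sigmoid's output
    return out * (1.0 - out)

rng = np.random.default_rng(0)
delta_next = rng.normal(size=(2, 4))    # delta of the layer above: (batch, units_next)
w_next = rng.normal(size=(3, 4))        # weights of the layer above: (units, units_next)
output = rng.uniform(size=(2, 3))       # this layer's activations: (batch, units)
prev_output = rng.uniform(size=(2, 5))  # previous activations: (batch, units_prev)

# mirrors d1 = delta.dot(w2.T) followed by delta = d1 * layer.dactivation(output)
delta = delta_next.dot(w_next.T) * dsigmoid(output)

# mirrors delta_w = prev_output.T.dot(delta): one weight gradient per connection
delta_w = prev_output.T.dot(delta)
print(delta.shape, delta_w.shape)  # (2, 3) (5, 3)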