Example #1
0
    def train(self, func, x0):
        """Optimize parameters to minimze loss.

        Arguments:
            - func: A function of the parameters that returns a tuple with the gradient and the loss respectively.
            - x0: Parameters to use as starting point.

        Returns the parameters that minimize the loss.
        """
        if self._uW is None:
            # TODO: should we not refcast here?
            self._uW = numx.zeros_like(x0)
            self.deltaW = numx.ones_like(x0) * self.deltainit

        updated_params = x0.copy()

        for _ in range(self.epochs):
            # TODO: properly name variables
            uW = func(updated_params)[0]

            WW = self._uW * uW

            self.deltaW *= self.etaplus * (WW > 0) + self.etamin * (
                WW < 0) + 1 * (WW == 0)

            self.deltaW = numx.maximum(self.deltaW, self.deltamin)
            self.deltaW = numx.minimum(self.deltaW, self.deltamax)

            updated_params -= self.deltaW * numx.sign(uW)

            self._uW = uW

        return updated_params
Example #2
0
    def train(self, func, x0):
        """Optimize parameters to minimze loss.

        Arguments:
            - func: A function of the parameters that returns a tuple with the gradient and the loss respectively.
            - x0: Parameters to use as starting point.

        Returns the parameters that minimize the loss.
        """
        if self._uW is None:
            # TODO: should we not refcast here?
            self._uW = numx.zeros_like(x0)
            self.deltaW = numx.ones_like(x0) * self.deltainit

        updated_params = x0.copy()

        for _ in range(self.epochs):
            # TODO: properly name variables
            uW = func(updated_params)[0]

            WW = self._uW * uW;

            self.deltaW *= self.etaplus * (WW > 0) + self.etamin * (WW < 0) + 1 * (WW == 0);

            self.deltaW = numx.maximum(self.deltaW, self.deltamin)
            self.deltaW = numx.minimum(self.deltaW, self.deltamax)

            updated_params -= self.deltaW * numx.sign(uW)

            self._uW = uW

        return updated_params
Example #3
0
def switching_signals(f1, f2, T, n_switches, n_samples=1):
    """Generate samples that switch between two sine-wave regimes.

    Arguments:
        - f1, f2: Frequencies (cycles per timestep) of the two prototype sines.
        - T: Number of timesteps in one prototype period block.
        - n_switches: Number of (regime-1, regime-2) switch pairs per sample.
        - n_samples: How many independent samples to generate.

    Returns a list of n_samples entries; each entry is a one-element list
    holding an array of shape (total_blocks * T, 2) whose first column is a
    +1/-1 regime indicator and whose second column is the matching sine wave.
    """
    samples = []
    # Timestep indices for one prototype block of length T.
    t = numx.arange(T)
    proto_1 = numx.atleast_2d(numx.sin(2 * numx.pi * t * f1)).T
    proto_2 = numx.atleast_2d(numx.sin(2 * numx.pi * t * f2)).T

    for _ in range(n_samples):
        # Random repeat counts in [4, 8) for each regime of each switch pair.
        n_periods1 = numx.random.randint(4, 8, size=n_switches)
        n_periods2 = numx.random.randint(4, 8, size=n_switches)

        switch = []
        signal = []
        for p1, p2 in zip(n_periods1, n_periods2):
            switch.extend([numx.ones_like(proto_1)] * p1)
            switch.extend([-1 * numx.ones_like(proto_2)] * p2)
            signal.extend([proto_1] * p1)
            signal.extend([proto_2] * p2)

        # Stack indicator and signal side by side as two columns.
        samples.append([numx.concatenate((numx.concatenate(switch),
                                          numx.concatenate(signal)), 1)])
    return samples
Example #4
0
def switching_signals(f1, f2, T, n_switches, n_samples=1):
    """Build samples alternating between two sine regimes.

    Arguments:
        - f1, f2: Frequencies (cycles per timestep) of the two sine waves.
        - T: Number of timesteps in one prototype block.
        - n_switches: Number of switch pairs per sample.
        - n_samples: How many independent samples to generate.

    Returns a list of n_samples one-element lists, each holding a two-column
    array: a +1/-1 regime indicator next to the corresponding sine signal.
    """
    # seconds per simulation timestep
    timeline = numx.arange(T)
    wave_a = numx.atleast_2d(numx.sin(2 * numx.pi * timeline * f1)).T
    wave_b = numx.atleast_2d(numx.sin(2 * numx.pi * timeline * f2)).T

    # Constant indicator blocks; hoisted since they never change per sample.
    flag_a = numx.ones_like(wave_a)
    flag_b = -1 * numx.ones_like(wave_b)

    collected = []
    for _ in range(n_samples):
        reps_a = numx.random.randint(4, 8, size=(n_switches))
        reps_b = numx.random.randint(4, 8, size=(n_switches))

        flags = []
        waves = []
        for ra, rb in zip(reps_a, reps_b):
            flags += [flag_a] * ra + [flag_b] * rb
            waves += [wave_a] * ra + [wave_b] * rb

        combined = numx.concatenate(
            (numx.concatenate(flags), numx.concatenate(waves)), 1)
        collected.append([combined])
    return collected