Example 1
    def __init__(self, input_size, hidden_size,
                 order,
                 theta=100,  # relative to dt=1
                 method='zoh',
                 trainable_input_encoders=True,
                 trainable_hidden_encoders=True,
                 trainable_memory_encoders=True,
                 trainable_input_kernel=True,
                 trainable_hidden_kernel=True,
                 trainable_memory_kernel=True,
                 trainable_A=False,
                 trainable_B=False,
                 input_encoders_initializer=lecun_uniform,
                 hidden_encoders_initializer=lecun_uniform,
                 memory_encoders_initializer=partial(torch.nn.init.constant_, val=0),
                 input_kernel_initializer=torch.nn.init.xavier_normal_,
                 hidden_kernel_initializer=torch.nn.init.xavier_normal_,
                 memory_kernel_initializer=torch.nn.init.xavier_normal_,
                 hidden_activation='tanh',
                 ):
        super(LMUCell, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.order = order

        if hidden_activation == 'tanh':
            self.hidden_activation = torch.tanh
        elif hidden_activation == 'relu':
            self.hidden_activation = torch.relu
        else:
            raise NotImplementedError("hidden activation '{}' is not implemented".format(hidden_activation))

        realizer = Identity()
        self._realizer_result = realizer(
            LegendreDelay(theta=theta, order=self.order))
        self._ss = cont2discrete(
            self._realizer_result.realization, dt=1., method=method)
        self._A = self._ss.A - np.eye(order)  # puts into form: x += Ax
        self._B = self._ss.B
        self._C = self._ss.C
        assert np.allclose(self._ss.D, 0)  # proper LTI

        self.input_encoders = nn.Parameter(torch.Tensor(1, input_size), requires_grad=trainable_input_encoders)
        self.hidden_encoders = nn.Parameter(torch.Tensor(1, hidden_size), requires_grad=trainable_hidden_encoders)
        self.memory_encoders = nn.Parameter(torch.Tensor(1, order), requires_grad=trainable_memory_encoders)
        self.input_kernel = nn.Parameter(torch.Tensor(hidden_size, input_size), requires_grad=trainable_input_kernel)
        self.hidden_kernel = nn.Parameter(torch.Tensor(hidden_size, hidden_size), requires_grad=trainable_hidden_kernel)
        self.memory_kernel = nn.Parameter(torch.Tensor(hidden_size, order), requires_grad=trainable_memory_kernel)
        self.AT = nn.Parameter(torch.Tensor(self._A), requires_grad=trainable_A)
        self.BT = nn.Parameter(torch.Tensor(self._B), requires_grad=trainable_B)

        # Initialize parameters
        input_encoders_initializer(self.input_encoders)
        hidden_encoders_initializer(self.hidden_encoders)
        memory_encoders_initializer(self.memory_encoders)
        input_kernel_initializer(self.input_kernel)
        hidden_kernel_initializer(self.hidden_kernel)
        memory_kernel_initializer(self.memory_kernel)
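
The constructor above only sets up the parameters. A minimal sketch of a matching step function, assuming the standard LMU recurrence for this cell (the forward name, the (h, m) state tuple, batch-first shapes, and `import torch.nn.functional as F` at module level are assumptions, not taken from the snippet):

    def forward(self, input, hx):
        h, m = hx  # hidden state (batch, hidden_size), memory state (batch, order)

        # encode input, hidden, and memory into a scalar write signal u
        u = (F.linear(input, self.input_encoders) +
             F.linear(h, self.hidden_encoders) +
             F.linear(m, self.memory_encoders))

        # memory update; AT stores A - I, so this realizes m <- A m + B u
        m = m + F.linear(m, self.AT) + F.linear(u, self.BT)

        # hidden update from the three kernels
        h = self.hidden_activation(
            F.linear(input, self.input_kernel) +
            F.linear(h, self.hidden_kernel) +
            F.linear(m, self.memory_kernel))
        return h, m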
Example 2
def test_doubleexp_discrete():
    sys = PadeDelay(0.1, order=5)

    tau1 = 0.05
    tau2 = 0.02
    dt = 0.002
    syn = DoubleExp(tau1, tau2)

    FH = ss2sim(sys, syn, dt=dt)

    a1 = np.exp(-dt / tau1)
    a2 = np.exp(-dt / tau2)
    t1 = 1 / (1 - a1)
    t2 = 1 / (1 - a2)
    c = [a1 * a2 * t1 * t2, - (a1 + a2) * t1 * t2, t1 * t2]
    sys = cont2discrete(sys, dt=dt)

    A = sys.A
    FHA = c[2] * np.dot(A, A) + c[1] * A + c[0] * np.eye(len(A))
    B = sys.B
    FHB = (c[2] * A + (c[1] + c[2]) * np.eye(len(A))).dot(B)
    assert np.allclose(FH.A, FHA)
    assert np.allclose(FH.B, FHB)
    assert np.allclose(FH.C, sys.C)
    assert np.allclose(FH.D, sys.D)
Example 3
def test_state_norm(plt):
    # Choose a filter, timestep, and number of simulation timesteps
    sys = Alpha(0.1)
    dt = 0.000001
    length = 2000000

    # Modify the state-space to read out the state vector
    A, B, C, D = sys2ss(sys)
    old_C = C
    C = np.eye(len(A))
    D = np.zeros((len(A), B.shape[1]))

    response = np.empty((length, len(C)))
    for i in range(len(C)):
        # Simulate the state vector
        response[:, i] = impulse((A, B, C[i, :], D[i, :]), dt, length)

    # Check that the power of each state equals the H2-norm of each state
    # The analog case is the same after scaling since dt is approx 0.
    actual = norm(response, axis=0) * dt
    assert np.allclose(actual, state_norm(cont2discrete(sys, dt)))
    assert np.allclose(actual, state_norm(sys) * np.sqrt(dt))

    plt.figure()
    plt.plot(response[:, 0], label="$x_0$")
    plt.plot(response[:, 1], label="$x_1$")
    plt.plot(np.dot(response, old_C.T), label="$y$")
    plt.legend()
Example 4
def delayed_synapse():
    a = 0.1  # desired delay
    b = 0.01  # synapse delay
    tau = 0.01  # recurrent tau
    hz = 15  # input frequency
    t = 1.0  # simulation time
    dt = 0.00001  # simulation timestep
    order = 6  # order of pade approximation
    tau_probe = 0.02

    dexp_synapse = DoubleExp(tau, tau / 5)

    sys_lambert = lambert_delay(a, b, tau, order - 1, order)
    synapse = (cont2discrete(Lowpass(tau), dt=dt) *
               DiscreteDelay(int(b / dt)))

    n_neurons = 2000
    neuron_type = PerfectLIF()

    A, B, C, D = sys_lambert.observable.transform(5*np.eye(order)).ss

    sys_normal = PadeDelay(a, order)
    assert len(sys_normal) == order

    with Network(seed=0) as model:
        stim = Node(output=WhiteSignal(t, high=hz, y0=0))

        x = EnsembleArray(n_neurons // order, len(A), neuron_type=neuron_type)
        output = Node(size_in=1)

        Connection(x.output, x.input, transform=A, synapse=synapse)
        Connection(stim, x.input, transform=B, synapse=synapse)
        Connection(x.output, output, transform=C, synapse=None)
        Connection(stim, output, transform=D, synapse=None)

        lowpass_delay = LinearNetwork(
            sys_normal, n_neurons_per_ensemble=n_neurons // order,
            synapse=tau, input_synapse=tau,
            dt=None, neuron_type=neuron_type, radii=1.0)
        Connection(stim, lowpass_delay.input, synapse=None)

        dexp_delay = LinearNetwork(
            sys_normal, n_neurons_per_ensemble=n_neurons // order,
            synapse=dexp_synapse, input_synapse=dexp_synapse,
            dt=None, neuron_type=neuron_type, radii=1.0)
        Connection(stim, dexp_delay.input, synapse=None)

        p_stim = Probe(stim, synapse=tau_probe)
        p_output_delayed = Probe(output, synapse=tau_probe)
        p_output_lowpass = Probe(lowpass_delay.output, synapse=tau_probe)
        p_output_dexp = Probe(dexp_delay.output, synapse=tau_probe)

    with Simulator(model, dt=dt, seed=0) as sim:
        sim.run(t)

    return (a, dt, sim.trange(), sim.data[p_stim],
            sim.data[p_output_delayed], sim.data[p_output_lowpass],
            sim.data[p_output_dexp])
Example 5
def test_principle3_discrete():
    sys = PadeDelay(0.1, order=5)

    tau = 0.01
    dt = 0.002
    syn = Lowpass(tau)

    FH = ss2sim(sys, syn, dt=dt)

    a = np.exp(-dt / tau)
    sys = cont2discrete(sys, dt=dt)
    assert np.allclose(FH.A, (sys.A - a * np.eye(len(sys))) / (1 - a))
    assert np.allclose(FH.B, sys.B / (1 - a))
    assert np.allclose(FH.C, sys.C)
    assert np.allclose(FH.D, sys.D)

    # We can also do the discretization ourselves and then pass in dt=None
    assert ss_equal(
        ss2sim(sys, cont2discrete(syn, dt=dt), dt=None), FH)
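
For context, a sketch of how the matrices returned by ss2sim are typically wired up, mirroring the Connection pattern of the delayed_synapse example (Example 4). The ensemble size and stimulus below are arbitrary placeholders, not part of this test:

import numpy as np
import nengo

with nengo.Network() as model:
    stim = nengo.Node(output=np.sin)
    x = nengo.networks.EnsembleArray(100, len(sys))
    readout = nengo.Node(size_in=1)

    # feeding every connection through the same synapse that ss2sim was
    # given makes the population approximate the original system `sys`
    nengo.Connection(x.output, x.input, transform=FH.A, synapse=syn)
    nengo.Connection(stim, x.input, transform=FH.B, synapse=syn)
    nengo.Connection(x.output, readout, transform=FH.C, synapse=None)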
Example 6
def _apply_filter(sys, dt, u):
    # "Correct" implementation of filt that has a single time-step delay
    # see Nengo issue #938
    if dt is not None:
        num, den = cont2discrete(sys, dt).tf
    elif not sys.analog:
        num, den = sys.tf
    else:
        raise ValueError("system (%s) must be discrete if not given dt" % sys)

    # convert from the polynomial representation, and add back the leading
    # zeros that were dropped by poly1d, since lfilter will shift it the
    # wrong way (it will add the leading zeros back to the end, effectively
    # removing the delay)
    num, den = map(np.asarray, (num, den))
    num = np.append([0]*(len(den) - len(num)), num)
    return lfilter(num, den, u, axis=-1)
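
A quick usage sketch exercising both branches (the values are arbitrary; Lowpass and cont2discrete are the same helpers used in the other tests here). Passing an analog system with dt, or its pre-discretized counterpart with dt=None, goes through the same transfer function, so the outputs match:

import numpy as np

dt = 0.001
u = np.random.randn(1000)

y1 = _apply_filter(Lowpass(0.005), dt, u)                          # analog system + dt
y2 = _apply_filter(cont2discrete(Lowpass(0.005), dt=dt), None, u)  # already discrete, no dt

assert np.allclose(y1, y2)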
Example 7
def test_state_norm(plt):
    # Choose a filter, timestep, and number of simulation timesteps
    sys = Alpha(0.1)
    dt = 0.000001
    length = 2000000
    assert np.allclose(dt * length, 2.0)

    # Check that the power of each state equals the H2-norm of each state
    # The analog case is the same after scaling since dt is approx 0.
    response = sys.X.impulse(length, dt)
    actual = norm(response, axis=0) * dt
    assert np.allclose(actual, state_norm(cont2discrete(sys, dt)))
    assert np.allclose(actual, state_norm(sys) * np.sqrt(dt))

    step = int(0.002 / dt)
    plt.figure()
    plt.plot(response[::step, 0], label="$x_0$")
    plt.plot(response[::step, 1], label="$x_1$")
    plt.plot(np.dot(response[::step], sys.C.T), label="$y$")
    plt.legend()
Example 8
def test_non_siso_filtering(rng):
    sys = PadeDelay(0.1, order=4)
    length = 1000

    SIMO = sys.X
    assert not SIMO.is_SISO
    assert SIMO.size_in == 1
    assert SIMO.size_out == len(sys)

    x = SIMO.impulse(length)
    for i, (sub1, sub2) in enumerate(zip(sys, SIMO)):
        assert sub1 == sub2
        y1 = sub1.impulse(length)
        y2 = sub2.impulse(length)
        _transclose(shift(y1), shift(y2), x[:, i])

    B = np.asarray([[1, 2, 3], [0, 0, 0], [0, 0, 0], [0, 0, 0]]) * sys.B
    u = rng.randn(length, 3)

    Bu = u.dot([1, 2, 3])
    assert Bu.shape == (length,)
    MISO = LinearSystem((sys.A, B, sys.C, np.zeros((1, 3))), analog=True)
    assert not MISO.is_SISO
    assert MISO.size_in == 3
    assert MISO.size_out == 1

    y = cont2discrete(MISO, dt=0.001).filt(u)
    assert y.shape == (length,)
    assert np.allclose(shift(sys.filt(Bu)), y)

    MIMO = MISO.X
    assert not MIMO.is_SISO
    assert MIMO.size_in == 3
    assert MIMO.size_out == 4

    y = MIMO.filt(u)
    I = np.eye(len(sys))
    for i, sub1 in enumerate(MIMO):
        sub2 = LinearSystem((sys.A, B, I[i:i+1], np.zeros((1, 3))))
        _transclose(sub1.filt(u), sub2.filt(u), y[:, i])
Example 9
def test_impulse():
    dt = 0.001
    tau = 0.005
    length = 500

    delta = np.zeros(length)
    delta[0] = 1. / dt

    sys = Lowpass(tau)
    response = sys.impulse(length, dt)
    assert not np.allclose(response[0], 0)

    # should give the same result as using filt
    assert np.allclose(response, sys.filt(delta, dt))

    # and should default to the same dt
    assert sys.default_dt == dt
    assert np.allclose(response, sys.impulse(length))

    # should also accept discrete systems
    dss = cont2discrete(sys, dt=dt)
    assert not dss.analog
    assert np.allclose(response, dss.impulse(length) / dt)
    assert np.allclose(response, dss.impulse(length, dt=dt))
Example 10
    def __init__(self, input_size, hidden_size, memory_size, theta, name='garbage',
                 discretizer='zoh', nonlinearity='sigmoid',
                 A_learnable=False, B_learnable=False, activate=False):
        super(ASSVMU, self).__init__()
        
        ### SIZE
        self.k = input_size
        self.n = hidden_size
        self.d = memory_size
        

        ### PARAMETERS
        self.Wx = nn.Parameter(torch.Tensor(self.n,self.k))
        self.Wh = nn.Parameter(torch.Tensor(self.n,self.n))
        self.Wm = nn.Parameter(torch.Tensor(self.n,self.d))
        self.ex = nn.Parameter(torch.Tensor(1,self.k))
        self.eh = nn.Parameter(torch.Tensor(1,self.n))
        self.em = nn.Parameter(torch.Tensor(1,self.d))

        ### A,B MATRIX ----- FIX??
        order=self.d
        Q = np.arange(order, dtype=np.float64)
        R = (2 * Q + 1)[:, None] / theta
        j, i = np.meshgrid(Q, Q)
        A = np.where(i < j, -1, (-1.0) ** (i - j + 1)) * R
        B = (-1.0) ** Q[:, None] * R
        C = np.ones((1, order))
        D = np.zeros((1,))
        self._ss = cont2discrete((A, B, C, D), dt=0.01, method=discretizer)
        self._A = self._ss.A
        self._B = self._ss.B

        ### NON-LINEARITY
        self.nl = nonlinearity
        if self.nl == 'sigmoid':
            self.act = nn.Sigmoid()
        elif self.nl == 'tanh':
            self.act = nn.Tanh()
        else:
            self.act = nn.ReLU()

        ### NN
        self.fc = nn.Linear(self.n,self.n)

        if activate:
            self.nn_act = self.act
        else:
            self.nn_act = nn.LeakyReLU(1.0) #Identity Function

        ### INITIALIZATION
        torch.nn.init.xavier_normal_(self.Wm)    ##### FIGURE THIS OUT!!
        torch.nn.init.xavier_normal_(self.Wx)
        torch.nn.init.xavier_normal_(self.Wh)
        torch.nn.init.zeros_(self.em)
        torch.nn.init.uniform_(self.ex, -np.sqrt(3/self.d), np.sqrt(3/self.d))
        torch.nn.init.uniform_(self.eh, -np.sqrt(3/self.d), np.sqrt(3/self.d))


        #### TRIAL
        self.register_buffer('AT', torch.Tensor(self._A))
        self.register_buffer('BT', torch.Tensor(self._B))
        if A_learnable:
            self.AT = nn.Parameter(self.AT)
        if B_learnable:
            self.BT = nn.Parameter(self.BT)
Example 11
    def __init__(self, input_size, hidden_size, memory_size, theta, matrix_type='pl',
                 discretizer='zoh', nonlinearity='sigmoid',
                 A_learnable=False, B_learnable=False):
        super(LMU, self).__init__()

        ### SIZE
        self.k = input_size
        self.n = hidden_size
        self.d = memory_size

        ### PARAMETERS
        self.Wx = nn.Parameter(torch.Tensor(self.n,self.k))
        self.Wh = nn.Parameter(torch.Tensor(self.n,self.n))
        self.Wm = nn.Parameter(torch.Tensor(self.n,self.d))
        self.ex = nn.Parameter(torch.Tensor(1,self.k))
        self.eh = nn.Parameter(torch.Tensor(1,self.n))
        self.em = nn.Parameter(torch.Tensor(1,self.d))

        if matrix_type=='pl':   #For Legendre Memory Unit
            order=self.d
            Q = np.arange(order, dtype=np.float64)
            R = (2 * Q + 1)[:, None] / theta
            j, i = np.meshgrid(Q, Q)
            A = np.where(i < j, -1, (-1.0) ** (i - j + 1)) * R
            B = (-1.0) ** Q[:, None] * R
            C = np.ones((1, order))
            D = np.zeros((1,))
            self._ss = cont2discrete((A, B, C, D), dt=0.01, method=discretizer)
            self._A = self._ss.A
            self._B = self._ss.B
        elif matrix_type=='p':  #For Pade Memory Unit
            order=self.d
            Q=np.arange(order,dtype=np.float64)
            V=(order+Q+1)*(order-Q)/(Q+1)/theta
            A=np.zeros([order,order],dtype=np.float64)
            B=np.zeros([order,1],dtype=np.float64)
            A[0,:]=-V[0]
            A[1:order,0:order-1]=np.diag(V[1:order])
            B[0]=V[0]
            C = np.ones((1, order))
            D = np.zeros((1,))
            self._ss = cont2discrete((A, B, C, D), dt=0.01, method=discretizer)
            self._A = self._ss.A
            self._B = self._ss.B
        elif matrix_type=='pb':  #For Bernstein Memory Unit
            order=self.d
            Q = np.arange(order, dtype=np.float64)
            R = (2 * Q + 1)[:, None] / theta
            j, i = np.meshgrid(Q, Q)
            A_leg = np.where(i < j, -1, (-1.0) ** (i - j + 1)) * R
            B_leg = (-1.0) ** Q[:, None] * R
            C = np.ones((1, order))
            D = np.zeros((1,))
            M=np.zeros([order,order],dtype=np.float64)
            M_inv=np.zeros([order,order],dtype=np.float64)
            n=order-1 #degree of polynomial
            for j in range(0,n+1):
              for k in range(0,n+1):
                ll=max(0,j+k-n)
                ul=min(j,k)+1
                sum=0.0
                for i in range(ll,ul):
                  sum=sum+((-1.0)**(k+i))*(comb(k,i)**2)*comb(n-k,j-i)
                M[j,k]=sum/comb(n,j)

                sum=0.0
                for i in range(0,j+1):
                  sum=sum+(-1.0)**(j+i)*comb(j,i)**2/comb(n+j,k+i)
                M_inv[j,k]=(2*j+1)/(n+j+1)*comb(n,k)*sum

            M=10*np.tanh(M/10)
            M_inv=10*np.tanh(M_inv/10)

            A_1=np.matmul(M,A_leg)
            A=np.matmul(A_1,M_inv)
            B=np.matmul(M,B_leg)

            self._ss = cont2discrete((A, B, C, D), dt=0.01, method=discretizer)
            self._A = self._ss.A
            self._B = self._ss.B

        ### NON-LINEARITY
        self.nl = nonlinearity
        if self.nl == 'sigmoid':
            self.act = nn.Sigmoid()
        elif self.nl == 'tanh':
            self.act = nn.Tanh()
        else:
            self.act = nn.ReLU()

        ### INITIALIZATION
        torch.nn.init.xavier_normal_(self.Wm)    ### UNDOCUMENTED CHANGE!
        torch.nn.init.xavier_normal_(self.Wx)
        torch.nn.init.xavier_normal_(self.Wh)
        torch.nn.init.zeros_(self.em)
        torch.nn.init.uniform_(self.ex, -np.sqrt(3/self.d), np.sqrt(3/self.d))
        torch.nn.init.uniform_(self.eh, -np.sqrt(3/self.d), np.sqrt(3/self.d))


        #### TRIAL
        self.register_buffer('AT', torch.Tensor(self._A))
        self.register_buffer('BT', torch.Tensor(self._B))
        if A_learnable:
            self.AT = nn.Parameter(self.AT)
        if B_learnable:
            self.BT = nn.Parameter(self.BT)
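
As in Example 1, the constructor only builds the parameters. A minimal sketch of a matching step, assuming the standard LMU recurrence (the method name, state layout, and `import torch.nn.functional as F` at module level are assumptions). Note that here AT/BT hold the full discretized A and B, not A - I as in Example 1, so the memory update is a plain matrix product rather than an increment:

    def forward(self, x_t, h_prev, m_prev):
        # scalar write signal from the input, hidden, and memory encoders
        u_t = (F.linear(x_t, self.ex) +
               F.linear(h_prev, self.eh) +
               F.linear(m_prev, self.em))

        # discretized LTI memory: m_t = A m_{t-1} + B u_t
        m_t = F.linear(m_prev, self.AT) + F.linear(u_t, self.BT)

        # hidden state from the three kernels, through the chosen nonlinearity
        h_t = self.act(F.linear(x_t, self.Wx) +
                       F.linear(h_prev, self.Wh) +
                       F.linear(m_t, self.Wm))
        return h_t, m_t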
Example 12
File: lmu.py Project: ino09/lmu
    def __init__(
            self,
            units,
            order,
            theta,  # relative to dt=1
            method="zoh",
            realizer=Identity(),
            factory=LegendreDelay,
            trainable_input_encoders=True,
            trainable_hidden_encoders=True,
            trainable_memory_encoders=True,
            trainable_input_kernel=True,
            trainable_hidden_kernel=True,
            trainable_memory_kernel=True,
            trainable_forget_input_kernel=False,
            trainable_forget_hidden_kernel=False,
            trainable_forget_bias=False,
            trainable_A=False,
            trainable_B=False,
            input_encoders_initializer="lecun_uniform",
            hidden_encoders_initializer="lecun_uniform",
            memory_encoders_initializer=Constant(0),  # 'lecun_uniform',
            input_kernel_initializer="glorot_normal",
            hidden_kernel_initializer="glorot_normal",
            memory_kernel_initializer="glorot_normal",
            forget_input_kernel_initializer=Constant(1),
            forget_hidden_kernel_initializer=Constant(1),
            forget_bias_initializer=Constant(0),
            hidden_activation="tanh",
            input_activation="linear",
            gate_activation="linear",
            **kwargs):
        super().__init__(**kwargs)

        self.units = units
        self.order = order
        self.theta = theta
        self.method = method
        self.realizer = realizer
        self.factory = factory
        self.trainable_input_encoders = trainable_input_encoders
        self.trainable_hidden_encoders = trainable_hidden_encoders
        self.trainable_memory_encoders = trainable_memory_encoders
        self.trainable_input_kernel = trainable_input_kernel
        self.trainable_hidden_kernel = trainable_hidden_kernel
        self.trainable_memory_kernel = trainable_memory_kernel
        self.trainable_forget_input_kernel = trainable_forget_input_kernel
        self.trainable_forget_hidden_kernel = trainable_forget_hidden_kernel
        self.trainable_forget_bias = trainable_forget_bias
        self.trainable_A = trainable_A
        self.trainable_B = trainable_B

        self.input_encoders_initializer = initializers.get(
            input_encoders_initializer)
        self.hidden_encoders_initializer = initializers.get(
            hidden_encoders_initializer)
        self.memory_encoders_initializer = initializers.get(
            memory_encoders_initializer)
        self.input_kernel_initializer = initializers.get(
            input_kernel_initializer)
        self.hidden_kernel_initializer = initializers.get(
            hidden_kernel_initializer)
        self.memory_kernel_initializer = initializers.get(
            memory_kernel_initializer)
        self.forget_input_kernel_initializer = initializers.get(
            forget_input_kernel_initializer)
        self.forget_hidden_kernel_initializer = initializers.get(
            forget_hidden_kernel_initializer)
        self.forget_bias_initializer = initializers.get(
            forget_bias_initializer)

        self.hidden_activation = activations.get(hidden_activation)
        self.input_activation = activations.get(input_activation)
        self.gate_activation = activations.get(gate_activation)

        self._realizer_result = realizer(factory(theta=theta,
                                                 order=self.order))
        self._ss = cont2discrete(self._realizer_result.realization,
                                 dt=1.0,
                                 method=method)
        self._A = self._ss.A - np.eye(order)  # puts into form: x += Ax
        self._B = self._ss.B
        self._C = self._ss.C
        assert np.allclose(self._ss.D, 0)  # proper LTI

        # assert self._C.shape == (1, self.order)
        # C_full = np.zeros((self.units, self.order, self.units))
        # for i in range(self.units):
        #     C_full[i, :, i] = self._C[0]
        # decoder_initializer = Constant(
        #     C_full.reshape(self.units*self.order, self.units))

        # TODO: would it be better to absorb B into the encoders and then
        # initialize it appropriately? trainable encoders+B essentially
        # does this in a low-rank way

        # if the realizer is CCF then we get the following two constraints
        # that could be useful for efficiency
        # assert np.allclose(self._ss.B[1:], 0)  # CCF
        # assert np.allclose(self._ss.B[0], self.order**2)

        self.state_size = (self.units, self.order)
        self.output_size = self.units
Example 13
File: lmu.py Project: ino09/lmu
    def __init__(
            self,
            units,
            order,
            theta=100,  # relative to dt=1
            method="euler",
            return_states=False,
            realizer=Identity(),
            factory=LegendreDelay,
            trainable_encoders=True,
            trainable_decoders=True,
            trainable_dt=False,
            trainable_A=False,
            trainable_B=False,
            encoder_initializer=InputScaled(1.0),  # TODO
            decoder_initializer=None,  # TODO
            hidden_activation="linear",  # TODO
            output_activation="tanh",  # TODO
            **kwargs):
        super().__init__(**kwargs)

        self.units = units
        self.order = order
        self.theta = theta
        self.method = method
        self.return_states = return_states
        self.realizer = realizer
        self.factory = factory
        self.trainable_encoders = trainable_encoders
        self.trainable_decoders = trainable_decoders
        self.trainable_dt = trainable_dt
        self.trainable_A = trainable_A
        self.trainable_B = trainable_B

        self._realizer_result = realizer(factory(theta=theta,
                                                 order=self.order))
        self._ss = self._realizer_result.realization
        self._A = self._ss.A
        self._B = self._ss.B
        self._C = self._ss.C
        assert np.allclose(self._ss.D, 0)  # proper LTI

        self.encoder_initializer = initializers.get(encoder_initializer)
        self.dt_initializer = initializers.get(Constant(1.0))

        if decoder_initializer is None:
            assert self._C.shape == (1, self.order)
            C_full = np.zeros((self.units, self.order, self.units))
            for i in range(self.units):
                C_full[i, :, i] = self._C[0]
            decoder_initializer = Constant(
                C_full.reshape(self.units * self.order, self.units))

        self.decoder_initializer = initializers.get(decoder_initializer)
        self.hidden_activation = activations.get(hidden_activation)
        self.output_activation = activations.get(output_activation)

        # TODO: would it be better to absorb B into the encoders and then
        # initialize it appropriately? trainable encoders+B essentially
        # does this in a low-rank way

        # if the realizer is CCF then we get the following two constraints
        # that could be useful for efficiency
        # assert np.allclose(self._ss.B[1:], 0)  # CCF
        # assert np.allclose(self._ss.B[0], self.order**2)

        if not (self.trainable_dt or self.trainable_A or self.trainable_B):
            # This is a hack to speed up parts of the computational graph
            # that are static. This is not a general solution.
            ss = cont2discrete(self._ss, dt=1.0, method=self.method)
            AT = K.variable(ss.A.T)
            B = K.variable(ss.B.T[None, ...])
            self._solver = lambda: (AT, B)

        elif self.method == "euler":
            self._solver = self._euler

        elif self.method == "zoh":
            self._solver = self._zoh

        else:
            raise NotImplementedError("Unknown method='%s'" % self.method)

        self.state_size = self.units * self.order  # flattened
        self.output_size = self.state_size if return_states else self.units
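
When dt, A, or B is trainable, the cell dispatches to self._euler or self._zoh, which are not shown here. For reference, a forward-Euler discretization (presumably what the _euler branch approximates; this sketch is an assumption, not the source's implementation):

import numpy as np

def euler_discretize(A, B, dt=1.0):
    # forward-Euler step of dx/dt = A x + B u:
    # x[t+1] = (I + dt*A) x[t] + (dt*B) u[t]
    Ad = np.eye(len(A)) + dt * A
    Bd = dt * B
    return Ad, Bd

The zoh branch would instead need the matrix exponential of the augmented [[A, B], [0, 0]] block scaled by dt, which is what cont2discrete computes for method='zoh'.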
Example 14
def figure_pca(targets):
    orders = [3, 6, 9, 12, 15, 27]
    theta = 10.
    dt = 0.01
    T = theta

    length = int(T / dt)
    t = np.linspace(0, T - dt, length)
    t_norm = np.linspace(0, 1, len(t))
    cmap = sns.diverging_palette(h_neg=34,
                                 h_pos=215,
                                 s=99,
                                 l=66,
                                 sep=1,
                                 center="dark",
                                 as_cmap=True)

    class MidpointNormalize(colors.Normalize):
        """Stolen from http://matplotlib.org/users/colormapnorms.html"""
        def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
            self.midpoint = midpoint
            colors.Normalize.__init__(self, vmin, vmax, clip)

        def __call__(self, value, clip=None):
            # I'm ignoring masked values and all kinds of edge cases to make a
            # simple example...
            x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
            return np.ma.masked_array(np.interp(value, x, y))

    with sns.axes_style('white'):
        with sns.plotting_context('paper', font_scale=2.8):
            pylab.figure(figsize=(22, 7))
            gs = gridspec.GridSpec(2, len(orders), height_ratios=[1.3, 1])

            for k, order in enumerate(orders):
                F = PadeDelay(theta, order)
                A = F.A
                dA, dB, _, _ = cont2discrete(F, dt=dt).ss

                dx = np.empty((length, len(F)))
                x = np.empty((length, len(F)))
                x[0, :] = dB.squeeze()  # the x0 from delta input

                for j in range(length - 1):
                    dx[j, :] = A.dot(x[j, :])
                    x[j + 1, :] = dA.dot(x[j, :])
                dx[-1, :] = A.dot(x[-1, :])

                # Compute PCA of trajectory for top half
                pca = PCA(x, standardize=False)
                p = pca.Y[:, :3]

                logging.info("%d Accounted Variance: %s", order,
                             np.sum(pca.fracs[:3]) / np.sum(pca.fracs))

                # Compute curve for bottom half (and color map center)
                dist = np.cumsum(np.linalg.norm(dx, axis=1))
                dist = dist / np.max(dist)
                infl = np.where((np.diff(np.diff(dist)) >= 0)
                                & (t_norm[:-2] >= 0))[0][-1]
                cnorm = MidpointNormalize(midpoint=t_norm[infl])

                ax = plt.subplot(gs[k], projection='3d')
                ax.set_title(r"$q = %d$" % order).set_y(1.1)

                # Draw in reverse order so the start is on top
                ax.scatter(p[::-1, 0],
                           p[::-1, 1],
                           p[::-1, 2],
                           lw=5,
                           c=t_norm[::-1],
                           cmap=cmap,
                           norm=cnorm,
                           alpha=0.5)
                ax.set_xticks([])
                ax.set_yticks([])
                ax.set_zticks([])
                if k == 0:
                    ax.annotate('PCA',
                                xy=(-50, 150),
                                ha='left',
                                va='top',
                                size=22,
                                rotation=90,
                                bbox=None,
                                xycoords='axes points')
                ax.view_init(elev=25, azim=150)

                ax = plt.subplot(gs[len(orders) + k])
                ax.scatter(t, dist, lw=5, c=t_norm, cmap=cmap, norm=cnorm)
                ax.vlines(t[infl], 0, 1, linestyle='--', lw=3, alpha=0.7)
                if k == 0:
                    ax.set_yticks([0, 1])
                    ax.set_ylabel("Length of Curve")
                else:
                    ax.set_yticks([])
                ax.set_xticks([0, theta / 2, theta])
                ax.set_xticklabels(
                    [r"$0$", r"$\frac{\theta}{2}$", r"$\theta$"])
                ax.xaxis.set_tick_params(pad=10)
                ax.set_xlabel("Time [s]", labelpad=20)

                sns.despine(offset=10, ax=ax)

            savefig(targets[0])