Example #1
 def testFewPoints(self):
     # check sanity of paths with fewer than 3 points
     path1=[[4.3,0.8]]
     path2=numpy.array([[1,2],[2,4]])
     m=4
     d=2
     s=iisignature.prepare(d,m,"cosx")
     s_a=iisignature.prepare(d,2,"cosx")
     length=iisignature.siglength(d,m)
     loglength=iisignature.logsiglength(d,m)
     loglength_a=iisignature.logsiglength(d,2)
     blankLogSig=numpy.zeros(loglength)
     blankLogSig_a=numpy.zeros(loglength_a)
     blankSig=numpy.zeros(length)
     self.assertLess(diff(iisignature.sig(path1,m),blankSig),0.000000001)
     self.assertTrue(numpy.array_equal(iisignature.sig(path1,m,2),numpy.zeros([0,length])))
     self.assertLess(diff(iisignature.logsig(path1,s,"C"),blankLogSig),0.000000001)
     self.assertLess(diff(iisignature.logsig(path1,s,"O"),blankLogSig),0.000000001)
     self.assertLess(diff(iisignature.logsig(path1,s,"S"),blankLogSig),0.000000001)
     self.assertLess(diff(iisignature.logsig(path1,s,"X"),blankSig),0.000000001)
     self.assertLess(diff(iisignature.logsig(path1,s_a,"A"),blankLogSig_a),0.000000001)
     blankLogSig[:d]=path2[1]-path2[0]
     blankLogSig_a[:d]=path2[1]-path2[0]
     blankSig[:d]=path2[1]-path2[0]
     self.assertLess(diff(iisignature.logsig(path2,s,"C"),blankLogSig),0.000001)
     self.assertLess(diff(iisignature.logsig(path2,s,"O"),blankLogSig),0.000001)
     self.assertLess(diff(iisignature.logsig(path2,s,"S"),blankLogSig),0.000001)
     self.assertLess(diff(iisignature.logsig(path2,s,"X"),blankSig),0.000001)
     self.assertLess(diff(iisignature.logsig(path2,s_a,"A"),blankLogSig_a),0.000001)
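As a side note on the bookkeeping this test relies on: siglength(d, m) is the geometric sum d + d^2 + ... + d^m, while logsiglength(d, m) counts the Lyndon words of length at most m over d letters. A minimal sketch (assuming only that iisignature is installed):

import iisignature

d, m = 2, 4
# siglength is the dimension of the truncated tensor algebra (grades 1..m)
assert iisignature.siglength(d, m) == sum(d**k for k in range(1, m + 1))  # 30
# logsiglength counts Lyndon words: 2 + 1 + 2 + 3 = 8 for d=2, m=4
assert iisignature.logsiglength(d, m) == 8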
Example #2
def doplot(n_samples=100, m=5, d=3, nplot=None):
    x = vanilla_data(n_samples=n_samples, d=d)
    # make it interesting: once the last coordinate's absolute value exceeds 3, clamp the other coordinates to 3
    hitting_event = np.maximum.accumulate(np.abs(x[:, -1])) > 3
    x[hitting_event, :-1] = 3
    # x[:,:-1] = np.where(cumulative_maximum > 1, 0, x[:,:-1])
    # x = x - x.mean(axis=0)
    # x = x / x.std(axis=0)
    # x = np.exp(x)
    # x = x / x.sum(axis=0)
    s = iis.prepare(d, m)
    if nplot is None:
        nplot = n_samples
    ii = n_samples // nplot
    figure(1)
    clf()
    ax = subplot(2, 1, 1)
    plot(x, alpha=0.5)
    ylabel('path')
    t = list()
    data = list()
    for i in range(ii, n_samples, ii):
        t.append(i)
        temp = iis.logsig(x[:i], s)
        data.append(temp)
    data = np.array(data)
    assert data.shape[1] == iis.logsiglength(d, m), 'logsig length mismatch!'
    t = np.array(t)
    ax = subplot(2, 1, 2)
    plot(t, (data.T / (t**0)).T, alpha=0.5)
    # plot(t, (data[:,-1].T / (t ** 0)).T, alpha=0.5)
    ylabel('logsig')
    xlabel('t')
    show()
    return locals()
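A hedged usage sketch for doplot: it assumes a pylab-style namespace (figure, clf, subplot, plot, xlabel, ylabel, show) and a helper vanilla_data(n_samples, d); the stand-in below is only illustrative, not the original helper.

import numpy as np
import iisignature as iis
from pylab import figure, clf, subplot, plot, xlabel, ylabel, show

def vanilla_data(n_samples=100, d=3):
    # hypothetical stand-in: a d-dimensional Gaussian random walk
    return np.cumsum(np.random.randn(n_samples, d), axis=0)

env = doplot(n_samples=200, m=3, d=2)  # returns locals() for inspection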
Example #3
def ComputeLogsigFeatures(path, number_of_segment, deg_of_logsig):
    """
    The implementation of computing the log-signature of segments of path.

    path: dimension (sample_size,n, d)

    number_of_segment: the number of segments

    deg_of_logsig: the degree of the log-signature 
    """
    nT = int(np.shape(path)[1])
    dim_path = int(np.shape(path)[-1])
    t_vec = np.linspace(1, nT, number_of_segment + 1)
    t_vec = [int(round(x)) for x in t_vec]
    s = iisignature.prepare(dim_path, deg_of_logsig)
    MultiLevelLogSig = []
    for k in range(int(np.shape(path)[0])):
        tmpMultiLevelLogSig = np.zeros(
            (number_of_segment,
             iisignature.logsiglength(dim_path, deg_of_logsig)))
        for i in range(number_of_segment):
            temp_path = path[k][t_vec[i] - 1:t_vec[i + 1], :]
            temp_start = temp_path[0]  # segment start point (computed but unused here)
            tmpMultiLevelLogSig[i, :] = iisignature.logsig(temp_path, s)
        MultiLevelLogSig.append(tmpMultiLevelLogSig)
    return np.float32(np.array(MultiLevelLogSig))
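A usage sketch (array sizes are illustrative): for a batch of 8 three-dimensional paths of 50 points split into 5 segments at degree 2, the output stacks one log-signature per segment:

import numpy as np
import iisignature

batch = np.random.randn(8, 50, 3)  # (sample_size, n, d)
feats = ComputeLogsigFeatures(batch, number_of_segment=5, deg_of_logsig=2)
assert feats.shape == (8, 5, iisignature.logsiglength(3, 2))  # (8, 5, 6)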
Example #4
def ComputeMultiLevelLogsig1dBM(BM_paths, number_of_segment, depth_of_tensors, T):
    """
    Compute the log-signature of all samples
    """
    no_of_samples = np.shape(BM_paths)[0]

    if depth_of_tensors == 1:
        MultiLevelLogSigs = np.zeros(
            [no_of_samples, 2 * number_of_segment], dtype=float)
    else:
        MultiLevelLogSigs = np.zeros([no_of_samples, iisignature.logsiglength(
            2, depth_of_tensors) * number_of_segment], dtype=float)

    MultiStart = np.zeros([no_of_samples, number_of_segment], dtype=float)

    s = iisignature.prepare(2, depth_of_tensors)
    print('start computing the logsigs of degree %d:' % depth_of_tensors)
    for j in tqdm(range(0, no_of_samples, 1), total=no_of_samples):
        BM = TimeJointPath(BM_paths[j, :], T)
        result2 = ComputeMultiLevelSig(
            BM, number_of_segment, depth_of_tensors, s)
        #MultiLevelSigs[j] = result2['MultiLevelSig']
        # print(result2)
        MultiLevelLogSigs[j] = result2['MultiLevelLogSig']
        MultiStart[j] = result2['MultiStart']
    n_input = int(np.shape(MultiLevelLogSigs)[1] / number_of_segment)
    X_logsig_start = MultiLevelLogSigs.reshape(
        (np.shape(MultiLevelLogSigs)[0], number_of_segment, n_input))
    batch_x0 = MultiStart.reshape(
        (np.shape(MultiLevelLogSigs)[0], number_of_segment, 1))
    X_logsig_start = np.concatenate((X_logsig_start, batch_x0), axis=2)
    return X_logsig_start
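Assuming the helpers TimeJointPath and ComputeMultiLevelSig used above are in scope (they are not shown here), a call might look like the following; the Brownian-path construction is only a placeholder.

import numpy as np

# 100 hypothetical 1-d Brownian sample paths of 51 points on [0, T]
BM_paths = np.cumsum(np.sqrt(1.0 / 50) * np.random.randn(100, 51), axis=1)
X = ComputeMultiLevelLogsig1dBM(BM_paths, number_of_segment=4,
                                depth_of_tensors=2, T=1.0)
# (100, 4, logsiglength(2, 2) + 1): logsig features plus the segment start value
print(X.shape)  # (100, 4, 4)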
Example #5
    def consistency(self, coropa, dim, level):
        #numpy.random.seed(21)
        s = iisignature.prepare(dim,level,"coshx" if coropa else "cosx")
        myinfo = {"level":level, "dimension":dim,
                  "methods": ("COSAX" if level <= 2 else "COSX"),
                  "basis":("Standard Hall" if coropa else "Lyndon")}
        self.assertEqual(iisignature.info(s),myinfo)
        path = numpy.random.uniform(size=(10,dim))
        basis = iisignature.basis(s)
        logsig = iisignature.logsig(path,s)
        sig = iisignature.sig(path,level)

        #check lengths
        self.assertEqual(len(basis),iisignature.logsiglength(dim,level))
        self.assertEqual((len(basis),),logsig.shape)
        self.assertEqual(sig.shape,(iisignature.siglength(dim,level),))

        #calculate a signature from logsig
        expanded_logsig = [numpy.zeros(dim ** m) for m in range(1,level + 1)]
        for coeff, expression in zip(logsig,basis):
            values, depth = valueOfBracket(expression,dim)
            expanded_logsig[depth - 1]+=values * coeff
        calculated_sig = numpy.concatenate(exponentiateTensor(expanded_logsig))
        self.assertLess(diff(sig,calculated_sig),0.00001)

        #calculate a log signature from sig
        fullLogSig = numpy.concatenate(logTensor(splitConcatenatedTensor(sig,dim,level)))
        fullLogSigLib = iisignature.logsig(path,s,"x")
        diff1 = numpy.max(numpy.abs(fullLogSigLib - fullLogSig))
        #print
        #(numpy.vstack([fullLogSig,fullLogSigLib,numpy.abs(fullLogSigLib-fullLogSig)]).transpose())
        self.assertLess(diff1,0.00001)

        basisMatrix = []
        zeros = [numpy.zeros(dim ** m) for m in range(1,level + 1)]
        for expression in basis:
            values, depth = valueOfBracket(expression, dim)
            temp = zeros[depth - 1]
            zeros[depth - 1] = values
            basisMatrix.append(numpy.concatenate(zeros))
            zeros[depth - 1] = temp
        calculatedLogSig = lstsq(numpy.transpose(basisMatrix),fullLogSig)[0]
        diff2 = numpy.max(numpy.abs(logsig - calculatedLogSig))
        self.assertLess(diff2,0.00001)

        #check consistency of methods
        slowLogSig = iisignature.logsig(path,s,"o")
        diffs = numpy.max(numpy.abs(slowLogSig - calculatedLogSig))
        self.assertLess(diffs,0.00001)

        sigLogSig = iisignature.logsig(path,s,"s")
        diffs = numpy.max(numpy.abs(sigLogSig - calculatedLogSig))
        self.assertLess(diffs,0.00001)

        if level < 3:
            areaLogSig = iisignature.logsig(path,s,"a")
            diffs = numpy.max(numpy.abs(areaLogSig - calculatedLogSig))
            self.assertLess(diffs,0.00001)
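In miniature, the method-consistency part of this test says that every computation route exposed by logsig agrees on the same prepared object; for example (a sketch, assuming iisignature is installed):

import numpy
import iisignature

s = iisignature.prepare(2, 3, "cosx")
path = numpy.random.uniform(size=(10, 2))
assert numpy.allclose(iisignature.logsig(path, s, "o"),
                      iisignature.logsig(path, s, "s"))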
Example #6
def build_lin_Logsig_rnn_model(input_shape, n_hidden_neurons, output_shape,
                               no_of_segments, deg_of_logsig, learning_rate,
                               drop_rate_1, drop_rate_2, filter_size):
    """
    The LP_logsig_rnn model
    """
    logsiglen = iisignature.logsiglength(filter_size, deg_of_logsig)

    input_layer = Input(shape=input_shape)

    # Convolutional layer

    lin_projection_layer = Conv2D(32, (1, 1),
                                  strides=(1, 1),
                                  data_format='channels_last')(input_layer)
    lin_projection_layer = Conv2D(
        16, (5, 1), strides=(1, 1),
        data_format='channels_last')(lin_projection_layer)

    reshape = Reshape((input_shape[0] - 4, 16 * 25))(lin_projection_layer)
    lin_projection_layer = Conv1D(filter_size, 1)(reshape)

    mid_output = Lambda(lambda x: SP(x, no_of_segments),
                        output_shape=(no_of_segments, filter_size),
                        name='start_position')(lin_projection_layer)

    # Computing Logsig layer

    hidden_layer = Lambda(lambda x:CLF(x, no_of_segments, deg_of_logsig), \
                          output_shape=(no_of_segments,logsiglen),name='logsig')(lin_projection_layer)
    hidden_layer = Reshape((no_of_segments, logsiglen))(hidden_layer)
    # Batchnormalization
    BN_layer = BatchNormalization()(hidden_layer)

    mid_input = layers.concatenate([mid_output, BN_layer], axis=-1)

    # LSTM
    lstm_layer = LSTM(units=n_hidden_neurons, return_sequences=True)(mid_input)

    # Dropout
    drop_layer = Dropout(drop_rate_2)(lstm_layer)
    output_layer = Flatten()(drop_layer)
    output_layer = Dense(output_shape, activation='softmax')(output_layer)

    model = Model(inputs=input_layer, outputs=output_layer)
    model.summary()
    adam = Adam(lr=learning_rate,
                beta_1=0.9,
                beta_2=0.999,
                epsilon=None,
                decay=0.0,
                amsgrad=False)
    #     sgd = SGD(lr=learning_rate, decay=0.95,momentum=0.9)
    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])

    return model
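A hypothetical invocation, assuming the custom ops SP and CLF wrapped in the Lambda layers are importable and the input is a (frames, joints, channels) tensor; the 25-joint assumption is forced by the hard-coded Reshape((input_shape[0] - 4, 16 * 25)):

model = build_lin_Logsig_rnn_model(
    input_shape=(39, 25, 3),  # hypothetical: 39 frames, 25 joints, 3 channels
    n_hidden_neurons=128,
    output_shape=60,
    no_of_segments=4,
    deg_of_logsig=2,
    learning_rate=1e-3,
    drop_rate_1=0.3,
    drop_rate_2=0.5,
    filter_size=30)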
Example #7
 def infer_shape(self,node,shapes):
     #d=shapes[0][-1]
     s,method=_prepared_obeject_store[node.inputs[1].get_value()]
     m=iisignature.info(s)["level"]
     d=iisignature.info(s)["dimension"]
     if "x" in method or "X" in method:
         length=iisignature.siglength(d,m)
     else:
         length=iisignature.logsiglength(d,m)
     return [shapes[0][:-2]+(length,)]
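The branch reflects that the expanded log signature (the "x"/"X" methods) lives in the full tensor algebra, so its length equals the signature's, while the other methods return the basis-compressed form; for instance:

import iisignature

d, m = 2, 3
print(iisignature.siglength(d, m))     # 14: length of sig and of an expanded logsig
print(iisignature.logsiglength(d, m))  # 5: length of the compressed logsig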
Example #8
def build_lin_Logsig_rnn_model(input_shape, n_hidden_neurons, output_shape,
                               no_of_segments, deg_of_logsig, learning_rate,
                               drop_rate_1, drop_rate_2, filter_size):
    logsiglen = iisignature.logsiglength(filter_size + 1, deg_of_logsig)
    input_layer = Input(shape=input_shape)
    lin_projection_layer = Conv2D(32, (1, 1),
                                  strides=(1, 1),
                                  data_format='channels_last')(input_layer)
    lin_projection_layer = Conv2D(
        16, (3, 1),
        strides=(1, 1),
        padding='same',
        data_format='channels_last')(lin_projection_layer)
    reshape = Reshape((input_shape[0], 16 * 19))(lin_projection_layer)

    lin_projection_layer = Conv1D(filter_size, 1, activation='relu')(reshape)
    drop_layer_1 = Dropout(drop_rate_1)(lin_projection_layer)
    ps_layer = Lambda(lambda x: PS(x),
                      output_shape=(input_shape[0], filter_size),
                      name='partial_sum')(drop_layer_1)
    cat_layer = Lambda(lambda x: Cat_T(x, input_shape[0]),
                       output_shape=(input_shape[0], filter_size + 1),
                       name='add_time')(ps_layer)

    mid_output = Lambda(lambda x: SP(x, no_of_segments),
                        output_shape=(no_of_segments, filter_size + 1),
                        name='start_position')(cat_layer)

    hidden_layer_1 = Lambda(lambda x:CLF(x, no_of_segments, deg_of_logsig), \
                          output_shape=(no_of_segments,logsiglen), name='logsig_layer')(cat_layer)
    hidden_layer_2 = Reshape((no_of_segments, logsiglen))(hidden_layer_1)
    BN_layer_1 = BatchNormalization()(hidden_layer_2)

    mid_input = layers.concatenate([mid_output, BN_layer_1], axis=-1)

    lstm_layer = LSTM(units=n_hidden_neurons, return_sequences=True)(mid_input)
    #     hidden_layer_3 = Dense(512, activation='relu')(lstm_layer)
    #     BN_layer_2 = BatchNormalization()(lstm_layer)
    drop_layer_2 = Dropout(drop_rate_2)(lstm_layer)
    output_layer = Flatten()(drop_layer_2)
    output_layer = Dense(output_shape, activation='softmax')(output_layer)

    model = Model(inputs=input_layer, outputs=output_layer)
    model.summary()
    adam = Adam(lr=learning_rate,
                beta_1=0.9,
                beta_2=0.999,
                epsilon=None,
                decay=0.0,
                amsgrad=False)
    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])

    return model
Example #9
 def test_logsigbackwards_can_augment_s(self):
     numpy.random.seed(291)
     d=2
     m=7
     pathLength=3
     path = numpy.random.uniform(size=(pathLength,d))
     increment = 0.1*numpy.random.uniform(size=(pathLength,d))
     dFdlogSig = numpy.ones(iisignature.logsiglength(d,m))
     for types in (("x","o","s"),("xh","oh","sh")):
         ss=[iisignature.prepare(d,m,t) for t in types]
         backs=[iisignature.logsigbackprop(dFdlogSig,path,s) for s in ss]
         self.assertTrue(numpy.allclose(backs[0],backs[2]),types[0])
         self.assertTrue(numpy.allclose(backs[1],backs[2]),types[1])
         fwds=[iisignature.logsig(path,s,"s") for s in ss]
         self.assertTrue(numpy.allclose(fwds[0],fwds[2]),types[0])
         self.assertTrue(numpy.allclose(fwds[1],fwds[2]),types[1])
Example #10
    def logSig(self, type, m=5):
        numpy.random.seed(291)
        d=2
        pathLength=10
        s=iisignature.prepare(d,m,type)
        path = numpy.random.uniform(size=(pathLength,d))
        path = numpy.cumsum(2 * (path - 0.5), 0)  # makes it more random-walk-ish, less like a scribble
        increment = 0.1 * numpy.random.uniform(size=(pathLength, d))

        manualChange = fdDeriv(lambda x:iisignature.logsig(x,s,type),path,increment,4)
        
        dFdlogSig = numpy.ones(iisignature.siglength(d,m) if "X"==type else iisignature.logsiglength(d,m))
        calculatedChange = numpy.sum(increment*iisignature.logsigbackprop(dFdlogSig,path,s,type))
        #print(manualChange, calculatedChange)
        self.assertLess(numpy.abs(manualChange-calculatedChange),0.0001)
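fdDeriv is defined elsewhere in the test suite; a plausible sketch of it (an assumption, not the original implementation) is a central finite-difference estimate of the directional derivative of the summed output, with order selecting the accuracy:

import numpy

def fdDeriv(f, x, dx, order, h=1e-3):
    # hypothetical stand-in: d/dt sum(f(x + t*dx)) at t=0 by central differences
    g = lambda t: numpy.sum(f(x + t * dx))
    if order == 4:  # fourth-order-accurate stencil
        return (g(-2 * h) - 8 * g(-h) + 8 * g(h) - g(2 * h)) / (12 * h)
    return (g(h) - g(-h)) / (2 * h)  # second-order fallback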
Example #11
def build_lin_Logsig_rnn_model(input_shape, n_hidden_neurons, output_shape,
                               no_of_segments, deg_of_logsig, learning_rate,
                               drop_rate1, drop_rate2, filter_size):
    """
	Construct the LP_logsig_rnn model using the customized operations CLF, Cat_T and PS from cus_layers.py
    """
    logsiglen = iisignature.logsiglength(filter_size, deg_of_logsig)

    input_layer = Input(shape=input_shape)
    # Time path concatenation
    cat_layer = Lambda(lambda x: Cat_T(x, input_shape[0]),
                       output_shape=(input_shape[0],
                                     input_shape[1] + 1))(input_layer)
    # Convolutional layer
    lin_projection_layer = Conv1D(filter_size, 1)(cat_layer)
    # Dropout
    drop_layer_1 = Dropout(drop_rate1)(lin_projection_layer)
    # Cumulative sum
    ps_layer = Lambda(lambda x: PS(x),
                      output_shape=(input_shape[0], filter_size))(drop_layer_1)
    #     BN_layer_0 = BatchNormalization()(lin_projection_layer)
    # Computing Logsig layer
    hidden_layer_1 = Lambda(lambda x:CLF(x, no_of_segments, deg_of_logsig), \
                          output_shape=(no_of_segments,logsiglen))(ps_layer)
    hidden_layer_2 = Reshape((no_of_segments, logsiglen))(hidden_layer_1)
    # Batchnormalization
    BN_layer_1 = BatchNormalization()(hidden_layer_2)
    # LSTM
    lstm_layer = LSTM(units=n_hidden_neurons)(BN_layer_1)
    #     BN_layer_2 = BatchNormalization()(lstm_layer)
    # Dropout
    drop_layer_2 = Dropout(drop_rate2)(lstm_layer)
    output_layer = Dense(output_shape, activation='softmax')(drop_layer_2)

    model = Model(inputs=input_layer, outputs=output_layer)
    model.summary()
    adam = Adam(lr=learning_rate,
                beta_1=0.9,
                beta_2=0.999,
                epsilon=None,
                decay=0.0,
                amsgrad=False)
    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])

    return model
Example #12
batch_size = bs[dataset_name]
test_size = data_len // 10
train_batches = np.ceil((data_len - test_size) / batch_size).astype(int)
print('number of batches = ', train_batches, ' batch size = ', batch_size)

torch.manual_seed(0)
rng_state = torch.get_rng_state()  # seed init to ensure the same initial conditions for each training run

### eigenvalue path signature ####
if xtra_feat:
    pslevel = 4
    sig_prep = iisignature.prepare(2, pslevel)
    #xtra_feat_length = iisignature.logsiglength(2, pslevel)
    siglength = iisignature.logsiglength(2, pslevel)

    xtra_feat_length = siglength
    if xxtra: xtra_feat_length += 7

else:
    xtra_feat_length = 0

###### preprocess #####
data = []
label = []
mx, mn = -np.inf, np.inf
Alist = None
Aker = None
smax = []
for i in range(len(graph_list)):
    ...
Example #13
 def perform(self,node,inp,out):
     out[0][0]=np.array(iisignature.logsiglength(inp[0],inp[1]),dtype="int32")
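Outside of Theano, this op's perform step is just the scalar length wrapped as a 0-d int32 array:

import numpy as np
import iisignature

out = np.array(iisignature.logsiglength(2, 3), dtype="int32")  # array(5, dtype=int32)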
Example #14
def setup(obj):
    obj.path = torch.rand(obj.size, dtype=torch.float).numpy()
    shape = obj.size[-3], iisignature.logsiglength(obj.size[-1], obj.depth)
    obj.grad = torch.rand(shape).numpy()
    obj.prepare = iisignature.prepare(obj.path.shape[-1], obj.depth)
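A minimal harness for this benchmark setup (hypothetical sizes; obj only needs size and depth attributes):

import types
import torch
import iisignature

obj = types.SimpleNamespace(size=(4, 100, 3), depth=4)
setup(obj)
print(obj.path.shape)  # (4, 100, 3)
print(obj.grad.shape)  # (4, iisignature.logsiglength(3, 4))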