def __init__(self, k,
             scaled_lap=scaled_laplacian,
             activation=None,
             use_bias=True,
             kernel_initializer='glorot_uniform',
             bias_initializer='zeros',
             kernel_regularizer=None,
             bias_regularizer=None,
             activity_regularizer=None,
             kernel_constraint=None,
             bias_constraint=None,
             **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(LocallyConnectedGC, self).__init__(**kwargs)

    self.k = k
    self.scaled_lap = scaled_lap
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.supports_masking = True
    self.supports = []
    # k-hop adjacency matrix
    S = K.constant(K.to_dense(calculate_adjacency_k(self.scaled_lap, self.k)))
    self.supports.append(S)
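The helper calculate_adjacency_k is not shown on this page, although several snippets below wrap its result in K.to_dense. A minimal stand-in sketch, assuming it builds a binary k-hop support from the sparsity pattern of the scaled Laplacian; every detail here is an assumption, and the original may well return a SparseTensor:

import numpy as np

def calculate_adjacency_k(scaled_lap, k):
    # Assumption: off-diagonal non-zeros of the scaled Laplacian mark edges.
    lap = np.asarray(scaled_lap)
    adj = (np.abs(lap) > 0).astype(np.float32)
    np.fill_diagonal(adj, 1.0)              # keep self-loops so 0..k hops are included
    reach = np.linalg.matrix_power(adj, k)  # walks of length <= k
    return (reach > 0).astype(np.float32)   # binary k-hop adjacency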
Example #2
def K_eval(x, backend=K):
    K = backend
    try:
        # Tensors with a concrete value (densifying sparse ones first)
        return K.get_value(K.to_dense(x))
    except Exception:
        try:
            # Symbolic tensors: build a throwaway backend function and run it
            eval_fn = K.function([], [x])
            return eval_fn([])[0]
        except Exception:
            # Last-resort fallback
            return K.eager(K.eval)(x)
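A minimal usage sketch for K_eval, assuming TensorFlow 2 with the tf.keras backend (the sparse input is made up for illustration):

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

sp_tensor = tf.sparse.from_dense(np.eye(3, dtype='float32'))
print(K_eval(sp_tensor, backend=K))  # dense 3x3 identity as a NumPy array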
    def __init__(
            self,
            k,
            units,
            normalization=False,
            attn_heads=1,
            attn_heads_reduction='concat',  # {'concat', 'average'}
            scaled_lap=scaled_laplacian,
            activation=None,
            use_bias=True,
            kernel_initializer='glorot_uniform',
            bias_initializer='zeros',
            kernel_regularizer=None,
            bias_regularizer=None,
            activity_regularizer=None,
            kernel_constraint=None,
            bias_constraint=None,
            **kwargs):
        if attn_heads_reduction not in {'concat', 'average'}:
            raise ValueError('Possible reduction methods: concat, average')

        self.k = k
        self.units = units
        self.normalization = normalization
        self.attn_heads = attn_heads  # Number of attention heads (K in the paper)
        self.attn_heads_reduction = attn_heads_reduction  # Eq. 5 and 6 in the paper
        self.scaled_lap = scaled_lap
        self.activation = activations.get(activation)  # Eq. 4 in the paper
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.supports_masking = False

        # Populated by build()
        self.kernels = []  # Layer kernels for attention heads
        self.parameters = []
        self.fc = []
        self.biases = []  # Layer biases for attention heads

        self.supports = []
        s = K.constant(
            K.to_dense(calculate_adjacency_k(self.scaled_lap, self.k)))
        self.supports.append(s)

        super(DynamicGC, self).__init__(**kwargs)
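The attn_heads_reduction flag follows the convention the comments reference (Eqs. 5 and 6 of the GAT paper): 'concat' multiplies the feature width by the number of heads, while 'average' keeps it. A shape sketch with made-up per-head outputs:

import tensorflow as tf
from tensorflow.keras import backend as K

head_outputs = [tf.zeros((2, 10, 8)) for _ in range(4)]  # 4 heads, each (batch, nodes, units)

concat = K.concatenate(head_outputs, axis=-1)            # (2, 10, 32) for 'concat'
average = K.mean(K.stack(head_outputs, axis=0), axis=0)  # (2, 10, 8)  for 'average'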
def __init__(self,
             k,
             units,
             scaled_lap=scaled_laplacian,
             inner_act=None,
             activation='sigmoid',
             **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(RecurrentDGC, self).__init__(**kwargs)
    self.k = k
    self.units = units
    # `dim` was undefined in the original; `units` is the most plausible intent
    self.state_size = units
    self.output_size = units
    self.scaled_lap = scaled_lap
    self.inner_act = inner_act
    self.activation = activations.get(activation)
    self.supports_masking = True
    self.supports = []
    s = K.constant(
        K.to_dense(calculate_adjacency_k(self.scaled_lap, self.k)))
    self.supports.append(s)
Example #5
def batch(X, batch_size, seed=0, iterator=True):
    """"
    Partitions a dataset into batches, returning a batch dataset or an iterator.
    :param X: The dataset to batch
    :param batch_size: The size of each batch
    :param seed: The shuffle seed
    :retrun: A tensor batch dataset, or as a numpy iterator.
    """
    # buffer_size = int(1e6)
    buffer_size = X.shape[0]  # For perfect shuffle, buff is the size of X

    if K.is_sparse(X):  # If a sparse tensor
        X = K.to_dense(X)
    elif sp.sparse.issparse(X):  # If a sparse matrix
        X = X.todense()

    batches = Dataset.from_tensor_slices(X). \
        shuffle(buffer_size=buffer_size, seed=seed). \
        batch(batch_size, drop_remainder=True)

    if iterator:
        batches = batches.as_numpy_iterator()

    return batches
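A usage sketch with a hypothetical dense input, assuming the module-level imports the function relies on (Dataset from tf.data, scipy as sp, and the Keras backend as K) are in place:

import numpy as np

X = np.random.rand(100, 16).astype('float32')   # 100 samples, 16 features

for xb in batch(X, batch_size=32, seed=42):
    print(xb.shape)   # (32, 16); the last partial batch is dropped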
Example #6
    def test_to_dense_matrix(self):
        test_sparse_matrix = self.generate_test_sparse_matrix()

        assert_allclose(K.to_dense(test_sparse_matrix), test_sparse_matrix.toarray())
Example #7
    def test_to_dense(self):
        test_sparse_matrix = self.generate_test_sparse_matrix()
        test_var = K.variable(test_sparse_matrix)

        assert_allclose(K.to_dense(test_var), test_sparse_matrix.toarray())
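The fixture generate_test_sparse_matrix used by both tests is not shown; a plausible SciPy stand-in (an assumption, not the original):

import numpy as np
from scipy import sparse

def generate_test_sparse_matrix(self):
    # Small deterministic CSR matrix with mostly zero entries
    rng = np.random.RandomState(0)
    dense = rng.rand(4, 5)
    dense[dense < 0.7] = 0.0
    return sparse.csr_matrix(dense)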
    def RecognizeSpeech(self, wavsignal, fs):
        '''
        The function that ultimately performs speech recognition: it recognizes
        the speech in a wav sequence. Note: there is still a bug here.
        '''

        #data = self.data
        data = DataSpeech('E:\\语音数据集')
        data.LoadDataList('dev')
        # Get the input features
        #data_input = data.GetMfccFeature(wavsignal, fs)
        data_input = data.GetFrequencyFeature(wavsignal, fs)

        list_symbol_dic = data.list_symbol  # get the list of pinyin symbols

        labels = [
            'dong1', 'bei3', 'jun1', 'de5', 'yi4', 'xie1', 'ai4', 'guo2',
            'jiang4', 'shi4', 'ma3', 'zhan4', 'shan1', 'li3', 'du4', 'tang2',
            'ju4', 'wu3', 'su1', 'bing3', 'ai4', 'deng4', 'tie3', 'mei2',
            'deng3', 'ye3', 'fen4', 'qi3', 'kang4', 'zhan4'
        ]
        #labels = [ list_symbol_dic[-1] ]
        #labels = [ list_symbol_dic[-1] ]
        #while(len(labels) < 32):
        #	labels.append(list_symbol_dic[-1])

        feat_out = []
        #print("数据编号",n_start,filename)
        for i in labels:
            if ('' != i):
                n = data.SymbolToNum(i)
                feat_out.append(n)

        print(feat_out)
        labels = feat_out

        x = next(
            self.data_gen(data_input=np.array(data_input),
                          data_labels=np.array(labels),
                          input_length=len(data_input),
                          labels_length=len(labels),
                          batch_size=2))

        [test_input_data, y, test_input_length, label_length], labels = x
        xx = [test_input_data, y, test_input_length, label_length]

        pred = self._model.predict(x=xx)

        print(pred)

        shape = pred[:, :].shape
        print(shape)

        #print(test_input_data)
        y_p = self.test_func([test_input_data])
        print(type(y_p))
        print('y_p:', y_p)

        for j in range(0, 200):
            mean = sum(y_p[0][0][j]) / len(y_p[0][0][j])
            print('max y_p:', max(y_p[0][0][j]), 'min y_p:', min(y_p[0][0][j]),
                  'mean y_p:', mean, 'mid y_p:', y_p[0][0][j][100])
            print('argmin:', np.argmin(y_p[0][0][j]), 'argmax:',
                  np.argmax(y_p[0][0][j]))
            count = 0
            for i in y_p[0][0][j]:
                if (i < mean):
                    count += 1
            print('count:', count)

        print(K.is_sparse(y_p))
        y_p = K.to_dense(y_p)
        print(K.is_sparse(y_p))
        #y_p = tf.sparse_to_dense(y_p,(2,397),1417,0)
        print(test_input_length.T)
        test_input_length = test_input_length.reshape(2, 1)
        func_in_len = self.test_func_input_length([test_input_length])
        print(type(func_in_len))
        #in_len = np.ones(shape[0]) * shape[1]
        ctc_decoded = K.ctc_decode(y_p, input_length=func_in_len)

        print(ctc_decoded)
        #ctc_decoded = ctc_decoded[0][0]
        #out = K.get_value(ctc_decoded)[:,:64]
        #pred = self._model.predict_on_batch([data_input, labels_num, input_length, label_length])
        return pred[0][0]

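The decode-to-symbols step is left commented out above; a minimal, self-contained sketch of the usual K.ctc_decode pattern with dummy data (the 1417-class width mirrors the commented-out sparse_to_dense call, and the mapping back to pinyin via the symbol list is only indicated):

import numpy as np
from tensorflow.keras import backend as K

num_symbols = 1417                                               # width used above
y_pred = np.random.rand(2, 200, num_symbols).astype('float32')   # dummy network output
seq_len = np.array([200, 200], dtype='int32')                    # valid time steps per sample

decoded, log_prob = K.ctc_decode(y_pred, input_length=seq_len, greedy=True)
best = K.get_value(decoded[0])   # (batch, max_decoded_len), padded with -1
# e.g. symbols = [list_symbol_dic[i] for i in best[0] if i >= 0]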