Code example #1
File: train.py Project: zwlshine/CNTK
def create_model(input_dim):
    row = sequence.input_variable(shape=input_dim)
    col = sequence.input_variable(shape=input_dim)
    rowh = Sequential([Embedding(opt.embed), Stabilizer(), Dropout(opt.dropout)])(row)
    colh = Sequential([Embedding(opt.embed), Stabilizer(), Dropout(opt.dropout)])(col)

    # concatenate the row and column embeddings and run them through the LSTM stack
    x = C.splice(rowh, colh, axis=-1)
    x = lightlstm(opt.embed, opt.nhid)(x)
    x = For(range(opt.layer-1), lambda: lightlstm(opt.nhid, opt.nhid))(x)
    # split the hidden state back into its row and column halves
    rowh = C.slice(x, -1, opt.nhid * 0, opt.nhid * 1)
    colh = C.slice(x, -1, opt.nhid * 1, opt.nhid * 2)

    row_predict = Sequential([Dropout(opt.dropout), Dense(input_dim)])(rowh)
    col_predict = Sequential([Dropout(opt.dropout), Dense(input_dim)])(colh)

    # label variables for the row and column predictions
    row_label = sequence.input_variable(shape=input_dim)
    col_label = sequence.input_variable(shape=input_dim)
    model = C.combine([row_predict, col_predict])

    return {'row':       row,
            'col':       col,
            'row_label': row_label,
            'col_label': col_label,
            'model':     model}
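A minimal usage sketch, not part of the original train.py: the dictionary returned above could be wired to a training criterion roughly as follows. `vocab_dim` and the way the two losses are combined are assumptions for illustration.

import cntk as C

# assumption: vocab_dim matches the one-hot dimension expected by the reader
net = create_model(vocab_dim)
model = net['model']
# the combined model exposes the row and column predictions as separate outputs
row_loss = C.cross_entropy_with_softmax(model.outputs[0], net['row_label'])
col_loss = C.cross_entropy_with_softmax(model.outputs[1], net['col_label'])
criterion = row_loss + col_loss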
Code example #2
def test_sequential_convolution_without_reduction_dim():
    c = Convolution(3, init=np.array([4., 2., 1.], dtype=np.float32), sequential=True, pad=False, reduction_rank=0, bias=False)
    c.update_signature(Sequence[Tensor[()]])  # input is a sequence of scalars
    data = [np.array([2., 6., 4., 8., 6.])]   # like a short audio sequence, in the dynamic dimension
    out = c(data)
    exp = [[24., 40., 38.]]
    np.testing.assert_array_equal(out, exp, err_msg='Error in sequential convolution without reduction dimension')

    c = Convolution(3, init=np.array([4., 2., 1.], dtype=np.float32), sequential=True, pad=False, reduction_rank=0, bias=False)
    c.update_signature(Sequence[Tensor[1]]) # input is a sequence of dim-1 vectors
    data = [np.array([[2.], [6.], [4.], [8.], [6.]])]
    out = c(data)
    exp = [[[24.], [40.], [38.]]] # not reducing; hence, output is also a sequence of dim-1 vectors
    np.testing.assert_array_equal(out, exp, err_msg='Error in sequential convolution without reduction dimension')

    # these cases failed before
    emb_dim = 10
    x = input(**Sequence[Tensor[20]])
    m = Embedding(emb_dim)(x)
    m = Convolution(filter_shape=3, sequential=True)(m)

    # this one still fails
    # Reshape: Operand (sub-)dimensions '[3]' incompatible with desired replacement (sub-)dimensions '[]'. Number of elements must be the same..
    m = Embedding(emb_dim)(x)
    m = reshape(m, (emb_dim,1))
    m = Convolution(filter_shape=(3,1), num_filters=13, pad=True, sequential=True)(m)

    m = Embedding(emb_dim)(x)
    m = Convolution(filter_shape=3, pad=True, sequential=True)(m)
Code example #3
def create_word2vec_cbow_model(word_one_hot, context_one_hots, negative_one_hots):
	# shared_embedding_layer = Embedding(G.embedding_dimension, uniform(scale=1.0/2.0/G.embedding_dimension))
	shared_embedding_layer = Embedding(G.embedding_dimension)

	word_embedding = shared_embedding_layer(word_one_hot)
	context_embeddings = [shared_embedding_layer(x) for x in context_one_hots]
	negative_embeddings = [shared_embedding_layer(x) for x in negative_one_hots]

	print(word_embedding.shape)
	word_embedding_reshaped = C.reshape(word_embedding, shape=(1, G.embedding_dimension))
	print(word_embedding_reshaped.shape)

	context_embeddings_all = C.reshape(C.splice(*context_embeddings), shape=(context_size, G.embedding_dimension))
	negative_embeddings_all = C.reshape(C.splice(*negative_embeddings), shape=(G.negative, G.embedding_dimension))
	print(context_embeddings_all.shape)
	print(negative_embeddings_all.shape)
	cbow = C.reshape(C.reduce_mean(context_embeddings_all, 0), shape=(G.embedding_dimension))
	print(cbow.shape)

	# word_context_product = C.times_transpose(word_embedding_reshaped, cbow)
	word_context_product = C.times_transpose(word_embedding, cbow)
	print(word_context_product.shape)
	negative_context_product = C.reshape(C.times_transpose(negative_embeddings_all, cbow), shape=(G.negative))
	print(negative_context_product.shape)

	word_negative_context_product = C.splice(word_context_product, negative_context_product)
	print(word_negative_context_product.shape)
	# return model and shared embedding layer
	return word_negative_context_product, shared_embedding_layer
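A hedged sketch, not from the original file, of how this builder might be called; the sparse one-hot input variables and the `vocab_size`, `context_size`, and `G.negative` values are assumed to be defined by the surrounding script.

import cntk as C

# assumptions: vocab_size, context_size, and G.negative come from the surrounding script
word = C.input_variable(vocab_size, is_sparse=True)
contexts = [C.input_variable(vocab_size, is_sparse=True) for _ in range(context_size)]
negatives = [C.input_variable(vocab_size, is_sparse=True) for _ in range(G.negative)]
model, embedding_layer = create_word2vec_cbow_model(word, contexts, negatives)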
Code example #4
File: cntklstm.py Project: choudam/python-env
def LSTM_sequence_classifier_net(input, num_output_classes, embedding_dim,
                                 LSTM_dim, cell_dim):
    # embed each token, run an LSTM over the sequence, keep the last hidden
    # state, and project it onto the output classes
    lstm_classifier = Sequential([Embedding(embedding_dim),
                                  Recurrence(LSTM(LSTM_dim, cell_dim)),
                                  sequence.last,
                                  Dense(num_output_classes)])
    return lstm_classifier(input)
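A short usage sketch, assumed rather than taken from cntklstm.py: the classifier takes a sparse sequence input; the dimensions below are placeholders.

import cntk as C

# placeholder dimensions for illustration only
vocab_dim, num_classes = 2000, 5
features = C.sequence.input_variable(shape=vocab_dim, is_sparse=True)
z = LSTM_sequence_classifier_net(features, num_classes,
                                 embedding_dim=50, LSTM_dim=25, cell_dim=25)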
Code example #5
def test_layers_name():
    from cntk import placeholder
    I = placeholder(name='input')
    p = Dense(10, name='dense10')(I)

    assert(p.name == 'dense10')
    assert(I.name == 'input')
    assert(p.root_function.name == 'dense10')

    q = Convolution((3, 3), 3, name='conv33')(I)
    assert(q.name == 'conv33')
    assert(q.root_function.name == 'conv33')

    e = Embedding(0, name='emb')(I)
    assert(e.name == 'emb')
    assert(e.root_function.name == 'emb')

    e = Embedding(0, name='')(I)
    assert(e.name == '')
    assert(e.root_function.name == '')
Code example #6
File: layers_test.py Project: muskanmahajan37/struct
def test_layers_embedding():
    embDim = 3
    y = C.input_variable(2)

    # embedding base case
    e = Embedding(shape=embDim, name='foo')

    dat = np.array([[-1., 1.]], dtype=np.float32)
    res = e(y).eval({y: dat})

    npout = np.matrix(dat[0]) * e.E.value
    np.testing.assert_array_equal(res,
                                  npout,
                                  err_msg='Error in embedding layer')

    # embedding, initialized from a user-supplied starting point for the parameter
    e = Embedding(embDim, init=[[1, 3, 2], [3, 4, 1]], name='bar')

    dat = np.array([[-1., 1.]], dtype=np.float32)
    res = e(y).eval({y: dat})

    npout = np.matrix(dat[0]) * e.E.value
    np.testing.assert_array_equal(res,
                                  npout,
                                  err_msg='Error in constant embedding layer')

    # embedding, initialized from a user-supplied constant weight table
    e = Embedding(weights=[[1, 3, 2], [3, 4, 1]], name='baz')

    dat = np.array([[-1., 1.]], dtype=np.float32)
    res = e(y).eval({y: dat})

    npout = np.matrix(dat[0]) * e.E.value
    np.testing.assert_array_equal(res,
                                  npout,
                                  err_msg='Error in constant embedding layer')

    # Failing calls
    with pytest.raises(ValueError):
        Embedding(shape=None, init=1, weights=[1., 2., 3.])

    with pytest.raises(ValueError):
        Embedding(3, weights=[1., 2., 3.])

    with pytest.raises(ValueError):
        Embedding(name="embedding")