Code example #1
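The imports are omitted from these excerpts; going by the aliases used, they presumably follow Mariana's usual conventions:

import Mariana.layers as ML
import Mariana.costs as MC
import Mariana.scenari as MS
import Mariana.activations as MA
import numpy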
    def test_embedding(self):
        """the first 3 and the last 3 should be diametrically opposed"""
        data = [[0], [1], [2], [3], [4], [5]]
        targets = [0, 0, 0, 1, 1, 1]

        ls = MS.GradientDescent(lr=0.5)
        cost = MC.NegativeLogLikelihood()

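        # Embedding(size, nbDimensions, dictSize): one index per sample,
        # 2-d embedding vectors, a dictionary of 6 entries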
        emb = ML.Embedding(1, 2, len(data), learningScenario=ls, name="emb")
        o = ML.SoftmaxClassifier(2,
                                 learningScenario=MS.Fixed(),
                                 costObject=cost,
                                 name="out")
        net = emb > o

        miniBatchSize = 2
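        # 2000 epochs of mini-batch gradient descent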
        for epoch in xrange(2000):
            for i in xrange(0, len(data), miniBatchSize):
                net.train(o,
                          emb=data[i:i + miniBatchSize],
                          targets=targets[i:i + miniBatchSize])

        embeddings = emb.getEmbeddings()
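        # diametrically opposed vectors give a strongly negative dot product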
        for i in xrange(0, len(data) // 2):
            v = numpy.dot(embeddings[i], embeddings[i + len(data) // 2])
            self.assertTrue(v < -1)
Code example #2
    def test_multiinputs(self):
        ls = MS.GradientDescent(lr=0.1)

        inpA = ML.Embedding(2, 2, 2, name="IA")
        inpB = ML.Input(2, name="IB")
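        # a Composite layer concatenates the outputs of every layer fed into it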
        inpNexus = ML.Composite(name="InputNexus")

        h1 = ML.Hidden(32,
                       activation=MA.ReLU(),
                       decorators=[],
                       regularizations=[],
                       name="Fully-connected1")

        o = ML.Regression(2,
                          decorators=[],
                          activation=MA.ReLU(),
                          learningScenario=ls,
                          costObject=MC.CrossEntropy(),
                          name="Out",
                          regularizations=[])

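        # '>' feeds one layer into the next; both inputs converge on the nexus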
        inpA > inpNexus
        inpB > inpNexus
        m = inpNexus > h1 > o
        m.init()
Code example #3
    def test_embedding(self):
        """the first 3 and the last 3 should be diametrically opposed"""
        data = [[0], [1], [2], [3], [4], [5]]
        targets = [0, 0, 0, 1, 1, 1]

        ls = MS.GradientDescent(lr=0.5)
        cost = MC.NegativeLogLikelihood()

        inp = ML.Input(1, 'inp')
        emb = ML.Embedding(nbDimensions=2,
                           dictSize=len(data),
                           learningScenari=[ls],
                           name="emb")
        o = ML.SoftmaxClassifier(2,
                                 learningScenari=[MS.Fixed()],
                                 cost=cost,
                                 name="out")
        net = inp > emb > o
        net.init()

        miniBatchSize = 2
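        # train() takes a dict keyed by "layer.slot" names; the result is
        # read back under the "out.drive.train" key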
        for epoch in xrange(2000):
            for i in xrange(0, len(data), miniBatchSize):
                net["out"].train({
                    "inp.inputs": data[i:i + miniBatchSize],
                    "out.targets": targets[i:i + miniBatchSize]
                })["out.drive.train"]

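        # pull the learned embedding matrix out of the layer's parameters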
        embeddings = emb.getP("embeddings").getValue()
        for i in xrange(0, len(data) // 2):
            v = numpy.dot(embeddings[i], embeddings[i + len(data) // 2])
            self.assertTrue(v < -1)
Code example #4
File: embedding.py Project: rsumner31/Mariana-212
import Mariana.layers as ML
import Mariana.costs as MC
import Mariana.scenari as MS
import Mariana.settings as MSET

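# turn off verbose console output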
MSET.VERBOSE = False

# The first 3 and the last 3 should end up diametrically opposed
data = [[0, 0], [1, 0], [2, 0], [3, 0], [4, 0], [5, 0]]
targets = [0, 0, 0, 1, 1, 1]

ls = MS.GradientDescent(lr=0.5)
cost = MC.NegativeLogLikelihood()

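# note: 'nbDimentions' (sic) matches this library version's keyword spelling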
emb = ML.Embedding(2,
                   nbDimentions=2,
                   dictSize=len(data),
                   learningScenario=ls,
                   name="emb")
o = ML.SoftmaxClassifier(2,
                         learningScenario=MS.Fixed(),
                         costObject=cost,
                         name="out")
net = emb > o

miniBatchSize = 2
print "before:"
net.init()

print emb.getEmbeddings()

for i in xrange(2000):
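    # the excerpt ends here; a minimal completion, assuming the same
    # mini-batch pattern as code example #1
    for j in xrange(0, len(data), miniBatchSize):
        net.train(o,
                  emb=data[j:j + miniBatchSize],
                  targets=targets[j:j + miniBatchSize])

print "after:"
print emb.getEmbeddings()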