    def train(self):  # BOTH SAMPLING (numpy)
        pdata, pdict, pembs = self.pdata, self.pdict, self.pembs

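        # Yield the positive pair first, then num_negs uniformly sampled
        # pairs; each comes with exp(-distance) and the backward "env".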
        def neg_sampling(i1, i2, lp=range(len(pdict))):
            d, env = self.dists(pembs[i1], pembs[i2])
            yield i1, i2, math.exp(-d), env
            for _ in xrange(self.num_negs):
                s1, s2 = random.choice(lp), random.choice(lp)
                d, env = self.dists(pembs[s1], pembs[s2])
                yield s1, s2, math.exp(-d), env

        for epoch in xrange(self.num_iter):
            print epoch
            random.shuffle(pdata)
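            # Interpolate the learning rate linearly from lr1 to lr2.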
            r = 1. * epoch / self.num_iter
            lr = (1 - r) * self.lr1 + r * self.lr2
            for w1, w2 in pdata:
                i1, i2 = pdict[w1], pdict[w2]
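                # Score the positive pair plus num_negs random pairs; Z is the
                # softmax normalizer over their exp(-distance) scores.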
                exp_neg_dists = list(neg_sampling(i1, i2))
                Z = sum(map(operator.itemgetter(2), exp_neg_dists))
                for i, (i1, i2, d, env) in enumerate(exp_neg_dists):
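                    # Coefficient of the loss gradient for this pair: 1 for
                    # the positive pair (i == 0) minus its softmax weight
                    # d / Z (d is exp(-distance) here).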
                    gl, gr = self.backward(1. * (i == 0) - d / Z, env)
                    if gl is not None:
                        pembs[i1] = self.add_clip(-lr, pembs[i1], gl)
                    if gr is not None:
                        pembs[i2] = self.add_clip(-lr, pembs[i2], gr)
        pplot(self.pdict, self.pembs, 'mammal_numpy')
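
    # A minimal sketch (an assumption, not part of the original listing) of
    # the Poincare distance that self.dists is taken to compute, following
    # Nickel & Kiela (2017):
    #   d(u, v) = arccosh(1 + 2 ||u - v||^2 / ((1 - ||u||^2) (1 - ||v||^2)))
    # The train() above also expects an "env" of intermediates for its
    # hand-written backward pass; this sketch returns the distance alone.
    def poincare_dist(self, u, v, eps=1e-5):
        sqdist = np.dot(u - v, u - v)            # ||u - v||^2
        alpha = max(1. - np.dot(u, u), eps)      # 1 - ||u||^2, kept positive
        beta = max(1. - np.dot(v, v), eps)       # 1 - ||v||^2, kept positive
        gamma = 1. + 2. * sqdist / (alpha * beta)
        return np.arccosh(gamma)
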
    def train(self):  # LEFT SAMPLING (PyTorch)
        for epoch in xrange(self.num_iter):
            print epoch
            random.shuffle(self.pdata)
            # Interpolate the learning rate linearly from lr1 to lr2.
            r = 1. * epoch / self.num_iter
            lr = (1 - r) * self.lr1 + r * self.lr2
            for w1, w2 in self.pdata:
                i1, i2 = self.pdict[w1], self.pdict[w2]
                u = Variable(self.pembs[i1].unsqueeze(0), requires_grad=True)
                v = Variable(self.pembs[i2].unsqueeze(0), requires_grad=True)
                # Draw num_negs random indices; the negatives are all paired
                # against u.
                sp = torch.from_numpy(
                    np.random.randint(0,
                                      len(self.pdict),
                                      size=(self.num_negs, )))
                negs = Variable(self.pembs[sp], requires_grad=True)
                # Negative log-softmax over distances: the positive pair
                # against the sampled negatives.
                loss = -torch.log(
                    torch.exp(-self.dists(u, v)) /
                    torch.exp(-self.dists(u, negs)).sum())
                loss.backward()
                # Riemannian SGD: scale the Euclidean gradients by the inverse
                # Poincare metric, (1 - ||x||^2)^2 / 4, before the update.
                self.pembs[sp] -= lr * (((1 - negs.norm(dim=1)**2)**2) /
                                        4.).data.unsqueeze(1) * negs.grad.data
                self.pembs[i1] -= lr * ((
                    (1 - u.norm()**2)**2) / 4.).data * u.grad.data
                self.pembs[i2] -= lr * ((
                    (1 - v.norm()**2)**2) / 4.).data * v.grad.data
                # proj is assumed to keep the embeddings inside the unit ball.
                self.pembs = self.proj(self.pembs)
        pplot(self.pdict, self.pembs, 'mammal_torch')
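
    # A minimal sketch (an assumption, not part of the original listing) of
    # the projection self.proj is taken to perform in the torch version:
    # vectors that drift outside the open unit ball are pulled back to norm
    # 1 - eps, as in Nickel & Kiela (2017). The name proj_sketch is
    # illustrative.
    def proj_sketch(self, embs, eps=1e-5):
        norms = embs.norm(dim=1, keepdim=True)
        # Only rescale rows whose norm reaches 1 - eps; leave the rest as-is.
        scale = torch.clamp(norms, min=1. - eps) / (1. - eps)
        return embs / scale
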
    def train(self):  # LEFT SAMPLING (TensorFlow)
        ld = len(self.pdata)
        lp = range(len(self.pdict))
        graph = tf.Graph()
        with graph.as_default():
            step = tf.Variable(0, trainable=False)
            pembs = tf.Variable(
                tf.random_uniform([len(self.pdict), self.dim],
                                  minval=-0.001,
                                  maxval=0.001))
            n1 = tf.placeholder(tf.int32, shape=(1, ), name='n1')
            n2 = tf.placeholder(tf.int32, shape=(1, ), name='n2')
            sp = tf.placeholder(tf.int32, shape=(self.num_negs, ), name='sp')
            u, v, negs = map(lambda x: tf.nn.embedding_lookup(pembs, x),
                             [n1, n2, sp])
            # Negative log-softmax over distances: the positive pair against
            # the sampled negatives.
            loss = -tf.log(
                tf.exp(-self.dists(u, v)) /
                tf.reduce_sum(tf.exp(-self.dists(u, negs))))
            # Decay the learning rate from lr1 to lr2 over all training steps.
            learning_rate = tf.train.polynomial_decay(self.lr1, step,
                                                      self.num_iter * ld,
                                                      self.lr2)
            optimizer = tf.train.GradientDescentOptimizer(learning_rate)
            grad_vars = optimizer.compute_gradients(loss)
            # Riemannian SGD: scale the Euclidean gradients by the inverse
            # Poincare metric, (1 - ||x||^2)^2 / 4, before applying them.
            rescaled = [(g * (1. - tf.reshape(tf.norm(v, axis=1),
                                              (-1, 1))**2)**2 / 4., v)
                        for g, v in grad_vars]
            trainstep = optimizer.apply_gradients(rescaled, global_step=step)
            # proj is assumed to keep the embeddings inside the unit ball.
            pembs = self.proj(pembs)
            init = tf.global_variables_initializer()
        with tf.Session(graph=graph) as session:
            init.run()
            for epoch in xrange(self.num_iter):
                print epoch
                random.shuffle(self.pdata)
                for w1, w2 in self.pdata:
                    i1, i2 = self.pdict[w1], self.pdict[w2]
                    _, self.pembs = session.run(
                        [trainstep, pembs],
                        feed_dict={
                            n1: [i1],
                            n2: [i2],
                            sp: [random.choice(lp)
                                 for _ in range(self.num_negs)]
                        })
        pplot(self.pdict, self.pembs, 'mammal_tensor')
    def pplot(self, **kwargs):
        """
        Plotting method for AbsDyn Unit. It iterates on the component ctype of the Block and plots it.
        Currently, it only works with Var, Constraints and Expressions.

        :param kwargs:
        :return: lines, axe and figure
        """

        from utils import pplot

        ctype = kwargs.pop('ctype', Var)
        active = kwargs.pop('active', True)

        lines, ax, fig = pplot(
            *[v for v in self.component_objects(ctype=ctype, active=active)],
            **kwargs)

        return lines, ax, fig
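
    # A hypothetical usage sketch (the unit object and keyword arguments are
    # illustrative, not from the original): plot all active Var components of
    # a unit and save the resulting figure.
    #
    #     lines, ax, fig = unit.pplot(ctype=Var, active=True)
    #     fig.savefig('unit_vars.png')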