Example #1
    def run_denotation(self, lines, dataset='denotation'):
        """
        Runs stochastic gradient descent with model as objective.
        This method is about training with a very large dataset.
        """
        m = len(lines)

        # NOTE: shuffling is disabled here; trees are parsed per minibatch below
        #random.shuffle(trees)

        for i in xrange(0, m-self.minibatch+1, self.minibatch):
            self.it += 1

            # Load and parse one minibatch worth of trees
            trees = list(tr.inputarray(lines[i:i+self.minibatch], dataset))

            # map word indices to loaded trees
            tr.map_words_to_trees(trees, dataset)
            # lines were already sliced above, so all parsed trees belong
            # to this minibatch
            mb_data = trees

            # Compute the cost and gradients on the minibatch
            cost,grad = self.model.costAndGrad(mb_data)

            # compute exponentially weighted cost
            if np.isfinite(cost):
                if self.it > 1:
                    self.expcost.append(.01*cost + .99*self.expcost[-1])
                else:
                    self.expcost.append(cost)
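            # The smoothed estimate is an exponential moving average,
            #   expcost_t = 0.01*cost_t + 0.99*expcost_{t-1},
            # so with decay 0.99 roughly the last ~100 minibatch costs
            # dominate the reported ExpCost.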

            if self.optimizer == 'sgd':
                update = grad
                scale = -self.alpha

            elif self.optimizer == 'adagrad':
                # trace = trace+grad.^2
                self.gradt[1:] = [gt+g**2
                        for gt,g in zip(self.gradt[1:],grad[1:])]
                # update = grad.*trace.^(-1/2)
                update = [g*(1./np.sqrt(gt))
                        for gt,g in zip(self.gradt[1:],grad[1:])]
                # handle dictionary separately
                dL = grad[0]
                dLt = self.gradt[0]
                for j in dL.iterkeys():
                    dLt[:,j] = dLt[:,j] + dL[j]**2
                    dL[j] = dL[j] * (1./np.sqrt(dLt[:,j]))
                update = [dL] + update
                scale = -self.alpha

            else:
                raise ValueError("Unknown optimizer: %s" % self.optimizer)

            # update params
            self.model.updateParams(scale,update,log=False)

            self.costt.append(cost)
            # print every iteration; raise the modulus to log less often
            # (the expcost check guards against a non-finite first cost)
            if self.it % 1 == 0 and self.expcost:
                print "Iter %d : Cost=%.4f, ExpCost=%.4f."%(self.it,cost,self.expcost[-1])
Example #2
# module-level imports assumed by this snippet; tr and nnet_rte are
# project modules
import pickle
import numpy as np

def test_denotation(netFile, data, dataset):
    # trees, vocab = tr.loadTrees(dataset,data)
    assert netFile is not None, "Must give model to test"
    with open(netFile, "r") as fid:
        opts = pickle.load(fid)
        _ = pickle.load(fid)

        with open("mr_%s.p" % dataset, "rb") as f:
            x = pickle.load(f)
        W = x[0]
        # W2 is initialised here but never used below
        W2 = 0.01 * np.random.randn(opts.wvecDim, opts.numWords)
        rnn = nnet_rte.RNNRTE(opts.wvecDim, opts.outputDim, 200, opts.numWords, opts.minibatch)
        rnn.initParams(W)
        rnn.fromFile(fid)

        # note: lines are read using opts saved with the model,
        # not this function's data argument
        lines = tr.get_lines(opts.dataset, opts.data)
        m = len(lines)
        CHUNK_SIZE = 10000
        minibatch = opts.minibatch
        print "Testing..."

        cost = correct = total = 0
        # NOTE: this stride drops a final partial chunk when m is not a
        # multiple of CHUNK_SIZE
        for i in xrange(0, m - CHUNK_SIZE + 1, CHUNK_SIZE):
            # Load and parse one chunk worth of trees
            trees = list(tr.inputarray(lines[i : i + CHUNK_SIZE], dataset))

            # map word indices to loaded trees
            tr.map_words_to_trees(trees, dataset)

            c, cor, tot = rnn.costAndGrad(trees, test=True)
            cost += c
            correct += cor
            total += tot
            # i advances in CHUNK_SIZE steps, so this reports progress
            # once per chunk
            if i % CHUNK_SIZE == 0:
                print "tested: %d" % i

    print "Cost %f, Correct %d/%d, Acc %f" % (cost, correct, total, correct / float(total))