Example #1
import codecs

def writeEmbeddingsTSV(word_vec, filename):
    # revlut and UnicodeWriter are project-local helpers: revlut inverts a
    # {word: index} vocab, and UnicodeWriter wraps csv.writer for unicode output.
    idx2word = revlut(word_vec.vocab)
    with codecs.open(filename, 'w') as f:
        wrtr = UnicodeWriter(f, delimiter='\t', quotechar='"')
        # wrtr.writerow(['Word'])  # optional header row
        for i in range(len(idx2word)):
            row = idx2word[i]
            wrtr.writerow([row])
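
A minimal usage sketch, assuming revlut simply inverts a {word: index} dict and that the project's UnicodeWriter helper is importable; the FakeWordVec stand-in and the output path are purely illustrative:

def revlut(lut):
    # Hypothetical stand-in: invert {word: index} into {index: word}
    return {v: k for k, v in lut.items()}

class FakeWordVec(object):
    # Hypothetical embeddings object exposing only the vocab attribute
    def __init__(self):
        self.vocab = {'<PAD>': 0, 'the': 1, 'cat': 2}

writeEmbeddingsTSV(FakeWordVec(), 'vocab.tsv')  # writes one word per row
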
Example #2
    @classmethod
    def restore(cls, sess, indir, base, checkpoint_name=None):
        """
        This method needs to know the base name used when training the model.

        Although a variable scope is declared here, variables are still
        fetched by name, so the base name is duplicated when looking them up.
        It would be good to clean this up at some point.
        """
        klass = cls(sess, base)
        basename = os.path.join(indir, base)
        checkpoint_name = checkpoint_name or basename
        with open(basename + '.saver') as fsv:
            saver_def = tf.train.SaverDef()
            text_format.Merge(fsv.read(), saver_def)
            print('Loaded saver def')

        # The serialized GraphDef is binary, so open the file in binary mode
        with gfile.FastGFile(basename + '.graph', 'rb') as f:
            gd = tf.GraphDef()
            gd.ParseFromString(f.read())
            # as_default() returns a context manager; without "with" it was a no-op
            with sess.graph.as_default():
                tf.import_graph_def(gd, name='')
            print('Imported graph def')
            with tf.variable_scope(base):
                sess.run(saver_def.restore_op_name,
                         {saver_def.filename_tensor_name: checkpoint_name})
                graph = tf.get_default_graph()
                klass.x = graph.get_tensor_by_name(base + '/x:0')
                klass.xch = graph.get_tensor_by_name(base + '/xch:0')
                klass.y = graph.get_tensor_by_name(base + '/y:0')
                klass.pkeep = graph.get_tensor_by_name(base + '/pkeep:0')
                klass.word_keep = graph.get_tensor_by_name(base + '/word_keep:0')
                klass.phase = graph.get_tensor_by_name(base + '/phase:0')

                # The output ops live outside the model's variable scope
                klass.best = graph.get_tensor_by_name('output/ArgMax:0')
                klass.probs = graph.get_tensor_by_name('output/transpose:0')
                try:
                    klass.A = graph.get_tensor_by_name(base + '/Loss/block/transitions:0')
                    print('Found transition matrix in graph, setting crf=True')
                    klass.crf = True
                except KeyError:
                    # get_tensor_by_name raises KeyError when the name is absent
                    print('Failed to get transition matrix, setting crf=False')
                    klass.A = None
                    klass.crf = False


        with open(basename + '.labels', 'r') as f:
            klass.labels = json.load(f)

        klass.word_vocab = {}
        if os.path.exists(basename + '-word.vocab'):
            with open(basename + '-word.vocab', 'r') as f:
                klass.word_vocab = json.load(f)

        with open(basename + '-char.vocab', 'r') as f:
            klass.char_vocab = json.load(f)

        with open(basename + '-params', 'r') as f:
            params = json.load(f)
            klass.maxlen = params['maxlen']
            klass.maxw = params['maxw']
            # self.name = params['model_name']

        klass.saver = tf.train.Saver(saver_def=saver_def)
        klass.y_lut = revlut(klass.labels)

        return klass
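
A hedged usage sketch of restore, assuming TF 1.x and that training wrote the matching .saver, .graph, .labels, vocab, and -params files under the same base name; RNNTaggerModel is a placeholder class name, not the project's verified one:

import tensorflow as tf

with tf.Session() as sess:
    # 'tagger' must match the base name used at training time
    model = RNNTaggerModel.restore(sess, './model', 'tagger')
    print(model.labels, 'crf=%s' % model.crf)
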
Example #3
    def params(self, labels, word_vec, char_vec, mxlen,
               maxw, rnntype, wsz, hsz, filtsz, num_filt=64,
               kernel_size=3, num_layers=4, num_iterations=3,
               crf=False):

        self.num_iterations = num_iterations
        self.num_layers = num_layers
        self.kernel_size = kernel_size
        self.num_filt = num_filt
        self.crf = crf
        char_dsz = char_vec.dsz
        nc = len(labels)
        self.num_classes = nc
        self.x = tf.placeholder(tf.int32, [None, mxlen], name="x")
        self.xch = tf.placeholder(tf.int32, [None, mxlen, maxw], name="xch")
        self.y = tf.placeholder(tf.int32, [None, mxlen], name="y")
        # This placeholder was also named "y", which TF silently uniquifies;
        # give it its own name so it can be fetched reliably.
        self.intermediate_probs = tf.placeholder(tf.int32, [None, mxlen, nc, num_iterations + 2], name="intermediate_probs")
        self.pkeep = tf.placeholder(tf.float32, name="pkeep")
        self.word_keep = tf.placeholder(tf.float32, name="word_keep")
        self.labels = labels
        self.y_lut = revlut(labels)
        self.phase = tf.placeholder(tf.bool, name="phase")
        self.l2_loss = tf.constant(0.0)
        
        self.word_vocab = {}
        if word_vec is not None:
            self.word_vocab = word_vec.vocab
        self.char_vocab = char_vec.vocab
        self.char_dsz = char_dsz
        self.wsz = wsz
        self.mxlen = mxlen
        self.drop_penalty = 0.001

        self.A = tf.get_variable("transitions", [self.num_classes, self.num_classes])
        # if num_filt != nc:
        #     raise RuntimeError('number of filters needs to be equal to number of classes!')

        self.filtsz = [int(filt) for filt in filtsz.split(',')]

        with tf.variable_scope('output/'):
            W = tf.Variable(tf.truncated_normal([self.num_filt, nc],
                                                stddev=0.1), name="W")
            # W = tf.get_variable('W', initializer=tf.contrib.layers.xavier_initializer(), shape=[num_filt, nc])
            b = tf.Variable(tf.constant(0.0, shape=[1, nc]), name="b")

        intermediates = []

        if word_vec is not None:
            with tf.name_scope("WordLUT"):
                self.Ww = tf.Variable(tf.constant(word_vec.weights, dtype=tf.float32), name="W")
                # Zero row 0 so the padding index always embeds to the zero vector
                self.we0 = tf.scatter_update(self.Ww, tf.constant(0, dtype=tf.int32, shape=[1]), tf.zeros(shape=[1, word_vec.dsz]))

        with tf.name_scope("CharLUT"):
            self.Wc = tf.Variable(tf.constant(char_vec.weights, dtype=tf.float32), name="W")
            # Same padding-row zeroing for the character embedding table
            self.ce0 = tf.scatter_update(self.Wc, tf.constant(0, dtype=tf.int32, shape=[1]), tf.zeros(shape=[1, self.char_dsz]))

        self.input_dropout_keep_prob = self.word_keep
        self.middle_dropout_keep_prob = 1.00
        self.hidden_dropout_keep_prob = self.pkeep

        self.intermediate_probs, self.probs = self.forward(self.hidden_dropout_keep_prob, 
                            self.input_dropout_keep_prob, 
                            self.middle_dropout_keep_prob, 
                            reuse=False)

        self.loss = self.createLoss()
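
A hedged sketch of how this params variant might be invoked; the hyperparameter values and the word_vec/char_vec objects (each assumed to expose .vocab, .dsz, and .weights) are illustrative placeholders, not values from the original project:

model = Tagger()  # placeholder for whatever class defines params()
model.params(labels={'O': 0, 'B-PER': 1, 'I-PER': 2},
             word_vec=word_vec, char_vec=char_vec,
             mxlen=100, maxw=30, rnntype='blstm',
             wsz=30, hsz=200, filtsz='1,2,3',
             num_filt=64, kernel_size=3,
             num_layers=4, num_iterations=3, crf=True)
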
Example #4
    def params(self,
               labels,
               word_vec,
               char_vec,
               mxlen,
               maxw,
               rnntype,
               wsz,
               hsz,
               filtsz,
               crf=False):

        self.crf = crf
        char_dsz = char_vec.dsz
        nc = len(labels)
        self.x = tf.placeholder(tf.int32, [None, mxlen], name="x")
        self.xch = tf.placeholder(tf.int32, [None, mxlen, maxw], name="xch")
        self.y = tf.placeholder(tf.int32, [None, mxlen], name="y")
        self.pkeep = tf.placeholder(tf.float32, name="pkeep")
        self.labels = labels
        self.y_lut = revlut(labels)

        self.word_vocab = {}
        if word_vec is not None:
            self.word_vocab = word_vec.vocab
        self.char_vocab = char_vec.vocab

        filtsz = [int(filt) for filt in filtsz.split(',')]

        if word_vec is not None:
            with tf.name_scope("WordLUT"):
                Ww = tf.Variable(tf.constant(word_vec.weights,
                                             dtype=tf.float32),
                                 name="W")

                # Zero row 0 so the padding index embeds to the zero vector
                we0 = tf.scatter_update(
                    Ww, tf.constant(0, dtype=tf.int32, shape=[1]),
                    tf.zeros(shape=[1, word_vec.dsz]))

                with tf.control_dependencies([we0]):
                    wembed = tf.nn.embedding_lookup(Ww,
                                                    self.x,
                                                    name="embeddings")

        with tf.name_scope("CharLUT"):
            Wc = tf.Variable(tf.constant(char_vec.weights, dtype=tf.float32),
                             name="W")

            # Same padding-row zeroing for the character embedding table
            ce0 = tf.scatter_update(Wc,
                                    tf.constant(0, dtype=tf.int32, shape=[1]),
                                    tf.zeros(shape=[1, char_dsz]))

            with tf.control_dependencies([ce0]):
                xch_seq = tensorToSeq(self.xch)
                cembed_seq = []
                for i, xch_i in enumerate(xch_seq):
                    cembed_seq.append(
                        sharedCharWord(Wc, xch_i, maxw, filtsz, char_dsz, wsz,
                                       None if i == 0 else True))
                word_char = seqToTensor(cembed_seq)

            # List to tensor, reform as (T, B, W)
            # Join embeddings along the third dimension
            joint = word_char if word_vec is None else tf.concat(
                [wembed, word_char], 2)
            joint = tf.nn.dropout(joint, self.pkeep)

        with tf.name_scope("Recurrence"):
            embedseq = tensorToSeq(joint)

            if rnntype == 'blstm':
                rnnfwd = tf.contrib.rnn.BasicLSTMCell(hsz)
                rnnbwd = tf.contrib.rnn.BasicLSTMCell(hsz)

                # Primitive will wrap the fwd and bwd, reverse signal for bwd, unroll
                rnnseq, _, __ = tf.contrib.rnn.static_bidirectional_rnn(
                    rnnfwd, rnnbwd, embedseq, dtype=tf.float32)
            else:
                rnnfwd = tf.contrib.rnn.BasicLSTMCell(hsz)
                # Primitive will wrap RNN and unroll in time
                # (tf.nn.rnn became tf.contrib.rnn.static_rnn in TF 1.x)
                rnnseq, _ = tf.contrib.rnn.static_rnn(rnnfwd, embedseq, dtype=tf.float32)

        with tf.name_scope("output"):
            # Converts seq to tensor, back to (B,T,W)

            if rnntype == 'blstm':
                hsz *= 2

            W = tf.Variable(tf.truncated_normal([hsz, nc], stddev=0.1),
                            name="W")
            b = tf.Variable(tf.constant(0.0, shape=[1, nc]), name="b")

            preds = [tf.matmul(rnnout, W) + b for rnnout in rnnseq]
            self.probs = seqToTensor(preds)
            self.best = tf.argmax(self.probs, 2)

            # Debug: sanity-check shapes of the joint embeddings, projection
            # weights, RNN outputs, and final probabilities
            print(joint.get_shape())
            print(W.get_shape())
            print(rnnseq[0].get_shape())
            print(rnnseq[1].get_shape())
            print(self.probs.get_shape())
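
Both params variants lean on tensorToSeq and seqToTensor helpers from the surrounding project. A minimal sketch of plausible TF 1.x implementations, assuming batch-major (B, T, W) tensors that get unstacked along time; this is an assumption about the helpers, not their verified source:

import tensorflow as tf

def tensorToSeq(tensor):
    # (B, T, W) -> list of T tensors, each shaped (B, W)
    return tf.unstack(tf.transpose(tensor, perm=[1, 0, 2]))

def seqToTensor(sequence):
    # list of T (B, W) tensors -> (B, T, W)
    return tf.transpose(tf.stack(sequence), perm=[1, 0, 2])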