Example #1
    def __init__(self, args, vocab):
        dim_emb = args.dim_emb
        filter_sizes = [int(x) for x in args.filter_sizes.split(',')]
        n_filters = args.n_filters

        self.dropout = tf.placeholder(tf.float32, name='dropout')
        self.learning_rate = tf.placeholder(tf.float32, name='learning_rate')
        self.x = tf.placeholder(
            tf.int32,
            [None, None],  # batch_size * max_len
            name='x')
        self.y = tf.placeholder(tf.float32, [None], name='y')

        # embedding lookup: map word ids to dense vectors
        embedding = tf.get_variable('embedding', [vocab.size, dim_emb])
        x = tf.nn.embedding_lookup(embedding, self.x)

        # apply the CNN to get one logit per example
        self.logits = cnn(x, filter_sizes, n_filters, self.dropout, 'cnn')

        # sigmoid turns the logit into a binary class probability
        self.probs = tf.sigmoid(self.logits)

        # cross entropy loss
        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=self.y,
                                                       logits=self.logits)

        # mean over all samples
        self.loss = tf.reduce_mean(loss)

        # optimize
        self.optimizer = tf.train.AdamOptimizer(self.learning_rate) \
          .minimize(self.loss)

        self.saver = tf.train.Saver()
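A minimal sketch of how this graph could be driven at train time. The class name Model, the batches iterator, and the hyperparameter values below are assumptions for illustration, not part of the example:

import tensorflow as tf  # TF 1.x graph mode

model = Model(args, vocab)  # hypothetical class wrapping the __init__ above
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for bx, by in batches:  # bx: [batch_size, max_len] word ids, by: [batch_size] 0/1 labels
        _, cur_loss = sess.run(
            [model.optimizer, model.loss],
            feed_dict={model.x: bx,
                       model.y: by,
                       model.dropout: 0.5,          # assumed dropout value
                       model.learning_rate: 1e-3})  # assumed learning rate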
Example #2
    def __init__(self, args, vocab):
        dim_emb = args.dim_emb
        filter_sizes = [int(x) for x in args.filter_sizes.split(',')]
        n_filters = args.n_filters

        self.dropout = tf.placeholder(tf.float32,
            name='dropout')
        self.learning_rate = tf.placeholder(tf.float32,
            name='learning_rate')
        self.x = tf.placeholder(tf.int32, [None, None],  # batch_size * max_len
            name='x')
        self.y = tf.placeholder(tf.float32, [None],
            name='y')

        embedding = tf.get_variable('embedding', [vocab.size, dim_emb])
        x = tf.nn.embedding_lookup(embedding, self.x)
        self.logits = cnn(x, filter_sizes, n_filters, self.dropout, 'cnn')
        self.probs = tf.sigmoid(self.logits)

        loss = tf.nn.sigmoid_cross_entropy_with_logits(
            labels=self.y, logits=self.logits)
        self.loss = tf.reduce_mean(loss)
        self.optimizer = tf.train.AdamOptimizer(self.learning_rate) \
            .minimize(self.loss)

        self.saver = tf.train.Saver()
Example #3
import pickle

import numpy as np


def classify_cnn(model_path, text, dp, t=0):
    # load the preprocessing artifacts (vocabulary, categories, max sequence length)
    with open(dp, 'rb') as f:
        p = pickle.load(f)

    # sort for deterministic indexing, then build word -> id and category -> id maps;
    # word ids start at 1, presumably leaving 0 for padding
    v = {w: i + 1 for i, w in enumerate(sorted(p['vocab']))}
    c = {w: i for i, w in enumerate(sorted(p['categories']))}

    # round the maximum sequence length up to the next multiple of 10
    ms = 10 * int(p['max_sequence'] / 10) + 10

    model = cnn((ms,), len(v) + 1, ms, pretrained_weights=model_path, t=t)

    # tokenize, map to a padded id vector, and shape it as a batch of one
    x = preprocessing(text)
    x = compute_input(x, v, ms)
    x = np.reshape(x, (1, 5430))  # hardcoded input length; presumably matches ms for this dataset

    y = model.predict_on_batch(x)
    return read_output(y, c)
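A brief usage sketch; the file paths and input text are placeholders, not taken from the source:

# Hypothetical call: 'weights.h5' and 'preproc.pkl' are placeholder paths.
category = classify_cnn('weights.h5', 'example document text', 'preproc.pkl')
print(category)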
Example #4
    def __init__(self, args, vocab):
        dim_emb = args.dim_emb
        filter_sizes = [int(x) for x in args.filter_sizes.split(',')]
        n_filters = args.n_filters

        self.dropout = tf.compat.v1.placeholder(tf.float32,
            name='dropout')
        self.learning_rate = tf.compat.v1.placeholder(tf.float32,
            name='learning_rate')
        self.x = tf.compat.v1.placeholder(tf.int32, [None, None],  # batch_size * max_len
            name='x')
        self.y = tf.compat.v1.placeholder(tf.float32, [None],
            name='y')

        embedding = tf.compat.v1.get_variable('embedding', [vocab.size, dim_emb])
        x = tf.nn.embedding_lookup(params=embedding, ids=self.x)
        self.logits = cnn(x, filter_sizes, n_filters, self.dropout, 'cnn')
        self.probs = tf.sigmoid(self.logits)

        loss = tf.nn.sigmoid_cross_entropy_with_logits(
            labels=self.y, logits=self.logits)
        self.loss = tf.reduce_mean(input_tensor=loss)
        self.optimizer = tf.compat.v1.train.AdamOptimizer(self.learning_rate) \
            .minimize(self.loss)

        self.saver = tf.compat.v1.train.Saver()
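This variant is the same graph ported to the tf.compat.v1 API. Under TensorFlow 2.x, placeholder/Session-style graphs only run with eager execution turned off, so a setup line like the following is presumed somewhere before the class is built:

import tensorflow as tf

# Required under TF 2.x: tf.compat.v1.placeholder raises an error while
# eager execution is active, so v1 graph mode must be enabled first.
tf.compat.v1.disable_eager_execution()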
Example #5
    def __init__(self, args, vocab):
        dim_emb = args.dim_emb
        filter_sizes = range(1, 1 + args.max_filter_width)
        n_filters = args.n_filters

        self.dropout = tf.placeholder(tf.float32, name='dropout')
        self.learning_rate = tf.placeholder(tf.float32, name='learning_rate')
        self.x = tf.placeholder(
            tf.int32,
            [None, None],  # batch_size * max_len
            name='x')
        self.y = tf.placeholder(tf.float32, [None], name='y')

        embedding = tf.get_variable('embedding', [vocab.size, dim_emb])
        x = tf.nn.embedding_lookup(embedding, self.x)
        self.logits = cnn(x, filter_sizes, n_filters, self.dropout, 'cnn')
        self.probs = tf.sigmoid(self.logits)

        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=self.y,
                                                       logits=self.logits)
        self.loss = tf.reduce_mean(loss)
        self.optimizer = tf.train.AdamOptimizer(self.learning_rate) \
            .minimize(self.loss)

        self.saver = tf.train.Saver()
Example #6
image = tf.decode_raw(features['image_raw'], tf.uint8)
# each image is 28x28 pixels, so 28 * 28 = 784 values
image.set_shape([784])

image = tf.cast(image, tf.float32) * (1. / 255)
label = tf.cast(features['label'], tf.int32)

images_batch, labels_batch = tf.train.shuffle_batch([image, label],
                                                    batch_size=128,
                                                    capacity=2000,
                                                    min_after_dequeue=1000)

# keep probability is the complement of the 'dropout' rate used in some other
# frameworks: a keep probability of 0.75 == a dropout rate of 0.25
keep_prob = 0.75
y_pred = cnn(images_batch, keep_prob)

loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y_pred,
                                                      labels=labels_batch)
loss_mean = tf.reduce_mean(loss)
train_op = tf.train.AdamOptimizer().minimize(loss_mean)  # minimize the scalar mean loss

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

init = tf.local_variables_initializer()
sess.run(init)

coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
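The snippet ends right after the queue-runner threads start; a minimal sketch of the training loop and shutdown that typically follows (the step count and logging cadence are assumptions):

try:
    for step in range(1000):  # assumed number of training steps
        _, loss_val = sess.run([train_op, loss_mean])
        if step % 100 == 0:
            print('step %d, loss %.4f' % (step, loss_val))
except tf.errors.OutOfRangeError:
    print('input queue exhausted')
finally:
    coord.request_stop()  # ask the queue threads to stop
coord.join(threads)       # wait for them to finish
sess.close()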