def run(name1, name2, scope):
    with xdl.model_scope(scope):
        labels = xdl.mock_dense_op(shape=[1, 1], value=1.0)
        mock_embs = mock_embedding(name1, name2)
        loss = model(mock_embs, labels)
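        # lr is assumed to be defined elsewhere in the module (the other examples use 0.5)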
        train_op = xdl.SGD(lr).optimize()
        hooks = []
        sess = xdl.TrainSession(hooks)
        run_ops = [train_op, loss]
        op_names = ['none', 'loss']

        embed_vars = [
            var for var in trainable_variables() if is_embedding_var(var)
        ]
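        # fetch the sparse gradient handles (indices and values) for each embedding variable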
        sparse_embed_grads = []
        for var in embed_vars:
            sparse_embed_grads.append(xdl.get_sparse_grads(var.name))
            op_names.append(var.name + '.indices')
            op_names.append(var.name + '.grads')
        for i in range(len(sparse_embed_grads)):
            run_ops.append(sparse_embed_grads[i].indices)
            run_ops.append(sparse_embed_grads[i].grad)
        var_list = sess.run(run_ops)
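        # run_ops is [train_op, loss, idx_1, grad_1(, idx_2, grad_2)]; return just the gradient values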
        if name1 != name2:
            return var_list[3], var_list[5]
        return var_list[3]
Example #2
def train_and_evaluate():
    estimator = xdl.Estimator(model_fn=model_fn, optimizer=xdl.SGD(0.5))

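    # train to 5000 steps, evaluating for 200 steps every 1000 steps and checkpointing every 1000 steps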
    estimator.train_and_evaluate(train_input_fn=input_fn,
                                 eval_input_fn=eval_input_fn,
                                 eval_interval=1000,
                                 eval_steps=200,
                                 checkpoint_interval=1000,
                                 max_step=5000)
Example #3
def train():
    batch = reader.read()
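    # embedding lookups for the two sparse feature groups, stored in hash-table variables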
    emb1 = xdl.embedding('emb1', batch['sparse0'], xdl.TruncatedNormal(stddev=0.001), 8, 1024, vtype='hash')
    emb2 = xdl.embedding('emb2', batch['sparse1'], xdl.TruncatedNormal(stddev=0.001), 8, 1024, vtype='hash')
    loss = model(batch['deep0'], [emb1, emb2], batch['label'])
    train_op = xdl.SGD(0.5).optimize()
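    # hook that logs the loss every 10 steps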
    log_hook = xdl.LoggerHook(loss, "loss:{0}", 10)
    sess = xdl.TrainSession(hooks=[log_hook])
    while not sess.should_stop():
        sess.run(train_op)
Example #4
def run(name1, name2, scope, optimizer):
    with xdl.model_scope(scope):
        labels = xdl.mock_dense_op(shape=[1, 1], value=1.0)
        mock_embs = mock_embedding(name1, name2)
        loss = model(mock_embs, labels)
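        # build the train op with the optimizer under test (SGD is the fallback)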
        if optimizer == 'sgd':
            train_op = xdl.SGD(0.5).optimize()
        elif optimizer == 'momentum':
            train_op = xdl.Momentum(0.005, 0.99).optimize()
        elif optimizer == 'ftrl':
            train_op = xdl.Ftrl(0.01).optimize()
        elif optimizer == 'adam':
            train_op = xdl.Adam(0.001).optimize()
        elif optimizer == 'adagrad':
            train_op = xdl.Adagrad(0.04, 0.1).optimize()
        elif optimizer == 'rmsprop':
            train_op = xdl.RMSProp(0.001).optimize()
        else:
            train_op = xdl.SGD(0.5).optimize()
        hooks = []
        sess = xdl.TrainSession(hooks)
        run_ops = [train_op, loss]
        op_names = ['none', 'loss']

        embed_vars = [
            var for var in trainable_variables_with_scope(scope)
            if is_embedding_var(var)
        ]
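        # fetch indices and gradient values for every embedding variable created under this scope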
        sparse_embed_grads = []
        for var in embed_vars:
            sparse_embed_grads.append(xdl.get_sparse_grads(var.name))
            op_names.append(var.name + '.indices')
            op_names.append(var.name + '.grads')
        for i in range(len(sparse_embed_grads)):
            run_ops.append(sparse_embed_grads[i].indices)
            run_ops.append(sparse_embed_grads[i].grad)
        var_list = sess.run(run_ops)
        if name1 != name2:
            return var_list[3], var_list[5]
        return var_list[3]
Example #5
def test_all(self):
    dense = xdl.mock_dense_op(shape=[1, 16], value=0.01, name_="dense")
    labels = xdl.mock_dense_op(shape=[1, 1], value=1.0, name_="label")
    ids = xdl.convert_to_tensor(
        np.array([[0, 0], [0, 1], [0, 2]], dtype=np.int64))
    values = xdl.convert_to_tensor(
        np.array([1.0, 2.0, 3.0], dtype=np.float32))
    segments = xdl.convert_to_tensor(np.array([3], dtype=np.int32))
    sparse = xdl.SparseTensor(ids, values, segments)
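    # three ids in a single segment with values 1.0, 2.0, 3.0, pooled with 'sum'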
    emb = xdl.embedding("sparse",
                        sparse,
                        xdl.Ones(),
                        1,
                        16,
                        'sum',
                        vtype='hash')
    loss = model(dense, emb, labels)
    train_op = xdl.SGD(0.5).optimize()
    sess = xdl.TrainSession()
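    # first step: run the train op and fetch the loss and the sparse gradients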
    _, l, g = sess.run(
        [train_op, loss,
         xdl.get_sparse_grads('sparse').grad])
    self.assertTrue((l == np.array(0.0024364376, dtype=np.float32)).all())
    self.assertTrue(
        (g == np.array([[-0.002433472], [-0.004866944], [-0.007300416]],
                       dtype=np.float32)).all())
    sparse_var = xdl.get_variable_by_name('sparse')
    weights = sess.run(
        sparse_var.gather(
            np.array([[0, 0], [0, 1], [0, 2]], dtype=np.int64)))
    self.assertTrue(
        (weights == np.array([[1.0012168], [1.0024334], [1.0036502]],
                             dtype=np.float32)).all())
    _, l, g = sess.run(
        [train_op, loss,
         xdl.get_sparse_grads('sparse').grad])
    self.assertTrue((l == np.array(0.002395329, dtype=np.float32)).all())
    self.assertTrue(
        (g == np.array([[-0.0023924622], [-0.0047849244], [-0.0071773864]],
                       dtype=np.float32)).all())
    weights = sess.run(
        sparse_var.gather(
            np.array([[0, 0], [0, 1], [0, 2]], dtype=np.int64)))
    self.assertTrue(
        (weights == np.array([[1.002413], [1.0048258], [1.0072389]],
                             dtype=np.float32)).all())
Example #6
def main():
    dense = xdl.mock_dense_op(shape=[1, 16], value=0.01, name_="dense")
    labels = xdl.mock_dense_op(shape=[1, 1], value=1.0, name_="label")
    ids = xdl.convert_to_tensor(
        np.array([[0, 0], [0, 1], [0, 2]], dtype=np.int64))
    values = xdl.convert_to_tensor(np.array([1.0, 2.0, 3.0], dtype=np.float32))
    segments = xdl.convert_to_tensor(np.array([3], dtype=np.int32))
    sparse = xdl.SparseTensor(ids, values, segments)
    emb = xdl.embedding("sparse",
                        sparse,
                        xdl.Ones(),
                        1,
                        16,
                        'sum',
                        vtype='hash')
    loss = model(dense, emb, labels)
    train_op = xdl.SGD(0.5).optimize()
    sess = xdl.TrainSession()
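    # fetch the loss and sparse gradients; note that train_op is built but not run here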
    loss, gradients = sess.run([loss, xdl.get_sparse_grads('sparse').grad])
    return loss, gradients
Example #7
def predict():
    estimator = xdl.Estimator(model_fn=model_fn, optimizer=xdl.SGD(0.5))
    estimator.predict(input_fn, checkpoint_version="", max_step=2000)
Example #8
def evaluate():
    estimator = xdl.Estimator(model_fn=model_fn, optimizer=xdl.SGD(0.5))
    estimator.evaluate(input_fn, checkpoint_version="", max_step=2000)
Example #9
def train():
    estimator = xdl.Estimator(model_fn=model_fn, optimizer=xdl.SGD(0.5))
    estimator.train(input_fn, max_step=2000, checkpoint_interval=1000)
Example #10
def train():
    batch = data_io.read()
    print(batch)

    embs = list()

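    # embs_len is assumed to be defined at module level; four item_* features are consumed below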
    for i in range(1, embs_len + 1):
        name = "item_%d" % i
        emb = xdl.embedding(name,
                            batch[name],
                            xdl.Ones(),
                            1,
                            1000,
                            'sum',
                            vtype='hash')
        embs.append(emb)
        print "emb =", name, ", shape =", emb.shape
    print "origin batch[label].shape =", batch["label"].shape

    loss, prop, label, indicator, din, dout, fc1_weight, fc1_bias, fc2_weight, fc2_bias = model(
        embs, batch["label"], 4, 7)
    train_op = xdl.SGD(0.5).optimize()

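    # gradients of the embedding tables and the fully connected layers, fetched by variable name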
    item1_grad = xdl.get_gradient('item_1')
    item2_grad = xdl.get_gradient('item_2')
    item3_grad = xdl.get_gradient('item_3')
    item4_grad = xdl.get_gradient('item_4')
    fc1_weight_grad = xdl.get_gradient('fc1_weight')
    fc1_bias_grad = xdl.get_gradient('fc1_bias')
    fc2_weight_grad = xdl.get_gradient('fc2_weight')
    fc2_bias_grad = xdl.get_gradient('fc2_bias')

    sess = xdl.TrainSession()

    loop_num = 0
    while not sess.should_stop():
        if loop_num == 5:
            break
        print "\n>>>>>>>>>>>> loop_num = %d" % loop_num
        result = sess.run([train_op, loss, prop, batch['label'], label, indicator, din, dout, \
                           batch['item_1'].ids, batch['item_1'].segments, batch['item_1'].values, \
                           batch['item_2'].ids, batch['item_2'].segments, batch['item_2'].values, \
                           batch['item_3'].ids, batch['item_3'].segments, batch['item_3'].values, \
                           batch['item_4'].ids, batch['item_4'].segments, batch['item_4'].values, \
                           item1_grad, item2_grad, item3_grad, item4_grad, \
                           fc1_weight, fc1_bias, fc1_weight_grad, fc1_bias_grad, \
                           fc2_weight, fc2_bias, fc2_weight_grad, fc2_bias_grad])
        if result is None:
            break
        print "loss:", result[-31]
        print "prop:", result[-30]
        print "origin label:", result[-29]
        print "label:", result[-28]
        print "indicator:", result[-27]
        print "din:", result[-26]
        print "dout:", result[-25]
        print "item_1: ids=", result[-24], "\n        segments=", result[
            -23], "\n        values=", result[-22]
        print "item_2: ids=", result[-21], "\n        segments=", result[
            -20], "\n        values=", result[-19]
        print "item_3: ids=", result[-18], "\n        segments=", result[
            -17], "\n        values=", result[-16]
        print "item_4: ids=", result[-15], "\n        segments=", result[
            -14], "\n        values=", result[-13]
        print "item1_grad", result[-12]
        print "item2_grad", result[-11]
        print "item1_grad", result[-10]
        print "item2_grad", result[-9]
        print "fc1_weight", result[-8]
        print "fc1_bias", result[-7]
        print "fc1_weight_grad", result[-6]
        print "fc1_bias_grad", result[-5]
        print "fc2_weight", result[-4]
        print "fc2_bias", result[-3]
        print "fc2_weight_grad", result[-2]
        print "fc2_bias_grad", result[-1]
        loop_num += 1