Example #1
def eval_input_fn():
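    # Mock dense features, labels, and a sparse feature with fixed values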
    dense = xdl.mock_dense_op(shape=[1, 16], value=0.01)
    labels = xdl.mock_dense_op(shape=[1, 1], value=1.0)
    ids, values, segments = xdl.mock_sparse_op(dense_shape=[1, 16])
    sparse = xdl.SparseTensor(ids, values, segments)
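    # Sum-pooled embedding lookup on the sparse feature, weights initialized to ones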
    emb = xdl.embedding("sparse", sparse, xdl.Ones(), 1, 16, 'sum')
    dense.set_shape([None, 16])
    labels.set_shape([None, 1])
    return [dense, emb], labels
Example #3
def main():
    dense = xdl.mock_dense_op(shape=[1, 16], value=0.01, name_="dense")
    gear = xdl.mock_dense_op(shape=[1, 1], value=0.01, name_="gear")
    labels = xdl.mock_dense_op(shape=[1, 1], value=1.0, name_="label")
    gear.set_shape([1, 1])
    dense.set_shape([1, 16])
    labels.set_shape([1, 1])
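    # Build the main model via ams_main and fetch the "gear_grad" collection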
    with xdl.model_scope("ams_main"):
        loss = ams_main(main_model)(dense, labels, gear_inputs=[gear])
        sess = xdl.TrainSession()
        return sess.run([xdl.get_collection("gear_grad")])
Example #4
def input_fn():
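    # Mock dense features, an indicator, labels, and a sparse feature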
    dense = xdl.mock_dense_op(shape=[1, 16], value=0.01)
    indicator = xdl.mock_dense_op(shape=[5], value=0.0)
    labels = xdl.mock_dense_op(shape=[5, 1], value=1.0)
    ids, values, segments = xdl.mock_sparse_op(dense_shape=[1, 16])
    sparse = xdl.SparseTensor(ids, values, segments)
    sparse.set_shape([1, 16])
    emb = xdl.embedding("sparse", sparse, xdl.Ones(), 1, 16, 'sum')
    dense.set_shape([1, 16])
    indicator.set_shape([5])
    labels.set_shape([5, 1])
    return [dense, emb, indicator], labels
Example #5
def input_fn():
    dense = xdl.mock_dense_op(shape=[1, 16], value=0.01)
    indicator = xdl.mock_dense_op(shape=[5], value=0.0)
    labels = xdl.mock_dense_op(shape=[5, 1], value=1.0) 
    ids, values, segments = xdl.mock_sparse_op(dense_shape=[1, 16])
    sparse = xdl.SparseTensor(ids, values, segments)
    sparse.set_shape([1, 16])
    emb = xdl.embedding("sparse", sparse, xdl.Ones(), 1, 16, 'sum')
    dense.set_shape([1, 16])
    indicator.set_shape([5])
    labels.set_shape([5, 1])
    return [dense, emb, indicator], labels
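Example #6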
def main():
  dense = xdl.mock_dense_op(shape=[1, 16], value=0.01, name_="dense")
  gear = xdl.mock_dense_op(shape=[1, 1], value=0.01, name_="gear")
  labels = xdl.mock_dense_op(shape=[1, 1], value=1.0, name_="label")
  ids, values, segments = xdl.mock_sparse_op(dense_shape=[1, 16], name_="wide")
  sparse = xdl.SparseTensor(ids, values, segments)
  emb = xdl.embedding("sparse", sparse, xdl.Ones(), 1, 16, 'sum')
  gear.set_shape([None, 1])
  dense.set_shape([None, 16])
  labels.set_shape([None, 1])
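  # Build the main model via ams_main (dense + embedding) and fetch the gear gradients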
  with xdl.model_scope("ams_main"):
    loss = ams_main(main_model)(dense, emb, labels, gear_inputs=[gear])
    sess = xdl.TrainSession()
    return sess.run(xdl.get_collection("gear_grad"))
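Example #7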
def run(name1, name2, scope):
    with xdl.model_scope(scope):
        labels = xdl.mock_dense_op(shape=[1, 1], value=1.0)
        mock_embs = mock_embedding(name1, name2)
        loss = model(mock_embs, labels)
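        # NOTE: lr is not defined in this snippet; it is presumably set at module level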
        train_op = xdl.SGD(lr).optimize()
        hooks = []
        sess = xdl.TrainSession(hooks)
        run_ops = [train_op, loss]
        op_names = ['none', 'loss']

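        # Collect sparse gradients (indices and values) for every embedding variable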
        embed_vars = [
            var for var in trainable_variables() if is_embedding_var(var)
        ]
        sparse_embed_grads = []
        for var in embed_vars:
            sparse_embed_grads.append(xdl.get_sparse_grads(var.name))
            op_names.append(var.name + '.indices')
            op_names.append(var.name + '.grads')
        for i in range(len(sparse_embed_grads)):
            run_ops.append(sparse_embed_grads[i].indices)
            run_ops.append(sparse_embed_grads[i].grad)
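        # Run one training step; var_list[3] (and var_list[5]) are the embedding gradients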
        var_list = sess.run(run_ops)
        if name1 != name2:
            return var_list[3], var_list[5]
        return var_list[3]
Example #8
def test_all(self):
    dense = xdl.mock_dense_op(shape=[1, 16], value=0.01, name_="dense")
    labels = xdl.mock_dense_op(shape=[1, 1], value=1.0, name_="label")
    ids = xdl.convert_to_tensor(
        np.array([[0, 0], [0, 1], [0, 2]], dtype=np.int64))
    values = xdl.convert_to_tensor(
        np.array([1.0, 2.0, 3.0], dtype=np.float32))
    segments = xdl.convert_to_tensor(np.array([3], dtype=np.int32))
    sparse = xdl.SparseTensor(ids, values, segments)
    # Sum-pooled embedding over hashed ids (vtype='hash'), initialized to ones
    emb = xdl.embedding("sparse",
                        sparse,
                        xdl.Ones(),
                        1,
                        16,
                        'sum',
                        vtype='hash')
    loss = model(dense, emb, labels)
    train_op = xdl.SGD(0.5).optimize()
    sess = xdl.TrainSession()
    # First training step: check the loss value and the sparse gradients
    _, l, g = sess.run(
        [train_op, loss,
         xdl.get_sparse_grads('sparse').grad])
    self.assertTrue((l == np.array(0.0024364376, dtype=np.float32)).all())
    self.assertTrue(
        (g == np.array([[-0.002433472], [-0.004866944], [-0.007300416]],
                       dtype=np.float32)).all())
    # Gather the three embedding rows; they should reflect the SGD update
    sparse_var = xdl.get_variable_by_name('sparse')
    weights = sess.run(
        sparse_var.gather(
            np.array([[0, 0], [0, 1], [0, 2]], dtype=np.int64)))
    self.assertTrue(
        (weights == np.array([[1.0012168], [1.0024334], [1.0036502]],
                             dtype=np.float32)).all())
    # Second training step and the same checks again
    _, l, g = sess.run(
        [train_op, loss,
         xdl.get_sparse_grads('sparse').grad])
    self.assertTrue((l == np.array(0.002395329, dtype=np.float32)).all())
    self.assertTrue(
        (g == np.array([[-0.0023924622], [-0.0047849244], [-0.0071773864]],
                       dtype=np.float32)).all())
    weights = sess.run(
        sparse_var.gather(
            np.array([[0, 0], [0, 1], [0, 2]], dtype=np.int64)))
    self.assertTrue(
        (weights == np.array([[1.002413], [1.0048258], [1.0072389]],
                             dtype=np.float32)).all())
Example #9
def gear():
    forward = xdl.mock_dense_op(shape=[1, 16], value=0.01, name_="forward")
    backward = xdl.mock_dense_op(shape=[1, 16], value=0.02, name_="backward")
    labels = xdl.mock_dense_op(shape=[1, 1], value=1.0, name_="label1")
    init_grad = xdl.mock_dense_op(shape=[1, 1], value=0.3, name_="init_grad")
    forward.set_shape([1, 16])
    backward.set_shape([1, 16])
    labels.set_shape([1, 1])
    init_grad.set_shape([1, 1])
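    # Wrap gear_model with ams_gear, using the mocked forward/backward tensors and initial gradient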
    predict = ams_gear([forward], [backward], init_grad)(gear_model)(None)
    with xdl.model_scope("ams_gear_forward"):
        sess = xdl.TrainSession()
        prediction = sess.run(predict)
    with xdl.model_scope("ams_gear_backward"):
        grads = xdl.get_gradient("fc_weight")
        sess = xdl.TrainSession()
        fc_weight_grad = sess.run(grads)
        return prediction, fc_weight_grad
Example #10
def main():
    dense = xdl.mock_dense_op(shape=[1, 16], value=0.01, name_="dense")
    labels = xdl.mock_dense_op(shape=[1, 1], value=1.0, name_="label")
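    # Build a 3-entry sparse feature by hand: ids, values, and segment sizes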
    ids = xdl.convert_to_tensor(
        np.array([[0, 0], [0, 1], [0, 2]], dtype=np.int64))
    values = xdl.convert_to_tensor(np.array([1.0, 2.0, 3.0], dtype=np.float32))
    segments = xdl.convert_to_tensor(np.array([3], dtype=np.int32))
    sparse = xdl.SparseTensor(ids, values, segments)
    emb = xdl.embedding("sparse",
                        sparse,
                        xdl.Ones(),
                        1,
                        16,
                        'sum',
                        vtype='hash')
    loss = model(dense, emb, labels)
    train_op = xdl.SGD(0.5).optimize()
    sess = xdl.TrainSession()
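    # Only the loss and sparse gradients are fetched; train_op is not executed, so no update is applied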
    loss, gradients = sess.run([loss, xdl.get_sparse_grads('sparse').grad])
    return loss, gradients
Example #11
def run(name1, name2, scope, optimizer):
    with xdl.model_scope(scope):
        labels = xdl.mock_dense_op(shape=[1, 1], value=1.0)
        mock_embs = mock_embedding(name1, name2)
        loss = model(mock_embs, labels)
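        # Pick the optimizer by name; unknown names fall back to SGD(0.5)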
        if optimizer == 'sgd':
            train_op = xdl.SGD(0.5).optimize()
        elif optimizer == 'momentum':
            train_op = xdl.Momentum(0.005, 0.99).optimize()
        elif optimizer == 'ftrl':
            train_op = xdl.Ftrl(0.01).optimize()
        elif optimizer == 'adam':
            train_op = xdl.Adam(0.001).optimize()
        elif optimizer == 'adagrad':
            train_op = xdl.Adagrad(0.04, 0.1).optimize()
        elif optimizer == 'rmsprop':
            train_op = xdl.RMSProp(0.001).optimize()
        else:
            train_op = xdl.SGD(0.5).optimize()
        hooks = []
        sess = xdl.TrainSession(hooks)
        run_ops = [train_op, loss]
        op_names = ['none', 'loss']

        embed_vars = [
            var for var in trainable_variables_with_scope(scope)
            if is_embedding_var(var)
        ]
        sparse_embed_grads = []
        for var in embed_vars:
            sparse_embed_grads.append(xdl.get_sparse_grads(var.name))
            op_names.append(var.name + '.indices')
            op_names.append(var.name + '.grads')
        for i in range(len(sparse_embed_grads)):
            run_ops.append(sparse_embed_grads[i].indices)
            run_ops.append(sparse_embed_grads[i].grad)
        var_list = sess.run(run_ops)
        if name1 != name2:
            return var_list[3], var_list[5]
        return var_list[3]