def test_all(self):
    """Dense Adam apply: two updates on a 4-element ones variable.

    Checks exact float32 values after each application of the op.
    """
    var = xdl.Variable(name="w", dtype=DataType.float, shape=[4],
                       initializer=xdl.Ones())
    execute(xdl.variable_registers())
    execute(xdl.global_initializers())
    # np.float was removed in NumPy 1.24; np.float64 is the same type.
    op = xdl.ps_dense_apply_adam_op(
        beta1=np.array(0.9, dtype=np.float64),
        beta2=np.array(0.999, dtype=np.float64),
        epsilon=np.array(1e-08, dtype=np.float64),
        learning_rate=np.array(0.1, dtype=np.float64),
        grad=np.array([1, 2, 3, 4], dtype=np.float32),
        lr_decay=True,
        var_name="w",
        var_type="index")
    execute(op)
    ret = execute(var.value)
    self.assertTrue((ret == np.array(
        [0.90000004, 0.90000004, 0.90000004, 0.90000004],
        dtype=np.float32)).all())
    execute(op)
    ret = execute(var.value)
    self.assertTrue((ret == np.array(
        [0.8000001, 0.8000001, 0.8000001, 0.8],
        dtype=np.float32)).all())
def test_all(self):
    """Sparse momentum apply: updates rows 1 and 2 of a [4, 1] ones variable.

    Checks exact float32 values after each application of the op.
    """
    var = xdl.Variable(name="w", dtype=DataType.float, shape=[4, 1],
                       initializer=xdl.Ones())
    execute(xdl.variable_registers())
    execute(xdl.global_initializers())
    # np.float was removed in NumPy 1.24; np.float64 is the same type.
    op = xdl.ps_sparse_apply_momentum_op(
        learning_rate=np.array(0.5, dtype=np.float64),
        momentum=np.array(0.9, dtype=np.float64),
        grad=np.array([[1], [2]], dtype=np.float32),
        indices=np.array([1, 2], dtype=np.int32),
        var_name="w",
        var_type="index",
        use_nesterov=False)
    execute(op)
    ret = execute(var.value)
    self.assertTrue((ret == np.array([[1], [0.5], [0], [1]],
                                     dtype=np.float32)).all())
    execute(op)
    ret = execute(var.value)
    self.assertTrue((ret == np.array([[1], [-0.45], [-1.9], [1]],
                                     dtype=np.float32)).all())
def test_all(self):
    """Identity initializer copies the given matrix into the variable."""
    seed = np.array([[1, 2], [3, 4]], dtype=np.float32)
    weight = xdl.Variable(name="w", dtype=DataType.float, shape=[2, 2],
                          initializer=xdl.Identity(seed))
    execute(xdl.variable_registers())
    execute(xdl.global_initializers())
    value = execute(weight.value)
    expected = np.array([[1, 2], [3, 4]])
    self.assertTrue((value == expected).all())
def test_all(self):
    """ps_pull_op returns the whole ones-initialized int32 vector."""
    weight = xdl.Variable(name="w", dtype=DataType.int32, shape=[4],
                          initializer=xdl.Ones())
    execute(xdl.variable_registers())
    execute(xdl.global_initializers())
    pull = xdl.ps_pull_op(var_name="w", var_type="index",
                          dtype=DataType.int32)
    pulled = execute(pull)
    self.assertTrue((pulled == np.array([1, 1, 1, 1])).all())
def test_all(self):
    """Sparse FTRL apply: two updates on rows 1 and 2 of a [4, 1] ones variable.

    Checks exact float32 values after each application of the op.
    """
    var = xdl.Variable(name="w", dtype=DataType.float, shape=[4, 1],
                       initializer=xdl.Ones())
    execute(xdl.variable_registers())
    execute(xdl.global_initializers())
    # np.float was removed in NumPy 1.24; np.float64 is the same type.
    op = xdl.ps_sparse_apply_ftrl_op(
        learning_rate=np.array(0.1, dtype=np.float64),
        learning_rate_power=np.array(-0.5, dtype=np.float64),
        initial_accumulator_value=np.array(0.1, dtype=np.float64),
        l1_reg=np.array(0, dtype=np.float64),
        l2_reg=np.array(0, dtype=np.float64),
        grad=np.array([[1], [2]], dtype=np.float32),
        indices=np.array([1, 2], dtype=np.int32),
        var_name="w",
        var_type="index")
    execute(op)
    ret = execute(var.value)
    self.assertTrue((ret == np.array([[1], [0.6031424], [0.7450533], [1]],
                                     dtype=np.float32)).all())
    execute(op)
    ret = execute(var.value)
    self.assertTrue((ret == np.array([[1], [0.5341358], [0.6747804], [1]],
                                     dtype=np.float32)).all())
def test_all(self):
    """ps_assign_add_op adds a delta vector to a zero-initialized int32 variable."""
    weight = xdl.Variable(name="w", dtype=DataType.int32, shape=[4],
                          initializer=xdl.Zeros())
    execute(xdl.variable_registers())
    execute(xdl.global_initializers())
    delta = np.array([1, 2, 3, 4], dtype=np.int32)
    add_op = xdl.ps_assign_add_op(var_name="w", var_type="index", delta=delta)
    execute(add_op)
    result = execute(weight.value)
    self.assertTrue((result == np.array([1, 2, 3, 4])).all())
def test_auc(self):
    """AUC metric over a small batch of labels/predictions.

    Original had Python 2 `print res`, a SyntaxError on Python 3;
    changed to the print() function form. No assertion in the original —
    the test only checks the metric executes and prints its value.
    """
    labels = np.array([1, 0, 1, 1, 0, 0, 1, 1, 0, 0], dtype=np.float32)
    predicts = np.array([0.7, 0.2, 0.6, 0.8, 0.1, 0.2, 0.6, 0.9, 0.1, 0.1],
                        dtype=np.float32)
    res = xdl.auc(predicts, labels)
    execute(xdl.variable_registers())
    execute(xdl.global_initializers())
    res = xdl.execute(res)
    print(res)
def test_all(self):
    """Constant initializer fills an int32 variable with the value 8."""
    weight = xdl.Variable(name="w", dtype=DataType.int32, shape=[4],
                          initializer=xdl.Constant(8.0))
    execute(xdl.variable_registers())
    execute(xdl.global_initializers())
    value = execute(weight.value)
    expected = np.array([8, 8, 8, 8])
    self.assertTrue((value == expected).all())
def test_all(self):
    """ps_sparse_assign_op writes rows 1 and 2 of a zero [4, 2] variable."""
    weight = xdl.Variable(name="w", dtype=DataType.float, shape=[4, 2],
                          initializer=xdl.Zeros())
    execute(xdl.variable_registers())
    execute(xdl.global_initializers())
    assign = xdl.ps_sparse_assign_op(
        var_name="w",
        var_type="index",
        ids=np.array([1, 2], dtype=np.int32),
        values=np.array([[1, 2], [3, 4]], dtype=np.float32))
    execute(assign)
    value = execute(weight.value)
    expected = np.array([[0, 0], [1, 2], [3, 4], [0, 0]], dtype=np.float32)
    self.assertTrue((value == expected).all())
def test_all(self):
    """ps_sparse_pull_op fetches rows 1 and 3 of a ones [4, 2] int32 variable."""
    weight = xdl.Variable(name="w", dtype=DataType.int32, shape=[4, 2],
                          initializer=xdl.Ones())
    execute(xdl.variable_registers())
    execute(xdl.global_initializers())
    pull = xdl.ps_sparse_pull_op(var_name="w",
                                 var_type="index",
                                 save_ratio=1.0,
                                 otype=DataType.int32,
                                 ids=np.array([1, 3], dtype=np.int32))
    rows = execute(pull)
    self.assertTrue((rows == np.array([[1, 1], [1, 1]])).all())
def test_all(self):
    """ps_apply_moving_average_op blends a value into a ones variable (moment 0.8)."""
    weight = xdl.Variable(name="w", dtype=DataType.float, shape=[4],
                          initializer=xdl.Ones())
    execute(xdl.variable_registers())
    execute(xdl.global_initializers())
    avg_op = xdl.ps_apply_moving_average_op(
        var_name="w",
        moment=0.8,
        value=np.array([1, 2, 3, 3], dtype=np.float32))
    execute(avg_op)
    result = execute(weight.value)
    expected = np.array([1., 1.2, 1.4, 1.4], dtype=np.float32)
    self.assertTrue((result == expected).all())
def test_all(self):
    """Mark ids on a hash variable, then run the matching filter op.

    NOTE(review): the original has no assertions — this test only checks
    that ps_mark_op and ps_filter_op execute without raising.
    """
    weight = xdl.Variable(name="w", dtype=DataType.int64, shape=[4, 8],
                          vtype=VarType.Hash, initializer=xdl.Ones())
    execute(xdl.variable_registers())
    execute(xdl.global_initializers())
    marked_ids = np.array([[10, 10], [10, 10], [12, 12]], dtype=np.int64)
    execute(xdl.ps_mark_op(var_name="w", ids=marked_ids, pattern="g", i=12))
    execute(xdl.ps_filter_op(var_name="w", pattern="i==g", i=12, d=0.5))
def test_all(self):
    """Sparse Adagrad apply: two updates on rows 1 and 2 of a [4, 1] ones variable.

    Checks exact float32 values after each application of the op.
    """
    var = xdl.Variable(name="w", dtype=DataType.float, shape=[4, 1],
                       initializer=xdl.Ones())
    execute(xdl.variable_registers())
    execute(xdl.global_initializers())
    # np.float was removed in NumPy 1.24; np.float64 is the same type.
    op = xdl.ps_sparse_apply_adagrad_op(
        learning_rate=np.array(0.5, dtype=np.float64),
        initial_accumulator_value=np.array(0.0, dtype=np.float64),
        grad=np.array([[1], [2]], dtype=np.float32),
        indices=np.array([1, 2], dtype=np.int32),
        var_name="w",
        var_type="index")
    execute(op)
    ret = execute(var.value)
    self.assertTrue((ret == np.array([[1], [0.5], [0.5], [1]],
                                     dtype=np.float32)).all())
    execute(op)
    ret = execute(var.value)
    self.assertTrue((ret == np.array([[1], [0.14644662], [0.14644662], [1]],
                                     dtype=np.float32)).all())
def test_all(self):
    """Dense Adagrad apply: two updates on a 4-element ones variable.

    Checks exact values after each application of the op.
    """
    # NOTE(review): sibling tests use DataType.float here; xdl.DT_FLOAT is
    # kept as-is since it may be an alias — confirm against xdl's API.
    var = xdl.Variable(name="w", dtype=xdl.DT_FLOAT, shape=[4],
                       initializer=xdl.Ones())
    execute(xdl.variable_registers())
    execute(xdl.global_initializers())
    # np.float was removed in NumPy 1.24; np.float64 is the same type.
    op = xdl.ps_dense_apply_adagrad_op(
        learning_rate=np.array(0.5, dtype=np.float64),
        initial_accumulator_value=np.array(0.0, dtype=np.float64),
        grad=np.array([1, 2, 3, 4], dtype=np.float32),
        var_name="w",
        var_type="index")
    execute(op)
    ret = execute(var.value)
    self.assertTrue((ret == np.array([0.5, 0.5, 0.5, 0.5])).all())
    execute(op)
    ret = execute(var.value)
    print(ret)
    self.assertTrue((ret == np.array(
        [0.14644662, 0.14644662, 0.14644662, 0.14644662],
        dtype=np.float32)).all())
def test_all(self):
    """Dense FTRL apply: two updates on a 4-element ones variable.

    Checks exact float32 values after each application of the op.
    """
    var = xdl.Variable(name="w", dtype=DataType.float, shape=[4],
                       initializer=xdl.Ones())
    execute(xdl.variable_registers())
    execute(xdl.global_initializers())
    # np.float was removed in NumPy 1.24; np.float64 is the same type.
    op = xdl.ps_dense_apply_ftrl_op(
        learning_rate=np.array(0.1, dtype=np.float64),
        learning_rate_power=np.array(-0.5, dtype=np.float64),
        initial_accumulator_value=np.array(0.1, dtype=np.float64),
        l1_reg=np.array(0, dtype=np.float64),
        l2_reg=np.array(0, dtype=np.float64),
        grad=np.array([1, 2, 3, 4], dtype=np.float32),
        var_name="w",
        var_type="index")
    execute(op)
    ret = execute(var.value)
    self.assertTrue((ret == np.array(
        [0.6031424, 0.7450533, 0.7957225, 0.8215],
        dtype=np.float32)).all())
    execute(op)
    ret = execute(var.value)
    self.assertTrue((ret == np.array(
        [0.5341358, 0.6747804, 0.7252074, 0.75089955],
        dtype=np.float32)).all())
def test_all(self):
    """Dense momentum apply: two updates on a 4-element ones variable.

    Checks exact float32 values after each application of the op.
    """
    var = xdl.Variable(name="w", dtype=DataType.float, shape=[4],
                       initializer=xdl.Ones())
    execute(xdl.variable_registers())
    execute(xdl.global_initializers())
    # np.float was removed in NumPy 1.24; np.float64 is the same type.
    op = xdl.ps_dense_apply_momentum_op(
        learning_rate=np.array(0.5, dtype=np.float64),
        momentum=np.array(0.9, dtype=np.float64),
        grad=np.array([1, 2, 3, 4], dtype=np.float32),
        var_name="w",
        var_type="index",
        use_nesterov=False)
    execute(op)
    ret = execute(var.value)
    self.assertTrue((ret == np.array([0.5, 0, -0.5, -1],
                                     dtype=np.float32)).all())
    execute(op)
    ret = execute(var.value)
    self.assertTrue((ret == np.array([-0.45, -1.9, -3.35, -4.8],
                                     dtype=np.float32)).all())