Example #1
 def test_all(self):
     """Sparse-assign: overwrite rows 1 and 2 of a zero-initialized 4x2 variable.

     Registers and initializes the variable, runs ps_sparse_assign_op on the
     selected rows, then checks that only those rows changed.
     """
     var = xdl.Variable(name="w", dtype=DataType.float, shape=[4, 2],
                        initializer=xdl.Zeros())
     execute(xdl.variable_registers())
     execute(xdl.global_initializers())
     row_ids = np.array([1, 2], dtype=np.int32)
     new_rows = np.array([[1, 2], [3, 4]], dtype=np.float32)
     assign = xdl.ps_sparse_assign_op(var_name="w",
                                      var_type="index",
                                      ids=row_ids,
                                      values=new_rows)
     execute(assign)
     result = execute(var.value)
     # Untouched rows (0 and 3) must keep their zero initialization.
     expected = np.array([[0, 0], [1, 2], [3, 4], [0, 0]], dtype=np.float32)
     self.assertTrue((result == expected).all())
Example #2
 def test_all(self):
     """Sparse-Adagrad: apply the op twice to rows 1 and 2 of a 4x1 variable.

     The variable starts at all ones; lr=0.5 and the accumulator starts at 0,
     so after step one both touched rows are 1 - 0.5*g/|g| = 0.5, and after
     step two 0.5 - 0.5/sqrt(2) ~= 0.14644662.
     """
     var = xdl.Variable(name="w", dtype=DataType.float, shape=[4, 1],
                        initializer=xdl.Ones())
     execute(xdl.variable_registers())
     execute(xdl.global_initializers())
     # FIX: np.float was removed in NumPy 1.24; np.float64 is the equivalent.
     op = xdl.ps_sparse_apply_adagrad_op(
         learning_rate=np.array(0.5, dtype=np.float64),
         initial_accumulator_value=np.array(0.0, dtype=np.float64),
         grad=np.array([[1], [2]], dtype=np.float32),
         indices=np.array([1, 2], dtype=np.int32),
         var_name="w",
         var_type="index")
     execute(op)
     ret = execute(var.value)
     self.assertTrue(np.allclose(ret, np.array([[1], [0.5], [0.5], [1]],
                                               dtype=np.float32)))
     execute(op)
     ret = execute(var.value)
     # allclose rather than exact ==: the expected entries are rounded
     # float32 results of the accumulator update.
     self.assertTrue(np.allclose(
         ret, np.array([[1], [0.14644662], [0.14644662], [1]],
                       dtype=np.float32)))
Example #3
 def test_all(self):
     """Dense-Adagrad: apply the op twice to a 4-element ones variable.

     With lr=0.5 and a zero initial accumulator, step one drives every
     element to 1 - 0.5*g/|g| = 0.5; step two yields 0.5 - 0.5/sqrt(2)
     ~= 0.14644662.
     """
     var = xdl.Variable(name="w", dtype=xdl.DT_FLOAT, shape=[4],
                        initializer=xdl.Ones())
     execute(xdl.variable_registers())
     execute(xdl.global_initializers())
     # FIX: np.float was removed in NumPy 1.24; np.float64 is the equivalent.
     op = xdl.ps_dense_apply_adagrad_op(
         learning_rate=np.array(0.5, dtype=np.float64),
         initial_accumulator_value=np.array(0.0, dtype=np.float64),
         grad=np.array([1, 2, 3, 4], dtype=np.float32),
         var_name="w",
         var_type="index")
     execute(op)
     ret = execute(var.value)
     # Give the expected array an explicit float32 dtype to match ret.
     self.assertTrue(np.allclose(ret, np.array([0.5] * 4, dtype=np.float32)))
     execute(op)
     ret = execute(var.value)
     # Leftover debug print(ret) removed; compare with allclose instead of
     # exact equality on rounded float32 values.
     self.assertTrue(np.allclose(
         ret, np.array([0.14644662] * 4, dtype=np.float32)))
Example #4
 def test_all(self):
     """Sparse-momentum: apply the op twice to rows 1 and 2 of a 4x1 variable.

     lr=0.5, momentum=0.9, no Nesterov. Step one: w -= lr*g, giving
     [1, 0.5, 0, 1]. Step two the velocity is m = 0.9*g + g, giving
     [1, -0.45, -1.9, 1].
     """
     var = xdl.Variable(name="w", dtype=DataType.float, shape=[4, 1],
                        initializer=xdl.Ones())
     execute(xdl.variable_registers())
     execute(xdl.global_initializers())
     # FIX: np.float was removed in NumPy 1.24; np.float64 is the equivalent.
     op = xdl.ps_sparse_apply_momentum_op(
         learning_rate=np.array(0.5, dtype=np.float64),
         momentum=np.array(0.9, dtype=np.float64),
         grad=np.array([[1], [2]], dtype=np.float32),
         indices=np.array([1, 2], dtype=np.int32),
         var_name="w",
         var_type="index",
         use_nesterov=False)
     execute(op)
     ret = execute(var.value)
     self.assertTrue(np.allclose(ret, np.array([[1], [0.5], [0], [1]],
                                               dtype=np.float32)))
     execute(op)
     ret = execute(var.value)
     # allclose avoids brittleness of exact float comparison.
     self.assertTrue(np.allclose(ret, np.array([[1], [-0.45], [-1.9], [1]],
                                               dtype=np.float32)))
 def test_all(self):
     """Dense-FTRL: apply the op twice to a 4-element ones variable.

     lr=0.1, lr_power=-0.5, initial accumulator 0.1, no L1/L2
     regularization; checks the variable values after each step against
     precomputed float32 results.
     """
     var = xdl.Variable(name="w", dtype=DataType.float, shape=[4],
                        initializer=xdl.Ones())
     execute(xdl.variable_registers())
     execute(xdl.global_initializers())
     # FIX: np.float was removed in NumPy 1.24; np.float64 is the equivalent.
     op = xdl.ps_dense_apply_ftrl_op(
         learning_rate=np.array(0.1, dtype=np.float64),
         learning_rate_power=np.array(-0.5, dtype=np.float64),
         initial_accumulator_value=np.array(0.1, dtype=np.float64),
         l1_reg=np.array(0, dtype=np.float64),
         l2_reg=np.array(0, dtype=np.float64),
         grad=np.array([1, 2, 3, 4], dtype=np.float32),
         var_name="w",
         var_type="index")
     execute(op)
     ret = execute(var.value)
     # Expected values are rounded float32 outputs of the FTRL update, so
     # compare with allclose rather than exact equality.
     self.assertTrue(np.allclose(
         ret, np.array([0.6031424, 0.7450533, 0.7957225, 0.8215],
                       dtype=np.float32)))
     execute(op)
     ret = execute(var.value)
     self.assertTrue(np.allclose(
         ret, np.array([0.5341358, 0.6747804, 0.7252074, 0.75089955],
                       dtype=np.float32)))
Example #6
 def test_all(self):
     """Sparse-Adam: apply the op twice to rows 1 and 2 of a 4x1 variable.

     Standard Adam hyperparameters (beta1=0.9, beta2=0.999, eps=1e-8,
     lr=0.1) with lr_decay enabled; each step moves the touched rows by
     roughly -lr regardless of gradient magnitude.
     """
     var = xdl.Variable(name="w", dtype=DataType.float, shape=[4, 1],
                        initializer=xdl.Ones())
     execute(xdl.variable_registers())
     execute(xdl.global_initializers())
     # FIX: np.float was removed in NumPy 1.24; np.float64 is the equivalent.
     op = xdl.ps_sparse_apply_adam_op(
         beta1=np.array(0.9, dtype=np.float64),
         beta2=np.array(0.999, dtype=np.float64),
         epsilon=np.array(1e-08, dtype=np.float64),
         learning_rate=np.array(0.1, dtype=np.float64),
         grad=np.array([[1], [2]], dtype=np.float32),
         indices=np.array([1, 2], dtype=np.int32),
         lr_decay=True,
         var_name="w",
         var_type="index")
     execute(op)
     ret = execute(var.value)
     # Expected entries are rounded float32 values, so use allclose
     # instead of exact equality.
     self.assertTrue(np.allclose(
         ret, np.array([[1], [0.90000004], [0.90000004], [1]],
                       dtype=np.float32)))
     execute(op)
     ret = execute(var.value)
     self.assertTrue(np.allclose(
         ret, np.array([[1], [0.8000001], [0.8000001], [1]],
                       dtype=np.float32)))
Example #7
 def test_all(self):
     """Save/restore round-trip: checkpoint, mutate the variable, restore.

     Saves ckpt version 123, adds a delta to the zero-initialized variable,
     verifies the mutation, then restores and verifies the rollback to the
     saved all-zero state.
     """
     var = xdl.Variable(name="w",
                        dtype=DataType.int32,
                        shape=[4],
                        initializer=xdl.Zeros())
     execute(xdl.variable_registers())
     execute(xdl.global_initializers())
     version = np.array(123, dtype=np.int8)
     execute(xdl.ps_save_op(ckpt_version=version))
     delta = np.array([1, 2, 3, 4], dtype=np.int32)
     execute(xdl.ps_assign_add_op(var_name="w",
                                  var_type="index",
                                  delta=delta))
     mutated = execute(var.value)
     self.assertTrue((mutated == np.array([1, 2, 3, 4])).all())
     execute(xdl.ps_restore_op(ckpt_version=version))
     restored = execute(var.value)
     # Restore must discard the delta and bring back the saved zeros.
     self.assertTrue((restored == np.array([0, 0, 0, 0])).all())
 def test_all(self):
     """Dense-momentum: apply the op twice to a 4-element ones variable.

     lr=0.5, momentum=0.9, no Nesterov. Step one: w -= lr*g, giving
     [0.5, 0, -0.5, -1]. Step two the velocity is m = 0.9*g + g, giving
     [-0.45, -1.9, -3.35, -4.8].
     """
     var = xdl.Variable(name="w",
                        dtype=DataType.float,
                        shape=[4],
                        initializer=xdl.Ones())
     execute(xdl.variable_registers())
     execute(xdl.global_initializers())
     # FIX: np.float was removed in NumPy 1.24; np.float64 is the equivalent.
     op = xdl.ps_dense_apply_momentum_op(
         learning_rate=np.array(0.5, dtype=np.float64),
         momentum=np.array(0.9, dtype=np.float64),
         grad=np.array([1, 2, 3, 4], dtype=np.float32),
         var_name="w",
         var_type="index",
         use_nesterov=False)
     execute(op)
     ret = execute(var.value)
     self.assertTrue(np.allclose(ret, np.array([0.5, 0, -0.5, -1],
                                               dtype=np.float32)))
     execute(op)
     ret = execute(var.value)
     # allclose avoids brittleness of exact float comparison.
     self.assertTrue(np.allclose(ret, np.array([-0.45, -1.9, -3.35, -4.8],
                                               dtype=np.float32)))