Example 1
def test_fmap():
    t = Tensor([[1, 2], [3, 4]])

    def func(data):
        return data * 2

    assert all_close(t.fmap(func), [[2, 4], [6, 8]])
Example 2
def test_getitem():
    n = np.array([[1, 2], [3, 4]])
    t = Tensor(n)
    assert t[:] == n
    n = np.array([1, 2, 3])
    t = Tensor(n)
    assert t[2] == 3
Example 3
def _convert(self, v):
    result = Tensor(v)
    if self.config(self.KEYS.CONFIG.BATCH_SIZE) is not None:
        shape = result.data.shape.as_list()
        shape[0] = self.config(self.KEYS.CONFIG.BATCH_SIZE)
        if shape.count(None) == 1:
            shape[shape.index(None)] = -1
        result = Tensor(tf.reshape(result.data, shape))
    return result
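
The shape fix-up above pins the batch dimension from the configuration and, if exactly one unknown dimension remains, marks it as -1 so tf.reshape can infer it. A minimal pure-Python sketch of that list manipulation (the shape values are made up for illustration):

shape = [None, 64, 64, None]       # static shape, as shape.as_list() would return it
shape[0] = 32                      # pin the batch dimension from the config
if shape.count(None) == 1:         # exactly one unknown dimension left
    shape[shape.index(None)] = -1  # let tf.reshape infer it
assert shape == [32, 64, 64, -1]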
Example 4
def test_log_poisson_loss_with_tensor():
    x = tf.constant([math.e], dtype=tf.float32)
    y = tf.constant([1], dtype=tf.float32)
    x = Tensor(x)
    y = Tensor(y)
    res = log_poisson_loss(x, y)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        res = sess.run(res.unbox())
    assert res == 0
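
The expected value of 0 follows from the log-Poisson loss formula exp(log_input) - targets * log_input (without the optional Stirling term): with targets = e and log_input = 1 this gives e - e = 0. A quick plain-Python check of that arithmetic, assuming this is the formula applied by the implementation:

import math

targets, log_input = math.e, 1.0
loss = math.exp(log_input) - targets * log_input   # exp(1) - e * 1 = 0
assert abs(loss) < 1e-12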
Example 5
def test_poisson_loss_with_tensor():
    x = tf.constant([math.e, math.e, math.e], dtype=tf.float32)
    y = tf.constant([1, 1, 1], dtype=tf.float32)
    x = Tensor(x)
    y = Tensor(y)
    res = poisson_loss(y, x)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        res = sess.run(res.unbox())
    assert abs((math.e - res) - 1) < math.pow(10, -7)
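
Here the asserted value follows from the element-wise Poisson loss pred - label * log(pred): with pred = e and label = 1 each element contributes e - 1, so res is approximately e - 1 and e - res is approximately 1 (up to the small epsilon tf.keras adds inside the log). A plain-Python check of that arithmetic:

import math

pred, label = math.e, 1.0
per_element = pred - label * math.log(pred)   # e - log(e) = e - 1
assert abs((math.e - per_element) - 1) < 1e-12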
Example 6
def test_absolute_error_with_tensor():
    x = tf.constant([-2, -2, -2], dtype=tf.float32)
    y = tf.constant([1, 1, 1], dtype=tf.float32)
    x = Tensor(x)
    y = Tensor(y)
    res = absolute_error(y, x)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        res = sess.run(res.unbox())
    assert res == 3
Example 7
def test_composite_loss_with_tensor():
    x = tf.constant([2], dtype=tf.float32)
    y = tf.constant([1], dtype=tf.float32)
    x = Tensor(x)
    y = Tensor(y)
    loss = {mean_square_error: 1, absolute_error: 2}
    res = composite_loss(y, x, loss)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        res = sess.run(res.unbox())
    assert res == 3
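
The expected value 3 is simply the weighted sum of the two losses for the single pair (label 1, prediction 2), assuming the usual definitions of the two component losses:

mse = (2.0 - 1.0) ** 2          # mean_square_error(1, 2)
mae = abs(2.0 - 1.0)            # absolute_error(1, 2)
assert 1 * mse + 2 * mae == 3   # weights taken from the loss dict above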
Example 8
def test_flatten():
    f1 = flatten(Tensor([1, 2, 3, 4]))
    assert f1.shape == [4, 1]
    assert f1.ndim == 2

    f2 = flatten(Tensor([[1, 2, 3], [3, 4, 5]]))
    assert f2.shape == [2, 3]
    assert f2.ndim == 2

    f3 = flatten(
        Tensor([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
                [[1, 2, 3], [4, 5, 6]]]))
    assert f3.shape == [3, 6]
    assert f3.ndim == 2
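
The shapes asserted above are consistent with flattening everything except the leading dimension, i.e. reshape(n, -1). An illustrative NumPy equivalent (not the library implementation):

import numpy as np

a = np.arange(18).reshape(3, 2, 3)
flat = a.reshape(a.shape[0], -1)   # keep the first axis, flatten the rest
assert flat.shape == (3, 6)
assert flat.ndim == 2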
Example 9
def _(x, nb_split, name='data_splitter'):
    x = x.unbox()
    shape_x = shape_as_list(x)
    split_size = shape_x[0] // nb_split
    res = {}
    offset = [0 for i in shape_x]
    slice_size = copy.copy(shape_x)
    slice_size[0] = split_size
    for i in range(nb_split - 1):
        res['slice{}'.format(i)] = Tensor(tf.slice(x, offset, slice_size))
        offset[0] = offset[0] + split_size
    slice_size[0] = shape_x[0] - offset[0]
    res['slice{}'.format(nb_split - 1)] = Tensor(
        tf.slice(x, offset, slice_size))
    return res
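
The splitter cuts the leading axis into nb_split pieces of size shape_x[0] // nb_split, with the final slice absorbing any remainder. A small NumPy sketch of the same slicing arithmetic (illustrative only):

import numpy as np

x = np.ones((10, 4))
nb_split = 3
split_size = x.shape[0] // nb_split             # 3
starts = [i * split_size for i in range(nb_split)]
ends = starts[1:] + [x.shape[0]]                # the last slice takes the remainder
slices = [x[s:e] for s, e in zip(starts, ends)]
assert [s.shape[0] for s in slices] == [3, 3, 4]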
Example 10
def _(label, infer, losses):
    if isinstance(infer, Tensor):
        infer = infer.unbox()
    label = label.unbox()
    with tf.variable_scope("composite_loss"):
        weighted_loss = [k(label, infer) * v for k, v in losses.items()]
        return Tensor(tf.reduce_sum(weighted_loss))
Example 11
def _(label, data, *, compute_full_loss=False):
    if isinstance(data, Tensor):
        data = data.unbox()
    label = label.unbox()
    label = tf.maximum(label, 0.0)
    data = tf.maximum(data, 0.0)
    return Tensor(tf.reduce_mean(tf.keras.losses.poisson(label, data)))
Example 12
def histo_points_to_box(points: list,
                        box: Box,
                        grid: list,
                        weights: list = None):
    """
    grid is the grid num, 
    when weights is none, result is number collection matrix 
    """
    if weights is None:
        weights = [1.0] * len(points)
    else:
        weights = weights
    result = np.zeros(grid)
    subbox = divide(box, grid)
    #subbox = [b.translate(b.shape / 2 - box.shape / 2) for b in subbox]
    p_index = list()
    for p, w in zip(points, weights):
        for b in subbox:
            if b.is_collision(p):
                p_index.append(b.origin)
                ix = int(((b.origin.x - box.origin.x) + 0.5 * box.shape.x) /
                         b.shape.x)
                iy = int(((b.origin.y - box.origin.y) + 0.5 * box.shape.y) /
                         b.shape.y)
                iz = int((0.5 * box.shape.z - (b.origin.z - box.origin.z)) /
                         b.shape.z)
                result[ix, iy, iz] += w
                #print([ix, iy,iz])
    return p_index, Tensor(result)
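
For reference, when the points are available as an (N, 3) float array and the box bounds are known, a similar weighted binning can be expressed with np.histogramdd (this ignores the inverted z index used in the loop above; the data below is made up):

import numpy as np

pts = np.random.uniform(-1.0, 1.0, size=(100, 3))   # hypothetical points
grid = (8, 8, 8)
bounds = [(-1.0, 1.0)] * 3                           # hypothetical box extent per axis
counts, _ = np.histogramdd(pts, bins=grid, range=bounds)
assert counts.shape == grid and counts.sum() == 100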
Example 13
def _(input_, target, name='random_crop'):
    input_ = input_.unbox()
    with tf.name_scope(name):
        input_shape = shape_as_list(input_)
        target_shape = shape_as_list(target)
        random_offset = tf.py_func(random_crop_offset,
                                   [input_shape, target_shape], tf.int32)
        return Tensor(tf.slice(input_, random_offset, target_shape))
Example 14
def _(input_, offset=None, name="boundary_crop"):
    input_ = input_.unbox()
    with tf.name_scope(name):
        shape = shape_as_list(input_)
        if offset is None:
            offset = [0] * len(shape)
        if len(offset) == 2:
            offset = [0] + list(offset) + [0]
        shape_output = [s - 2 * o for s, o in zip(shape, offset)]
        return Tensor(tf.slice(input_, offset, shape_output))
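
The output shape arithmetic above trims offset[i] elements from both ends of axis i. An illustrative NumPy version of the same symmetric crop:

import numpy as np

x = np.zeros((1, 10, 10, 3))
offset = [0, 2, 2, 0]
cropped = x[tuple(slice(o, s - o) for o, s in zip(offset, x.shape))]
assert cropped.shape == (1, 6, 6, 3)   # each axis shrinks by 2 * offset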
Example 15
def _(log_label, data, *, compute_full_loss=False):
    if isinstance(data, Tensor):
        data = data.unbox()
    log_label = log_label.unbox()
    data = tf.maximum(data, 0.0)
    return Tensor(
        tf.reduce_mean(
            tf.nn.log_poisson_loss(log_label, data, compute_full_loss)))
Example 16
def test_relu_with_tensor():
    x = tf.constant([-1, -2, -3, 1, 2, 3], dtype=tf.float32)
    x = Tensor(x)
    res = relu(x)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        res = sess.run(res.unbox())
    assert all_close(res, [0, 0, 0, 1, 2, 3]) is True
Example 17
def to_tensors(dataset: Dataset) -> Dict[str, Tensor]:
    """
    Convert a dataset to a dict of Tensors.
    :param dataset: Dataset, currently it's only a wrapper of tf.data.Dataset
    :return: dict of Tensors, currently doufo.Tensor-wrapped tf.Tensor objects.
    """
    with tf.variable_scope('finalize_dataset_to_dict_of_tensors'):
        result = dataset.unbox().make_one_shot_iterator().get_next()
        return {k: Tensor(v) for k, v in result.items()}
Example 18
def test_swish_with_tensor():
    x = tf.constant([-1, -2, 0, 1, 2], dtype=tf.float32)
    x = Tensor(x)
    res = swish(x)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        res = sess.run(res.unbox())
    assert res[2] == 0
    assert (res[0] - res[1]) < (res[4] - res[3])
Example 19
def test_data_splitter_with_tensor():
    x = tf.ones([32, 64, 64, 3], dtype=tf.float32)
    x = Tensor(x)
    res1 = data_splitter(x, 4)
    assert all(
        map(lambda x: shape_as_list(x) == [8, 64, 64, 3],
            [res1['slice{}'.format(i)] for i in range(4)])) is True
    assert all(
        map(lambda x: isinstance(x, Tensor),
            [res1['slice{}'.format(i)] for i in range(4)])) is True
Example 20
def _(input_, target, offset=None, name='align_crop'):
    input_ = input_.unbox()
    with tf.name_scope(name):
        shape_input = shape_as_list(input_)
        shape_output = shape_as_list(target)
        if offset is None:
            offset = [0] + [(shape_input[i] - shape_output[i]) // 2
                            for i in range(1, 3)] + [0]
        shape_output[-1] = shape_input[-1]
        return Tensor(tf.slice(input_, offset, shape_output))
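
When no offset is given, the crop is centered on the two spatial axes: offset = (input - output) // 2. A small NumPy sketch of that centering, with illustrative shapes:

import numpy as np

x = np.zeros((1, 12, 12, 3))
target_hw = (8, 8)
off = [(x.shape[i + 1] - target_hw[i]) // 2 for i in range(2)]   # [2, 2]
cropped = x[:, off[0]:off[0] + target_hw[0], off[1]:off[1] + target_hw[1], :]
assert cropped.shape == (1, 8, 8, 3)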
Example 21
def test_elu_with_tensor():
    x = tf.constant([-1, 0, 1], dtype=tf.float32)
    x = Tensor(x)
    res = elu(x)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        res = sess.run(res.unbox())
    assert abs(res[0] - (-0.63212055)) < math.pow(10, -8)
    assert res[1] == 0
    assert res[2] == 1
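
The first asserted value follows from the standard ELU definition, elu(x) = exp(x) - 1 for x < 0 (alpha = 1), so elu(-1) = e**-1 - 1 ≈ -0.63212056:

import math

assert abs((math.exp(-1) - 1) - (-0.63212055)) < 1e-7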
Example 22
def test_selu_with_tensor():
    x = tf.constant([-1, 0, 1], dtype=tf.float32)
    x = Tensor(x)
    res = selu(x)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        res = sess.run(res.unbox())
    assert abs(res[0] - (-1.1113307)) < math.pow(10, -7)
    assert abs(res[2] - 1.050701) < math.pow(10, -6)
    assert res[1] == 0
Example 23
def test_to_tensor_like_ndarray():
    t = Tensor([[1, 4], [2, 5], [3, 6]])
    assert to_tensor_like(t) == Tensor([[1, 4], [2, 5], [3, 6]])
Example 24
def test_ndarray_Tensor():
    n = np.array([[1, 2, 3], [4, 5, 6]])
    t = Tensor([[1, 4], [2, 5], [3, 6]])
    assert matmul(n, t) == Tensor([[14, 32], [32, 77]])
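
A plain NumPy cross-check of the expected product (a 2x3 matrix times a 3x2 matrix gives a 2x2 result):

import numpy as np

expected = np.array([[1, 2, 3], [4, 5, 6]]) @ np.array([[1, 4], [2, 5], [3, 6]])
assert (expected == np.array([[14, 32], [32, 77]])).all()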
Example 25
def test_Tensor_ndarray():
    t = Tensor([[1, 4], [2, 5], [3, 6]])
    n = np.array([[1, 2, 3], [4, 5, 6]])
    assert matmul(t, n) == Tensor([[17, 22, 27], [22, 29, 36], [27, 36, 45]])
Example 26
def test_Tensor_Tensor():
    t1 = Tensor([[1, 4], [2, 5], [3, 6]])
    t2 = Tensor([[1, 2, 3], [4, 5, 6]])

    assert matmul(t1, t2) == Tensor([[17, 22, 27], [22, 29, 36], [27, 36, 45]])
Example 27
def test_str():
    assert str(Tensor([1, 2, 3])) == str(np.array([1, 2, 3]))
Example 28
def _(label, infer):
    if isinstance(infer, Tensor):
        infer = infer.unbox()
    label = label.unbox()
    return Tensor(tf.losses.absolute_difference(label, infer))
Example 29
def _(label, infer):
    if isinstance(infer, Tensor):
        infer = infer.unbox()
    label = label.unbox()
    return Tensor(tf.losses.mean_squared_error(label, infer))
Example 30
def fmap(self, f):
    result = f(self.unbox())
    if isinstance(result, DataClass):
        return DataArray(result, type(result))
    from doufo.tensor import Tensor
    return Tensor(result)