Code example #1
def test_MemOps():
  error = 0
  t1 = tensor.Tensor()
  t2 = tensor.Tensor()
  arr = tensor.VectorFloat([2, 2, 2])
  t1.Reshape(tensor.VectorInt([3, 1]))
  t1.reinit(arr, 3)
  return error
Code example #2
    def apply_with_lr(self, epoch, lr, grad, value, name, step):
        '''Update one parameter object using the Adam rule.

        Args:
            epoch (int): the current training epoch
            lr (float): the base learning rate
            grad (Tensor): the gradient of the parameter
            value (Tensor): the current value of the parameter
            name (str): the parameter name, used to key the moment buffers
            step (int): the accumulated training iterations, not the iteration ID
        '''
        if grad.is_empty():
            return value

        assert step >= 0, 'step should be >= 0'
        if epoch != self.last_epoch or step != self.last_step:
            self.t += 1
            self.last_step = step
            self.last_epoch = epoch
        grad = self.apply_regularizer_constraint(epoch, value, grad, name,
                                                 step)
        if name is not None and name in self.learning_rate_multiplier:
            lr = lr * self.learning_rate_multiplier[name]
        if name not in self.m or name not in self.v:
            self.m[name] = tensor.Tensor(grad.shape, grad.device, grad.dtype)
            self.m[name].set_value(0)
            self.v[name] = tensor.Tensor(grad.shape, grad.device, grad.dtype)
            self.v[name].set_value(0)

        self.m[name] *= self.beta_1
        tensor.axpy(1 - self.beta_1, grad, self.m[name])
        self.v[name] *= self.beta_2
        tensor.axpy(1 - self.beta_2, tensor.square(grad), self.v[name])
        alpha = lr * math.sqrt(1 - math.pow(self.beta_2, self.t)) \
            / (1 - math.pow(self.beta_1, self.t))
        value -= alpha * self.m[name] / (tensor.sqrt(self.v[name]) +
                                         self.epsilon)
        return value
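
For reference, the update above is the standard Adam rule. A minimal standalone NumPy sketch of the same math (hypothetical names, independent of the singa tensor API):

import math
import numpy as np

def adam_step(value, grad, m, v, t, lr=1e-3, beta_1=0.9, beta_2=0.999, eps=1e-8):
    # Running first and second moment estimates of the gradient.
    m = beta_1 * m + (1 - beta_1) * grad
    v = beta_2 * v + (1 - beta_2) * grad ** 2
    # Bias-corrected step size, matching the alpha computed above.
    alpha = lr * math.sqrt(1 - beta_2 ** t) / (1 - beta_1 ** t)
    return value - alpha * m / (np.sqrt(v) + eps), m, v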
Code example #3
 def __init__(self, data=None, network=None, optimizer=None):
     # Build mutable defaults per call; putting them in the signature would
     # evaluate them once at import time and share them across instances.
     if data is None:
         data = nn.Dataset(tensor.Tensor([1], [1]), tensor.Tensor([1], [1]))
     self.data = data
     self.network = network if network is not None else nn.Network()
     self.optimizer = optimizer if optimizer is not None else nn.optimizer.SGD(0.01)
Code example #4
from urllib.request import urlopen
import pickle
import tensor  # the project's own tensor module


def init():
    array = []
    table = []
    for line in urlopen(
            "https://archive.ics.uci.edu/ml/machine-learning-databases/tic-tac-toe/tic-tac-toe.data"
    ):
        decoded_line = line.decode('UTF-8').lower().strip()
        # Board cells sit at even offsets 0..16 of the comma-separated line;
        # each cell becomes a one-hot triple (x, o, blank).
        for i in range(0, 17, 2):
            if decoded_line[i] == 'x':
                array.extend([1., 0., 0.])
            elif decoded_line[i] == 'o':
                array.extend([0., 1., 0.])
            else:
                array.extend([0., 0., 0.])

        # The class label starts at offset 18 ('p' for positive).
        if decoded_line[18] == 'p':
            table.extend([1, 0])  # one-hot
        else:
            table.extend([0, 1])

    data_count = len(table) // 2
    # Sanity check: 27 features per sample vs. 2 label entries per sample.
    if len(array) != data_count * 27:
        print("error: array and table sizes are inconsistent")
    train_count = data_count * 4 // 5
    test_count = data_count - train_count

    table = tensor.Tensor(table, [data_count, 2])
    data = tensor.Tensor(array, [data_count, 27])

    train_data = tensor.create_zeros([train_count, 27])
    train_table = tensor.create_zeros([train_count, 2])

    test_data = tensor.create_zeros([test_count, 27])
    test_table = tensor.create_zeros([test_count, 2])

    choice_list = tensor.create_arange(0, data_count)
    tensor.set_shuffle(choice_list)

    train_choice = tensor.create_zeros([train_count], int)
    test_choice = tensor.create_zeros([test_count], int)

    tensor.copy(choice_list, 0, train_count, train_choice)
    tensor.copy(choice_list, train_count, test_count, test_choice)

    tensor.copy_row(data, train_choice, train_data)
    tensor.copy_row(table, train_choice, train_table)
    tensor.copy_row(data, test_choice, test_data)
    tensor.copy_row(table, test_choice, test_table)

    with open('ttt_train_data.bin', 'wb') as f:
        pickle.dump(train_data, f)
    with open('ttt_train_table.bin', 'wb') as f:
        pickle.dump(train_table, f)
    with open('ttt_test_data.bin', 'wb') as f:
        pickle.dump(test_data, f)
    with open('ttt_test_table.bin', 'wb') as f:
        pickle.dump(test_table, f)
    print("done")
Code example #5
File: functional.py, Project: LongJohnCoder/nanograd
    def backward(ctx, grad_output):
        a, b = ctx.saved_tensors

        # Gradients of C = A @ B: dL/dA = G @ B^T, dL/dB = A^T @ G.
        grad_a = np.matmul(grad_output.data, np.transpose(b.data))
        grad_b = np.matmul(np.transpose(a.data), grad_output.data)

        grad_a = tensor.Tensor(grad_a)
        grad_b = tensor.Tensor(grad_b)

        return grad_a, grad_b
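
These formulas can be sanity-checked numerically; a standalone NumPy sketch, independent of the nanograd Tensor class:

import numpy as np

A = np.random.randn(3, 4)
B = np.random.randn(4, 2)
G = np.random.randn(3, 2)  # stand-in for the upstream gradient

grad_a = G @ B.T   # dL/dA
grad_b = A.T @ G   # dL/dB

# Finite-difference check on one entry of A.
eps = 1e-6
A2 = A.copy()
A2[0, 0] += eps
numeric = (((A2 @ B) - (A @ B)) * G).sum() / eps
assert abs(numeric - grad_a[0, 0]) < 1e-4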
Code example #6
File: functional.py, Project: LongJohnCoder/nanograd
    def backward(ctx, grad_output):
        a, b = ctx.saved_tensors

        # For elementwise c = a * b, each gradient is the upstream gradient
        # times the other operand, later reduced back to the operand's shape.
        grad_a = grad_output.data * b.data
        grad_b = grad_output.data * a.data

        grad_a = tensor.Tensor(unbroadcast(grad_a, a.shape))
        grad_b = tensor.Tensor(unbroadcast(grad_b, b.shape))

        return grad_a, grad_b
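
The `unbroadcast` helper is not shown in the snippet; a plausible implementation that sums a gradient back down to the original operand shape (an assumption, not necessarily nanograd's exact code):

import numpy as np

def unbroadcast(grad, shape):
    # Sum out leading axes that broadcasting added...
    while grad.ndim > len(shape):
        grad = grad.sum(axis=0)
    # ...then sum over axes that were expanded from size 1.
    for i, dim in enumerate(shape):
        if dim == 1:
            grad = grad.sum(axis=i, keepdims=True)
    return grad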
Code example #7
 def __init__(self, w, b):
     self.w = w
     self.b = b
     self.x = None
     self.dout = None
     self.max_index = 0
     self.out = tensor.Tensor([0], [1, 1])
     self.dx = tensor.Tensor([0], [1, 1])
     self.dw = w.copy()
     self.db = b.copy()
Code example #8
File: test_nn.py, Project: d-kicinski/tensor
def test_relu():
    relu = ReLU()
    tensor = var([[1., -1.], [-1., 1.]])
    forward = relu(tensor)
    expected_forward = ts.Tensor([[1., 0.], [0., 1.]])
    np.testing.assert_equal(forward.value.numpy, expected_forward.numpy)

    d_output = ts.Tensor([[2., 2.], [2., 2.]])
    relu.backward(d_output)
    expected_backward = ts.Tensor([[2., 0.], [0., 2.]])
    np.testing.assert_equal(tensor.grad.numpy, expected_backward.numpy)
Code example #9
    def __next__(self) -> Tuple[ts.Tensor, ts.Tensor]:
        if self._n + 1 == len(self._data) // self._batch_size:
            raise StopIteration

        images = self._get_chunk(self._data)
        labels = self._get_chunk(self._target)
        self._n += 1

        if self._transform is not None:
            images = self._transform(images)

        return ts.Tensor(images), ts.Tensor(labels)
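
`_get_chunk` is not shown; a plausible sketch consistent with the `_n` batch counter above (an assumption, not the project's actual helper):

def _get_chunk(self, data):
    # Slice batch number self._n out of the full array.
    start = self._n * self._batch_size
    return data[start:start + self._batch_size]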
Code example #10
File: conv3d_module.py, Project: pirate1111/test
 def __init__(self, filter, bias, stride, pad, padding):
     self.filter = filter
     self.bias = bias
     self.dfilter = filter.copy()
     self.dbias = bias.copy()
     self.out = tensor.Tensor([1], [1, 1, 1, 1])
     self.x = None
     self.dout = None
     self.dx = tensor.Tensor([1], [1, 1, 1, 1])
     self.stride = stride
     self.pad = pad
     self.padding = padding
     self.max_index = 0
Code example #11
File: ssf.py, Project: GiggleLiu/pymps
def SSFC2E_F1(kets, spaceconfig, maxN=55):
    '''
    Sweep fidelity from center to edge; this version takes the fermionic sign into account.

    Args:
        kets (len-2 list): the kets between which to compute the sweep fidelity.
        spaceconfig (<SuperSpaceConfig>): the space configuration.
        maxN (int): the maximum number of retained singular values for 'usv' mode,
            and the maximum number of retained states for direct mode.
    '''
    nsite = kets[0].nsite
    # prepare kets.
    bra = kets[0].tobra(labels=[kets[0].labels[0], kets[0].labels[1] + '\''])
    ket = kets[1]
    ket >> (nsite // 2 - ket.l, 1e-8, Inf)
    bra >> (nsite // 2 - bra.l, 1e-8, Inf)
    l = kets[0].forder.index(0) - nsite // 2  # bulk size / 2.

    rlink_axis = kets[0].rlink_axis
    edge_labels_l = [
        bra.ML[bra.l - 1].labels[rlink_axis],
        ket.ML[ket.l - 1].labels[rlink_axis]
    ]
    llink_axis = kets[0].llink_axis
    bra.BL[0].labels[llink_axis] += '@'
    ket.BL[0].labels[llink_axis] += '@'
    edge_labels_r = [
        bra.BL[0].labels[llink_axis], ket.BL[0].labels[llink_axis]
    ]
    Ci = tensor.Tensor(
        diag(bra.S),
        labels=[edge_labels_l[0], edge_labels_r[0]]) * tensor.Tensor(
            diag(ket.S), labels=[edge_labels_l[1], edge_labels_r[1]])
    fs = [1]
    # get the bulk overlap matrix.
    for i in range(l):
        t0 = time.time()
        site_l = nsite // 2 - i - 1
        site_r = nsite // 2 + i
        Ci = bra.get(site_l, attach_S='B') * \
            (ket.get(site_l, attach_S='B') * Ci)
        Ci = Ci * bra.get(site_r, attach_S='A') * ket.get(site_r, attach_S='A')
        Ci = Ci.chorder(array([0, 2, 1, 3]))
        t1 = time.time()
        print('Update %s, Elapse->%s' % (i, t1 - t0))
    S = svdvals(Ci.reshape([Ci.shape[0] * Ci.shape[1], -1]))
    f = sum(S)
    print('Get Fidelity for l = %s: %s.' % (l, f))
    return f
Code example #12
 def __init__(self):
     self.dx = tensor.Tensor([0], [1, 1])
     self.x = None
     self.out = 0
     self.dout = None
     self.t = None
     self.max_index = 0
Code example #13
 def totensor(self):
     # Returns a new dense Tensor that contains the same values as this
     # sparse (subs, vals) representation.
     data = np.zeros(self.shape)
     for i in range(len(self.vals)):
         data.put(int(tools.sub2ind(self.shape, self.subs[i])),
                  self.vals[i])
     return tensor.Tensor(data)
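
`tools.sub2ind` is assumed to map a multi-dimensional subscript to the flat index that `put()` expects; NumPy's `ravel_multi_index` does the same job (a sketch of the assumed semantics, using row-major order):

import numpy as np

shape = (2, 3)
sub = (1, 2)
flat = np.ravel_multi_index(sub, shape)  # 5: row-major flat index into a (2, 3) array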
Code example #14
File: sonnx.py, Project: ShichengChen/incubator-singa
    def prepare(
        cls,
        model,  # type: ModelProto
        device,  # type: singa device
        **kwargs  # type: Any
    ):  # type: (...) -> Optional[BackendRep]
        '''
        Args:
            model: onnx model proto
            device: singa device
        Return:
            SingaBackendRep instance
        '''
        super(SingaBackend, cls).prepare(model, device, **kwargs)
        name2tensor = {}
        for node in model.graph.node:
            if (node.op_type == 'Constant'):
                data = helper.get_attribute_value(node.attribute[0])
                requires_grad, stores_grad = True, True
                if len(node.attribute) == 3:
                    requires_grad = helper.get_attribute_value(
                        node.attribute[1])
                    stores_grad = helper.get_attribute_value(node.attribute[2])
                t = tensor.Tensor(device=device,
                                  data=numpy_helper.to_array(data),
                                  requires_grad=requires_grad,
                                  stores_grad=stores_grad)

                name2tensor[node.output[0]] = t

        return SingaBackendRep(model, device, name2tensor)
Code example #15
File: ssf.py, Project: GiggleLiu/pymps
def SSFLR(kets, direction):
    '''
    Sweep fidelity, from left to right or from right to left.
    '''
    bra = kets[0].tobra(labels=[kets[0].labels[0], kets[0].labels[1] + '\''])
    ket = kets[1]
    nsite = ket.nsite
    if direction == '->':
        for keti in [bra, ket]:
            keti << keti.l - 1
        step = 1
        clink_axis = kets[0].llink_axis
        attach_S = 'A'
        edge_labels = [
            bra.ML[0].labels[clink_axis], ket.ML[0].labels[clink_axis]
        ]
    else:
        step = -1
        clink_axis = kets[0].rlink_axis
        attach_S = 'B'
        for keti in [bra, ket]:
            keti >> nsite - 1 - keti.l
        edge_labels = [
            bra.get(nsite - 1).labels[clink_axis],
            ket.get(nsite - 1).labels[clink_axis]
        ]
    Ri = tensor.Tensor(identity(1), labels=edge_labels)
    fs = [1]
    for i in range(nsite):
        sitei = i if direction == '->' else nsite - i - 1
        Ri = (bra.get(sitei, attach_S=attach_S) * Ri *
              ket.get(sitei, attach_S=attach_S))
        S = svdvals(Ri)
        fs.append(sum(S))
        print(i, sum(S))
    return fs
Code example #16
 def backward(self, flag, grads):
     assert len(grads) > 1, 'There must be multiple gradients'
     dx = tensor.Tensor()
     dx.reset_like(grads[0])
     dx.set_value(0)
     for g in grads:
         dx += g
     return dx, []
Code example #17
def test_Tensor():
  error = 0
  t = tensor.Tensor()
  m = t.L2
  t.Reshape(tensor.VectorInt([2, 2, 2]))
  return error
Code example #18
File: layer.py, Project: hslee1539/tic_tac_toe
 def __init__(self, exp_func=math.exp, log_func=math.log):
     self.exp_func = exp_func
     self.log_func = log_func
     self.loss = None
     self.y = None
     self.t = None
     self.out = tensor.Tensor([0], [1])
     self.tmp_sum = None
     self.batch_size = 0
Code example #19
 def forward(self, flag, inputs):
     assert len(inputs) > 1, 'There must be multiple input tensors'
     self.num_input = len(inputs)
     output = tensor.Tensor()
     output.reset_like(inputs[0])
     output.set_value(0)
     for x in inputs:
         output += x
     return output
Code example #20
 def findAccuracy(self):
     acc = tensor.Tensor([0], [1])
     for i, x, y in self.data.normalRange():
         self.network.setX(x)
         result = self.network.forward()
         # Count a hit when an output above 0.5 lines up with a 1 in the label.
         for j in range(len(result.array)):
             if result.array[j] > 0.5:
                 acc.array[0] += y.array[j]
     # Note: normalizes by the size of the last label tensor seen in the loop.
     acc.array[0] /= y.shape[0]
     return acc
Code example #21
File: layer.py, Project: hslee1539/tic_tac_toe
 def __init__(self, W, b):
     self.W = W
     self.dW = W.copy()
     self.W_t = tensor.create_transpose(W)
     self.b = b
     self.db = b.copy()
     self.out = tensor.Tensor([0], [1, 1])
     self.dout = None
     self.x = None
     self.x_t = None
Code example #22
File: layer.py, Project: hslee1539/tic_tac_toe
 def accuracy(self, table):
     """forward를 반드시 해야 하고, backward 이전에 사용해야 합니다."""
     out = self.layers[-1].out
     out_argmax = tensor.argmax(out, -1, tensor.create_sum(out, -1))
     table_argmax = tensor.argmax(table, -1, tensor.create_sum(table, -1))
     eq = tensor.function_elment_wise(
         out_argmax, table_argmax, Layers._equal,
         tensor.create_element_wise_product(out_argmax, table_argmax, int))
     reduce_sum = tensor.sum_axis(eq, 0, tensor.Tensor([1], [1]))
     return reduce_sum.array[0] / len(out_argmax.array)
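
For comparison, the same accuracy computation in plain NumPy (a sketch, independent of the project's tensor helpers):

import numpy as np

def accuracy_np(out, table):
    # Fraction of rows whose predicted argmax matches the one-hot label's argmax.
    return np.mean(np.argmax(out, axis=-1) == np.argmax(table, axis=-1))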
Code example #23
File: functional.py, Project: LongJohnCoder/nanograd
    def forward(ctx, a):
        if not type(a).__name__ == "Tensor":
            raise Exception("Sigmoid can only be applied to tensors")

        ctx.save_for_backward(a)
        out = tensor.Tensor(sigmoid(a.data),
                            requires_grad=a.requires_grad,
                            is_leaf=not a.requires_grad)
        out.children = [a]
        out.op = 'sigmoid'
        return out
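
The `sigmoid` helper called here is defined elsewhere in the project; the conventional elementwise definition (an assumption about nanograd's helper):

import numpy as np

def sigmoid(x):
    # Elementwise logistic function, 1 / (1 + exp(-x)).
    return 1.0 / (1.0 + np.exp(-x))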
Code example #24
File: functional.py, Project: LongJohnCoder/nanograd
    def forward(ctx, a):
        if not type(a).__name__ == "Tensor":
            raise Exception(
                f"Only neg of tensor is supported. Got: {type(a).__name__}")

        out = tensor.Tensor(-a.data,
                            requires_grad=a.requires_grad,
                            is_leaf=not a.requires_grad)
        out.children = [a]
        out.op = 'neg'
        return out
Code example #25
File: functional.py, Project: LongJohnCoder/nanograd
    def forward(ctx, a):
        if not type(a).__name__ == 'Tensor':
            raise Exception("ReLU can only be applied to tensors")

        ctx.save_for_backward(a)

        out = tensor.Tensor(np.maximum(a.data, 0),
                            requires_grad=a.requires_grad,
                            is_leaf=not a.requires_grad)
        out.children = [a]
        out.op = 'relu'
        return out
Code example #26
File: simulation.py, Project: zhangerjun/osmosis
    def __init__(self,
                 bvecs,
                 bvals,
                 odf,
                 iso=False,
                 scaling_factor=SCALE_FACTOR,
                 axial_diffusivity=AD,
                 radial_diffusivity=RD):
        """
        The signal in a single voxel is computed from the convolution of a
        response function with the ODF
        
        Parameters
        ----------

        bvecs: 3 by n array
             unit vectors on the sphere.
        
        bvals: 1 by n array
             The measurement parameter defining where on the curve we measure
             the exponential decay of the signal

        odf: an ODF class instance
            The representation of the orientation distribution function. Note
            that the ODF class also has bvecs, but they don't have to be the
            same as the ones of this class: the bvecs in this class refer to
            measurement directions, while the bvecs in the ODF class refer to
            directions of fibers within the voxel (not necessarily the same).

        iso : float (optional)
            Whether and how much of an isotropic component to add to the
            signal. Default: False - no isotropic component
            
        scaling_factor: float (optional)
            To get the right units on the ADC, sometimes the b value needs to
            be scaled. Typically, divided by 1000 (default).

        axial_diffusivity, radial_diffusivity: float
            The axial and radial diffusivities of the cigar-shaped response
            tensor built below.
        """

        self.bvecs = bvecs
        self.bvals = bvals/scaling_factor
        self.odf = odf
        
        # We assume that the response function is a cigar shaped tensor:
        self.Q = np.array([[axial_diffusivity, 0, 0],
                           [0, radial_diffusivity, 0],
                           [0, 0, radial_diffusivity]])
        self.response_function = ozt.Tensor(self.Q, self.bvecs, self.bvals)
        self.iso = iso
Code example #27
File: functional.py, Project: LongJohnCoder/nanograd
    def forward(ctx, a):
        if not len(a.shape) == 2:
            raise Exception("Arg for Transpose must be 2D tensor: {}".format(
                a.shape))

        requires_grad = a.requires_grad
        out = tensor.Tensor(a.data.T,
                            requires_grad=requires_grad,
                            is_leaf=not requires_grad)
        out.children = [a]
        out.op = 'transpose'
        return out
Code example #28
File: ssf.py, Project: GiggleLiu/pymps
 def chlabel(self, nlabel):
     '''
     Change the overall labels.
     '''
     self.labels = nlabel
     for i, (l_left, l_right) in enumerate(zip(self.SN[:-1], self.SN[1:])):
         l_all = ''.join(str(j) for j in range(l_left, l_right))
         self.ML[i] = tensor.Tensor(self.ML[i],
                                    labels=[
                                        '%s_%s' % (nlabel[1], l_left),
                                        '%s_%s' % (nlabel[0], l_all),
                                        '%s_%s' % (nlabel[1], l_right)
                                    ])
Code example #29
File: functional.py, Project: LongJohnCoder/nanograd
    def backward(ctx, grad_output):
        x, weight, bias = ctx.saved_tensors
        x_cols = ctx.x_cols
        stride, pad = ctx.stride, ctx.pad

        N, C, L = x.shape
        F, _, KL = weight.shape
        _, _, OL = grad_output.shape

        # Bias gradient: sum over the batch and output-length dimensions.
        grad_bias = np.sum(grad_output.data, axis=(0, 2))
        grad_bias = tensor.Tensor(grad_bias)

        # Weight gradient: correlate the upstream gradient with the im2col'd input.
        grad_out_reshaped = grad_output.data.transpose(1, 2, 0).reshape(F, -1)
        grad_weight = (grad_out_reshaped @ x_cols.T).reshape(weight.shape)
        grad_weight = tensor.Tensor(grad_weight)

        # Input gradient: propagate through the columns, then fold back with col2im.
        grad_x_cols = weight.data.reshape(F, -1).T @ grad_out_reshaped
        grad_x_cols.shape = (C, KL, N, OL)
        grad_x = col2im(grad_x_cols, x.shape, 1, KL, pad, stride)
        grad_x = tensor.Tensor(grad_x)

        return grad_x, grad_weight, grad_bias
Code example #30
File: functional.py, Project: LongJohnCoder/nanograd
    def forward(ctx, a, b):
        if not (type(a).__name__ == 'Tensor' and type(b).__name__ == 'Tensor'):
            raise Exception("Only tensors can be multiplied element-wise")

        ctx.save_for_backward(a, b)
        requires_grad = a.requires_grad or b.requires_grad

        out = tensor.Tensor(np.multiply(a.data, b.data),
                            requires_grad=requires_grad,
                            is_leaf=not requires_grad)
        out.children = [a, b]
        out.op = 'mul'
        return out