Example #1
def test_tinygrad():
    # Tensor comes from tinygrad.tensor; U_init, V_init, W_init and
    # self.device are supplied by the enclosing test case.
    u = Tensor(U_init, device=self.device)
    v = Tensor(V_init, device=self.device)
    w = Tensor(W_init, device=self.device)
    x = u.mul(v).relu()
    y = u.mul(w).relu()
    out = x.add(y).mul(y).relu()
    out = out.logsoftmax()
    out = out.sum()
    out.backward()
    return out.cpu().data, u.cpu().grad.data, v.cpu().grad.data, w.cpu().grad.data
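
In tinygrad's test suite this helper is paired with an identical graph built in PyTorch, and the outputs and gradients are compared numerically. A minimal sketch of that pattern, assuming U_init, V_init and W_init are 2-D NumPy float arrays and torch is installed (log_softmax's dim=1 mirrors tinygrad's logsoftmax on 2-D inputs):

import numpy as np
import torch

def test_pytorch():
    # Same graph as test_tinygrad(), built with torch autograd
    u = torch.tensor(U_init, requires_grad=True)
    v = torch.tensor(V_init, requires_grad=True)
    w = torch.tensor(W_init, requires_grad=True)
    x = u.mul(v).relu()
    y = u.mul(w).relu()
    out = x.add(y).mul(y).relu()
    out = out.log_softmax(dim=1)
    out = out.sum()
    out.backward()
    return out.detach().numpy(), u.grad.numpy(), v.grad.numpy(), w.grad.numpy()

for a, b in zip(test_tinygrad(), test_pytorch()):
    np.testing.assert_allclose(a, b, atol=1e-5)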
Example #2
    def forward(self, x):
        modules = self.blocks[1:]
        outputs = {}  # Cached outputs, needed by route and shortcut layers
        write = 0  # Set to 1 once the first YOLO head has produced detections

        for i, module in enumerate(modules):
            module_type = module["type"]
            st = time.time()  # Start time for the (commented-out) per-layer timing below
            if module_type in ("convolutional", "upsample"):
                for layer in self.module_list[i]:
                    x = layer(x)

            elif module_type == "route":
                # Route layers concatenate feature maps from earlier layers.
                # Positive indices in the cfg are absolute, so convert them
                # to offsets relative to the current layer i.
                layers = [int(a) for a in module["layers"]]

                if layers[0] > 0:
                    layers[0] = layers[0] - i
                if len(layers) == 1:
                    x = outputs[i + layers[0]]
                else:
                    if layers[1] > 0:
                        layers[1] = layers[1] - i

                    map1 = outputs[i + layers[0]]
                    map2 = outputs[i + layers[1]]

                    # Concatenate along the channel axis
                    x = Tensor(
                        np.concatenate((map1.cpu().data, map2.cpu().data), 1))

            elif module_type == "shortcut":
                # Residual connection: add an earlier output to the previous one
                from_ = int(module["from"])
                x = outputs[i - 1] + outputs[i + from_]

            elif module_type == "yolo":
                anchors = self.module_list[i][0].anchors
                inp_dim = int(self.net_info["height"])
                # inp_dim = 416

                num_classes = int(module["classes"])
                # Transform raw feature maps into detection boxes
                x = predict_transform(x, inp_dim, anchors, num_classes)
                if not write:
                    detections = x
                    write = 1
                else:
                    # Stack detections from every YOLO head along the box axis
                    detections = Tensor(
                        np.concatenate((detections.cpu().data, x.cpu().data),
                                       1))

            # print(module_type, 'layer took %.2f s' % (time.time() - st))
            outputs[i] = x

        return detections
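
The index arithmetic in the route branch is the subtle part: cfg entries may be negative (relative to the current layer) or non-negative (absolute), and the code normalizes both before looking up outputs. A standalone sketch of that convention, using a hypothetical resolve_route helper:

def resolve_route(i, layer_idx):
    """Absolute index of the routed layer, seen from current layer i."""
    if layer_idx > 0:            # absolute index -> convert to a relative offset
        layer_idx = layer_idx - i
    return i + layer_idx         # apply the relative offset

# At layer i=10, a relative entry of -4 and an absolute entry of 6
# both resolve to layer 6:
assert resolve_route(10, -4) == 6
assert resolve_route(10, 6) == 6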
Example #3
    def backward(ctx, grad_output):
        # Broadcast the incoming gradient back to the input shape saved in forward()
        in_shape, = ctx.saved_tensors
        return MetalBuffer(in_shape, grad_output)


# METAL=1 python3 test/test_ops.py TestOps.test_relu
if __name__ == "__main__":
    b1 = MetalBuffer(10, np.ones(10))
    b2 = MetalBuffer(10, np.ones(10))
    out = MetalBuffer(10, None)

    # cmd_buffer() and add_shader are defined earlier in the file (not shown);
    # the selector below matches an MPSBinaryImageKernel such as MPSImageAdd.
    mtl_buffer = cmd_buffer()
    add_shader.encodeToCommandBuffer_primaryTexture_secondaryTexture_destinationTexture_(
        mtl_buffer, b1.texture, b2.texture, out.texture)
    mtl_buffer.commit()

    print(b1.toCPU())
    print(b2.toCPU())
    print(out.toCPU())

    from tinygrad.tensor import Tensor, Device

    r1 = Tensor([-2, -1, 0, 2, 4], device=Device.METAL)
    r2 = r1.relu()
    r3 = r2.sum()
    r3.backward()
    print(r1.cpu())
    print(r2.cpu())
    print(r3.cpu())
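
The Metal backward above plugs into tinygrad's Function API. For comparison, a minimal CPU-side ReLU in the same style (a sketch assuming early tinygrad's Function/register interface and NumPy buffers):

import numpy as np
from tinygrad.tensor import Function, register

class ReLU(Function):
    def forward(ctx, input):
        ctx.save_for_backward(input)
        return np.maximum(input, 0)

    def backward(ctx, grad_output):
        input, = ctx.saved_tensors
        # Gradient flows only where the input was positive
        return grad_output * (input >= 0)

register('relu', ReLU)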
Example #4
#!/usr/bin/env python3
import numpy as np
from tinygrad.tensor import Tensor

a = Tensor([-2, -1, 0, 1, 2]).ane()  # move the tensor to the Apple Neural Engine
print(a.cpu())
b = a.relu()  # ReLU computed on the ANE; expected: [0, 0, 0, 1, 2]
print(b.cpu())
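
The expected values are easy to verify on the CPU; a NumPy one-liner for comparison:

import numpy as np
print(np.maximum(np.array([-2, -1, 0, 1, 2]), 0))  # -> [0 0 0 1 2]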