Example #1
def tensor(self, initial_value=None, op=None):
    """
    The `tensor` method defines a new tensor with the given initial value
    and operation.
    """
    return Tensor(initial_value=initial_value, graph=self, op=op)
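For orientation, a minimal usage sketch of the method above. The `Graph` class and the NumPy initial value are assumptions; only the `tensor` signature comes from the snippet.

import numpy as np

g = Graph()  # hypothetical container exposing the tensor() method above
x = g.tensor(initial_value=np.ones((2, 2)))  # leaf tensor: an initial value, no producing op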
Example #2
def one_hot_encoding(x: int, classes: int) -> Tensor:
    assert 0 <= x < classes
    encoded = np.zeros((classes, 1))
    encoded[x, 0] = 1
    return Tensor(encoded)
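A quick usage sketch; as in the function body, the returned `Tensor` wraps a NumPy column vector:

label = one_hot_encoding(2, 5)
# the wrapped array is [[0], [0], [1], [0], [0]]: a 5x1 column with a 1 at index 2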
Example #3
from tensor import Tensor
from PIL import Image
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import os
import smtplib
import random

if __name__ == '__main__':
    url = ''
    t = Tensor(url)
    folder = "/home/mahima/images"
    while True:
        for filename in os.listdir(folder):
            numberoffiles = len(os.listdir(folder))  # images still waiting to be classified
            img = random.choice(os.listdir(folder))
            image = folder + '/' + img
            print(numberoffiles)
            res = t.classify(image)
            # send the classification result as an email
            fromaddr = ""
            toaddr = ""
            msg = MIMEMultipart()
            msg['From'] = "Tensorflow"
            msg['To'] = "Mahima"
            msg['Subject'] = "TENSORFLOW CLASSIFICATION RESULT"
            body = '\n' + res
            msg.attach(MIMEText(body, 'plain'))
            filename = image
            attachment = open(image, "rb")  # open before deleting so the handle stays readable
            os.remove(image)
Example #4
import time

import numpy as np
import torch
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns

from tensor import Tensor  # project-local, as in the other examples
# LinearLayer, TTLayer and logger are also project-local; their modules are
# not shown in the source, so their imports are omitted here.


def test_tt_layer_time():
    weights = np.random.rand(4096, 4096).astype(np.float32)
    data = np.random.rand(1, 4096).astype(np.float32)
    input = torch.from_numpy(data)  # a single 4096-dimensional input row
    times_4 = []
    times_8 = []

    # TT dimensionalities (number of cores) to test; rs1/rs2 hold the matching
    # rank vectors for rank 8 and rank 4 respectively
    ds = [4, 6, 8, 10]
    rs1 = [[1, 8, 8, 8, 1], [1, 8, 8, 8, 8, 8, 1], [1, 8, 8, 8, 8, 8, 8, 8, 1],
           [1, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1]]
    rs2 = [[1, 4, 4, 4, 1], [1, 4, 4, 4, 4, 4, 1], [1, 4, 4, 4, 4, 4, 4, 4, 1],
           [1, 4, 4, 4, 4, 4, 4, 4, 4, 4, 1]]
    rss = [rs2, rs1]
    times = [times_4, times_8]
    print('linear')
    layer = LinearLayer(4096, 4096)
    layer.weight.data = torch.from_numpy(weights)
    # two warm-up passes, then time the third
    res = layer.forward(input)
    res = layer.forward(input)
    start = time.time()
    res = layer.forward(input)
    or_t = time.time() - start  # baseline runtime of the dense layer
    print('===========')
    rs3 = [or_t * 1000] * 4  # dense baseline in ms, repeated across all d values
    for j in range(2):
        rs = rss[j]
        for i in range(4):
            weight_tensor = Tensor(weights, from_matrix=True, d=ds[i])
            # [4, 32, 256, 1494, 4096, 512, 64, 16, 4]
            tt_ranks = rs[i]
            print('d=', ds[i])
            print('tt_ranks', tt_ranks)

            t_tensor = torch.from_numpy(weights)  # dense weights as a torch tensor

            Gs = weight_tensor.tt_with_ranks(tt_ranks)

            n_elements = int(np.prod(weight_tensor.T.shape))  # entries in the uncompressed tensor
            logger.info(f'tt parameters: {len(Gs)}')

            # note: this file is overwritten on every loop iteration
            np.save('../CNNs/data/tt_fc_4_alexnet_cores.npy', Gs)

            tt_layer = TTLayer(in_features=weight_tensor.ns,
                               out_features=weight_tensor.ms,
                               tt_ranks=tt_ranks)

            # two warm-up passes, then time the third (as for the dense baseline)
            tt_layer.forward(input)
            tt_layer.forward(input)

            start = time.time()
            tt_layer.forward(input)
            t = time.time() - start
            times[j].append(t * 1000)  # ms
            print('===========')

    plt.plot(ds, times_4, sns.xkcd_rgb["amber"], linewidth=3)
    for p in range(len(ds)):
        plt.plot([ds[p]], [times_4[p]], 'o', color=sns.xkcd_rgb["amber"])

    plt.plot(ds, times_8, sns.xkcd_rgb["dusty red"], linewidth=3)
    for p in range(len(ds)):
        plt.plot([ds[p]], [times_8[p]], 'o', color=sns.xkcd_rgb["dusty red"])

    plt.plot(ds, rs3, sns.xkcd_rgb["medium green"], linewidth=3)
    for p in range(len(ds)):
        plt.plot([ds[p]], [rs3[p]], 'o', color=sns.xkcd_rgb["medium green"])

    p1 = mpatches.Patch(color=sns.xkcd_rgb["amber"], label='rk = 4')
    p2 = mpatches.Patch(color=sns.xkcd_rgb["dusty red"], label='rk = 8')
    p3 = mpatches.Patch(color=sns.xkcd_rgb["medium green"],
                        label='Original layer')

    plt.xlabel('d (number of TT cores)')
    plt.ylabel('Runtime, ms')

    plt.legend(handles=[p1, p2, p3])
    plt.show()
Example #5
def __init__(self, input_size: int, hidden_size: int, output_size: int):
    super().__init__()
    self.hidden_unit = RNNHiddenUnit(input_size, hidden_size)
    self.W_hy = Tensor(np.random.normal(loc=0, scale=0.1, size=(output_size, hidden_size)))
    self.b_y = Tensor(np.random.normal(loc=0, scale=0.1, size=(output_size, 1)))
    self.register_parameters([self.hidden_unit, self.W_hy, self.b_y])
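A hedged construction sketch; the enclosing class name `RNN` is an assumption, and the comment only restates the shapes chosen in `__init__`:

rnn = RNN(input_size=8, hidden_size=16, output_size=4)  # hypothetical class name
# W_hy has shape (4, 16) and b_y shape (4, 1), so an output step can compute
# y_t = W_hy @ h_t + b_y for a hidden state h_t of shape (16, 1)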
Example #6

import numpy as np
import sklearn.datasets as skd
import torch
import torch.optim as optim

from tensor import Tensor  # project-local, as in the other examples
# Optimizer is project-local too; its module is not shown above.

X, y = skd.load_diabetes(return_X_y=True)

partition = len(X) // 10

X_train = X[:-partition]
y_train = y[:-partition]

X_test = X[-partition:]
y_test = y[-partition:]


# y_out = matmul( (batch, 10), (10, 1) ) + (1,) = (batch, 1)

W = Tensor(np.random.rand(10, 1) - 0.5)
b = Tensor(np.random.rand(1) - 0.5)

Wt = torch.tensor(W.value, requires_grad=True)
bt = torch.tensor(b.value, requires_grad=True)

batch_size = 16
learning_rate = 0.025
optimizer = Optimizer([W, b], lr=learning_rate)
optimizer_t = optim.SGD([Wt, bt], lr=learning_rate, momentum=0.0)

print("GOING TO START")

losses = []
losses_t = []
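The excerpt stops before the training loop. A minimal sketch of one step for the PyTorch baseline only; the custom `Optimizer` side would mirror it, but its exact API is not shown above:

xb = torch.tensor(X_train[:batch_size])                 # (16, 10) batch of inputs
yb = torch.tensor(y_train[:batch_size]).reshape(-1, 1)  # (16, 1) targets
pred_t = xb @ Wt + bt                                   # linear model: (16, 1)
loss_t = ((pred_t - yb) ** 2).mean()                    # MSE loss
optimizer_t.zero_grad()
loss_t.backward()
optimizer_t.step()
losses_t.append(loss_t.item())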
Example #7
import numpy as np
from tensor import Tensor
from layers import Sequential, Linear
from activations import Tanh, Sigmoid
from optimizers import SGD
from losses import MSELoss

np.random.seed(0)

data = Tensor(np.array([[0, 0], [0, 1], [1, 0], [1, 1]]), autograd=True)
target = Tensor(np.array([[0], [1], [0], [1]]), autograd=True)

model = Sequential([Linear(2, 3), Tanh(), Linear(3, 1), Sigmoid()])
criterion = MSELoss()

optim = SGD(parameters=model.get_parameters(), alpha=1)

for i in range(10):
    pred = model.forward(data)
    loss = criterion.forward(pred, target)

    loss.backward()
    optim.step()
    print(loss)
Example #8
def __call__(self, input: Tensor) -> Tensor:
    n_samples = input.data.shape[0]
    return input.mm(self.weight) + self.bias.expand(repeat=n_samples, axis=0)
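A hedged usage sketch: assuming `weight` has shape (in_features, out_features), the call maps an (n_samples, in_features) batch to (n_samples, out_features), with `expand` broadcasting the bias across the batch:

layer = Linear(2, 3)  # hypothetical constructor, in the spirit of Example #7
out = layer(Tensor(np.random.rand(4, 2)))  # (4, 2).mm((2, 3)) + bias -> shape (4, 3)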
Example #9
def compute_body(name,
                 lambda_ivs,
                 fcompute,
                 shape=(),
                 dtype=None,
                 tensor=None,
                 attrs=OrderedDict()):
    """Create a stage and perform the computation.

    If `tensor` is not `None`, the given tensor is updated in place and `None`
    is returned; otherwise a newly created tensor is returned.

    Parameters
    ----------
    name : str
        The name of the stage

    lambda_ivs : list of IterVar
        A list of the iteration variables in the lambda function, if any

    fcompute : callable
        The computation rule

    shape : tuple, optional
        The output shape or the iteration domain

    dtype : Type, optional
        The data type of the output/updated tensor

    tensor : Tensor, optional
        The tensor to be updated. Create a new one if it is `None`

    Returns
    -------
    Tensor or None
    """
    var_list = [i.var for i in lambda_ivs]
    return_tensor = tensor is None

    with Stage(name, dtype, shape) as stage:
        if not return_tensor:
            stage.input_stages.add(tensor.last_update)
        else:
            tensor = Tensor(shape, stage._dtype, name, stage._buf)
        buffer_var = tensor._buf.data
        dtype = tensor.dtype
        shape = tensor.shape

        stage.stmt_stack.append([])
        ret = fcompute(*var_list)

        stage.lhs_tensors.add(tensor)
        for t in stage.lhs_tensors:
            t.last_update = stage

        stmt = None
        if ret is None:
            # replace all hcl.return_ with Store stmt
            indices = lambda_ivs
            index, _, _ = get_index(shape, indices, 0)
            stmt = stage.pop_stmt()
            stmt = ReplaceReturn(buffer_var, dtype, index).mutate(stmt)
            stmt = make_for(indices, stmt, 0)
        elif isinstance(ret,
                        (TensorSlice, Scalar, _expr.Expr, numbers.Number)):
            indices = lambda_ivs
            index, _, _ = get_index(shape, indices, 0)
            stage.emit(_make.Store(buffer_var, _make.Cast(dtype, ret), index))
            stmt = make_for(indices, stage.pop_stmt(), 0)
        elif isinstance(ret, Tensor):  # reduction
            ret_ivs = [
                _IterVar((0, ret.shape[i]), ret.name + "_i" + str(i), 0)
                for i in range(0, len(ret.shape))
            ]
            non_reduce_ivs = []
            indices = []
            rid = 0
            for iv in lambda_ivs:
                if iv.var.name[0] == "_":
                    indices.append(ret_ivs[rid])
                    rid += 1
                else:
                    indices.append(iv)
                    non_reduce_ivs.append(iv)
            if rid != len(ret.shape):
                raise APIError(
                    "Incorrect number of reduction axes in lambda arguments")
            index, _, _ = get_index(shape, indices, 0)
            st = _make.Store(buffer_var,
                             _make.Cast(dtype, ret[tuple(ret_ivs)]), index)
            stage.emit(make_for(ret_ivs, st, 0))
            stmt = stage.pop_stmt()
            stage.input_stages.remove(stage)
            if non_reduce_ivs:
                stmt = make_for(non_reduce_ivs, stmt, 0)
        else:
            raise APIError("Unknown return type of the computation rule")
        # add attributes to the loop
        if isinstance(stmt, _stmt.For):
            stmt = _make.For(stmt.loop_var, stmt.min, stmt.extent, 0,
                             0, stmt.body, list(attrs.keys()),
                             list(attrs.values()))
        stage.emit(stmt)
        stage.axis_list = indices + stage.axis_list

    if return_tensor:
        tensor._tensor = stage._op
        return tensor
    return None
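For orientation, a hedged sketch of a direct call that exercises the expression branch above. The `_IterVar` construction is copied from the reduction branch; the literal compute rule is an assumption:

ivs = [_IterVar((0, s), 'i' + str(k), 0) for k, s in enumerate((4, 4))]
out = compute_body('C', ivs, lambda i, j: i + j, shape=(4, 4), dtype='int32')
# tensor=None, so a new 4x4 Tensor is created, filled via the Store path, and returned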