コード例 #1
0
    def __next__(self):
        """Return the next mini-batch as a tuple of pre-allocated tensors.

        Raises StopIteration once every sample index has been consumed.
        """
        if self._n == len(self._dataset):
            raise StopIteration

        # Pre-allocate one batch-major output tensor per returned field.
        ret = ()
        for i in range(self._num_ret):
            ret += (coatl.tensor(shape=(self._batch_size, ) +
                                 self._sizes[i]), )

        for i in range(self._batch_size):
            sample = self._dataset[self._indexes[self._n]]

            for j in range(self._num_ret):
                if isinstance(sample[j], coatl.tensor):
                    ret[j]._data[i] = sample[j]._data
                elif isinstance(sample[j], np.ndarray):
                    ret[j]._data[i] = sample[j]
                else:
                    # BUG FIX: '%d' cannot format a string (the original would
                    # crash while building this message), and the offending
                    # type is the sample item's, not the output tensor's.
                    raise TypeError(
                        'Dataset returned unhandled type \'%s\' to dataloader'
                        % str(type(sample[j])))
            self._n += 1
            # The final batch may be short when the dataset size is not a
            # multiple of the batch size.
            if self._n == len(self._dataset):
                break

        return ret
コード例 #2
0
    def forward(self, x, axis=None):
        """Exponentiate-and-normalize x, using the max-subtraction trick
        for numerical stability.

        The reduction runs over every axis except ``axis`` (NOTE(review):
        ``axis`` here is the axis that is *kept*, not reduced — confirm
        against callers). When ``axis`` is None the reduction covers all
        axes of x.

        Raises:
            TypeError: if axis is neither None nor an int.
            ValueError: if axis is out of range for x.
        """
        if axis is not None and not isinstance(axis, int):
            raise TypeError(
                'Error expected param axis to be type int, got %s' %
                str(type(axis)))
        if axis is not None and axis >= len(x.shape):
            raise ValueError(
                'Specified axis \'%d\', which is greater than max dim of x \'%d\''
                % (axis, len(x.shape) - 1))
        # Axes reduced over: everything except `axis` (all axes when None).
        ax = np.arange(len(x.shape))
        if axis is not None:
            ax = np.delete(ax, axis)

        tile_sz = list(x._data.shape)
        z_sz = [1] * len(tile_sz)
        # BUG FIX: the original always indexed tile_sz[axis], which raised
        # TypeError whenever axis was None (the default). With axis=None the
        # reductions produce scalars, so the reshape target stays all-ones
        # and the tiling covers the full shape — no swap is needed.
        if axis is not None:
            tile_sz[axis], z_sz[axis] = z_sz[axis], tile_sz[axis]

        m = np.max(x._data, axis=tuple(ax))
        m = np.tile(np.reshape(m, tuple(z_sz)), tuple(tile_sz))
        exp = np.exp(x._data - m)
        z = np.sum(exp, axis=tuple(ax))
        z = np.tile(np.reshape(z, tuple(z_sz)), tuple(tile_sz))

        ret_tensor = coatl.tensor(data=exp / z)
        return ret_tensor
コード例 #3
0
 def forward(self, activations, target):
     """Half sum-of-squared-errors per sample, optionally batch-averaged."""
     total = 0.
     for row in range(activations.shape[0]):
         diff = target._data[row, :] - activations._data[row, :]
         total += 0.5 * np.sum(diff**2)
     if self._average:
         total /= activations.shape[0]
     return coatl.tensor(data=np.asarray([total]))
コード例 #4
0
 def forward(self, x):
     """Linear map x @ W; with bias enabled, a constant-1 column is
     appended to x so the last row of W acts as the bias."""
     features = x._data
     if self._bias:
         ones = np.ones((x.shape[0], 1))
         features = np.concatenate((features, ones), axis=1)
     return coatl.tensor(data=features @ self._param._data)
コード例 #5
0
 def forward(self, activations, target):
     """Binary cross-entropy between activations and target, stabilised
     with self._eps inside the logs; divided by the feature count and,
     when self._average is set, by the batch size too."""
     if np.sum(activations._data < 0):
         raise ValueError(
             'Logistic Loss function can not take negative values.')
     a = activations._data
     t = target._data
     per_elem = t * np.log(a + self._eps) + (1 - t) * np.log(1 - a + self._eps)
     loss = -np.sum(per_elem)
     loss = loss / activations.shape[1]
     if self._average:
         loss = loss / activations.shape[0]
     return coatl.tensor(data=np.asarray([loss]))
コード例 #6
0
    def __getitem__(self, idx):
        """Return the (image, label) pair at idx, applying the optional
        transform to the image and wrapping the label in a coatl tensor."""
        if idx >= self._labels.shape[0]:
            raise IndexError('Index %d, greater than dataset size %d' %
                             (idx, self._labels.shape[0]))

        img = self._images[idx]
        if self._tform is not None:
            img = self._tform(img)

        if self._oneHot:
            # 10-way one-hot encoding of the stored class index.
            lbl = np.zeros((10, ))
            lbl[self._labels[idx]] = 1
        else:
            # Length-1 slice keeps lbl a numpy array rather than a scalar.
            lbl = self._labels[idx:idx + 1]

        return img, coatl.tensor(data=lbl)
コード例 #7
0
    def __init__(self,
                 input_size,
                 output_size,
                 bias=True,
                 initializer='Gaussian',
                 std=None):
        """Linear layer mapping input_size features to output_size.

        Args:
            input_size: number of input features.
            output_size: number of output features.
            bias: when True, one extra weight row is allocated so the bias
                can be folded into the weight matrix.
            initializer: 'Gaussian' or 'Zero'.
            std: standard deviation forwarded to the Gaussian initializer.

        Raises:
            ValueError: for an unrecognized initializer name.
        """
        # BUG FIX: super(layer).__init__() creates an *unbound* super object
        # and initializes that object itself — the parent class initializer
        # was never run. Zero-argument super() calls it correctly.
        super().__init__()
        self._bias = bias
        self._in_sz = input_size
        if self._bias:
            # Bias lives as one extra input row of the weight matrix.
            self._in_sz += 1
        self._out_sz = output_size

        self._param = coatl.tensor(shape=(self._in_sz, self._out_sz))
        self._param._data = np.random.randn(*self._param.shape)

        if initializer == 'Gaussian':
            self._gaussian_initializer(std)
        elif initializer == 'Zero':
            self._zero_initializer()
        else:
            # BUG FIX: corrected 'Unkown' typo in the error message.
            raise ValueError('Unknown initializer: \'%s\'' % initializer)
コード例 #8
0
ファイル: CELoss.py プロジェクト: csimo005/EE260-Assignments
 def forward(self, activations, target):
     """Negative log-likelihood of each sample's target-class activation,
     optionally averaged over the batch."""
     rows = np.arange(activations.shape[0])
     picked = activations._data[rows, target._data]
     loss = -np.sum(np.log(picked))
     if self._average:
         loss = loss / activations.shape[0]
     return coatl.tensor(data=np.asarray([loss]))
コード例 #9
0
ファイル: DropOut.py プロジェクト: csimo005/EE260-Assignments
 def forward(self, x):
     """Zero out elements of x at random, keeping each with probability
     self._p.

     NOTE(review): the surviving activations are not rescaled by 1/p
     (no inverted dropout), so train/eval scales differ — confirm intended.
     """
     # BUG FIX: binomial(n=2, ...) draws values in {0, 1, 2}, so some
     # activations were randomly *doubled*. A dropout mask is one
     # Bernoulli trial per element, i.e. n=1, yielding a {0, 1} mask.
     mask = np.random.binomial(1, self._p, x.shape)
     return coatl.tensor(data=mask * x._data)
コード例 #10
0
ファイル: sigmoid.py プロジェクト: csimo005/EE260-Assignments
 def forward(self, x):
     """Elementwise logistic sigmoid: 1 / (1 + exp(-x))."""
     denom = 1 + np.exp(-x._data)
     activated = 1 / denom
     return coatl.tensor(data=activated)
コード例 #11
0
def createLabel(label):
    """Collapse labels into a two-class one-hot array: values <= 4 set
    column 0, values > 4 set column 1."""
    flat = np.reshape(label._data, (-1, ))
    data = np.zeros((label.shape[0], 2))
    data[flat <= 4, 0] = 1
    data[flat > 4, 1] = 1
    return coatl.tensor(data=data)
コード例 #12
0
 def forward(self, x):
     """Leaky rectifier: identity for positive entries, scaled by
     self._alpha for entries <= 0."""
     data = np.where(x._data <= 0, self._alpha * x._data, x._data)
     ret_tensor = coatl.tensor(data=data)
     return ret_tensor
コード例 #13
0
import os
import sys

# Make the project root importable when running this script directly.
sys.path.append(os.path.join(os.getcwd()))

import coatl
import coatl.layers as layers

# Smoke test: push one 10-feature sample through a bias-free 10->5 linear
# layer and show the resulting shape.
X = coatl.tensor(shape=(1, 10))
W = layers.linear(10, 5, bias=False)
Y = W(X)
print(Y.shape)