Example #1
def ApplyActivation(self):
    state = self.state
    if self.activation == deepnet_pb2.Hyperparams.LOGISTIC:
        cm.sigmoid(state)
    elif self.activation == deepnet_pb2.Hyperparams.TANH:
        cm.tanh(state)
    elif self.activation == deepnet_pb2.Hyperparams.RECTIFIED_LINEAR:
        # relu: build a 0/1 mask of the positive entries, then multiply
        state.greater_than(0, target=self.temp)
        state.mult(self.temp)
    elif self.activation == deepnet_pb2.Hyperparams.RECTIFIED_LINEAR_SMOOTH:
        # softplus: log(1 + exp(x))
        cm.log_1_plus_exp(state)
    elif self.activation == deepnet_pb2.Hyperparams.LINEAR:
        pass
    elif self.activation == deepnet_pb2.Hyperparams.SOFTMAX:
        # numerically stable softmax over each column: subtract the
        # column max before exponentiating, then normalize
        state.max(axis=0, target=self.temp)
        state.add_row_mult(self.temp, -1)
        cm.exp(state)
        state.sum(axis=0, target=self.temp)
        self.temp.reciprocal()
        state.mult_by_row(self.temp)
    elif self.activation == deepnet_pb2.Hyperparams.REPLICATED_SOFTMAX:
        # like SOFTMAX, but each column is scaled by self.NN / sum
        # instead of 1 / sum
        state.max(axis=0, target=self.temp)
        state.add_row_mult(self.temp, -1)
        cm.exp(state)
        state.sum(axis=0, target=self.temp)
        self.NN.divide(self.temp, target=self.temp)
        state.mult_by_row(self.temp)
    else:
        raise Exception('Unknown activation')
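
The SOFTMAX branch above is the standard max-subtraction trick for numerical stability. Here is a minimal standalone sketch of the same idiom, assuming only numpy and an initialized cudamat; every call below already appears in the examples on this page:

import numpy as np
import cudamat as cm

cm.cublas_init()

state = cm.CUDAMatrix(np.random.randn(4, 3))  # columns are examples
temp = cm.empty((1, 3))                       # one scratch cell per column

state.max(axis=0, target=temp)      # column-wise max
state.add_row_mult(temp, -1)        # subtract it so exp cannot overflow
cm.exp(state)                       # exponentiate in place
state.sum(axis=0, target=temp)      # column-wise normalizer
temp.reciprocal()
state.mult_by_row(temp)             # scale each column by 1/sum

print(state.asarray().sum(axis=0))  # each column now sums to ~1
cm.shutdown()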
Example #2
import math
import numpy as np
import cudamat as cm
from numba import cuda
from timeit import default_timer as timer
# cuBLAS wrapper from Anaconda Accelerate (assumed: the original snippet
# does not show where `blas` comes from)
from accelerate.cuda.blas import Blas
blas = Blas()

@cuda.jit
def mtanh(a):
    # elementwise tanh kernel; the original snippet does not show this
    # definition, so it is reconstructed here from how it is launched
    i, j = cuda.grid(2)
    if i < a.shape[0] and j < a.shape[1]:
        a[i, j] = math.tanh(a[i, j])

def tests():
    cm.cublas_init()

    a = np.random.rand(300, 500)
    b = np.random.rand(500, 300)

    start = timer()
    c = np.dot(a, b)
    nptime = timer() - start
    print('nptime', nptime)

    # gemm computes C(m,n) = A(m,k) B(k,n); for the shapes below
    # m=600, k=1500, n=300 (the original passed mismatched dimensions)
    x = np.array(np.random.rand(600, 1500), dtype='float32', order='F')
    y = np.array(np.random.rand(1500, 300), dtype='float32', order='F')
    z = np.zeros((600, 300), order='F', dtype='float32')

    stream = cuda.stream()

    dx = cuda.to_device(x)
    dy = cuda.to_device(y)
    dz = cuda.to_device(z)

    start = timer()
    blas.gemm('N', 'N', 600, 300, 1500, 1.0, dx, dy, 0.0, dz)
    cuda.synchronize()  # launches are asynchronous; sync before timing
    cutime = timer() - start
    print('cutime', cutime)

    dz.copy_to_host(z)  # copy back before printing a device array
    print(z[0])

    c = np.ones((1000, 1000), order='F', dtype='float32')
    print(c.shape)
    dc = cuda.to_device(c)

    # a (256, 256) block would exceed the 1024-threads-per-block limit,
    # so use a smaller block and compute the grid with integer division
    blockDim = (30, 30)
    gridDim = ((c.shape[0] + blockDim[0] - 1) // blockDim[0],
               (c.shape[1] + blockDim[1] - 1) // blockDim[1])

    start = timer()
    mtanh[gridDim, blockDim, stream](dc)
    stream.synchronize()
    tantime = timer() - start
    print('tantime', tantime)

    dc.copy_to_host(c, stream=stream)
    stream.synchronize()
    print(c)

    # cudamat calls are also asynchronous launches, so the two timings
    # below mostly measure launch overhead
    y = cm.CUDAMatrix(np.ones((1000, 1000)))

    start = timer()
    cm.tanh(y)
    cmtan = timer() - start
    print('cmtan', cmtan)

    x = cm.CUDAMatrix(np.random.rand(1000, 1500))
    y = cm.CUDAMatrix(np.random.rand(1500, 1000))

    start = timer()
    cm.dot(x, y)
    cmtime = timer() - start
    print('cmtime', cmtime)
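
Since printing a device array says little, a correctness check against numpy is more informative. A sketch with the corrected gemm dimensions; the Blas wrapper from Anaconda Accelerate is an assumption, as the original does not show where blas comes from:

import numpy as np
from numba import cuda
from accelerate.cuda.blas import Blas

blas = Blas()
x = np.asfortranarray(np.random.rand(600, 1500).astype('float32'))
y = np.asfortranarray(np.random.rand(1500, 300).astype('float32'))
z = np.zeros((600, 300), order='F', dtype='float32')

dz = cuda.to_device(z)
blas.gemm('N', 'N', 600, 300, 1500, 1.0, cuda.to_device(x), cuda.to_device(y), 0.0, dz)
dz.copy_to_host(z)
print(np.allclose(z, x.dot(y), rtol=1e-4))  # loose tolerance for float32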
Example #3
def Update(self, input):
    # `input` must have n_in elements. Returns the output as shape (2, 1),
    # so if you want to plot the data, keep a buffer.
    input = np.atleast_2d(input)
    Input = cm.CUDAMatrix(input)
    Input = Input.reshape([self.n_in, 1])
    # cudamat ops work in place: compute the preactivation before scaling
    # self.a, otherwise the recurrent term sees the already-decayed state
    pre = cm.dot(self.Wrr, self.a).add(cm.dot(self.Wir, Input)).add(self.Wbr)
    # leaky update: a <- (1 - leak) a + leak * tanh(Wrr a + Wir u + Wbr)
    self.a.mult(1 - self.leakrate).add(cm.tanh(pre).mult(self.leakrate))
    y = cm.dot(self.Wro, self.a)
    return y.asarray()
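
Update is a leaky echo-state reservoir step: a <- (1 - leak) a + leak * tanh(Wrr a + Wir u + Wbr). A self-contained sketch of the same update with made-up sizes; the weight names mirror the method, but their values here are arbitrary random choices:

import numpy as np
import cudamat as cm

cm.cublas_init()
n_in, n_res, leak = 3, 50, 0.3

Wir = cm.CUDAMatrix(0.1 * np.random.randn(n_res, n_in))   # input weights
Wrr = cm.CUDAMatrix(0.1 * np.random.randn(n_res, n_res))  # recurrent weights
Wbr = cm.CUDAMatrix(0.1 * np.random.randn(n_res, 1))      # bias
a = cm.CUDAMatrix(np.zeros((n_res, 1)))                   # reservoir state

u = cm.CUDAMatrix(np.random.randn(n_in, 1))               # one input sample
pre = cm.dot(Wrr, a).add(cm.dot(Wir, u)).add(Wbr)
a.mult(1 - leak).add(cm.tanh(pre).mult(leak))             # leaky update
print(a.asarray().shape)                                  # (50, 1)
cm.shutdown()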
Example #5
def step(self, dt=None, ctl=False):
    if dt is None:
        dt = self.DT
    # cudamat's mult/add work in place and return self, so write the scaled
    # values into the preallocated scratch buffers instead of rebinding them
    self.x.mult(1.0 - dt, target=self.xdt)
    self.r.mult(dt, target=self.rdt)
    self.z.mult(dt, target=self.zdt)
    # Euler step: x <- (1 - dt) x + dt (M r + wf z)
    self.xdt.add_dot(self.M, self.rdt)
    self.xdt.add_dot(self.wf, self.zdt)
    self.x.assign(self.xdt)
    # if ctl:
    #     self.x = self.x + cm.dot(self.wfc, (self.z_ctl * dt))
    cm.tanh(self.x, target=self.r)
    self.z = cm.dot(self.wo.T, self.r)
Example #6
    def __init__(self,
                 N=1000,
                 pz=1,
                 pg=0.1,
                 g=1.5,
                 alpha=1,
                 dt=0.1,
                 num_fits=1,
                 num_inputs=0,
                 state=None):
        cm.cublas_init()
        if state is not None:
            self.from_dict(state)
        else:
            self.N = N
            self.pg = pg
            self.pz = pz
            self.g = g
            self.alpha = alpha
            self.DT = dt
            self.num_fits = num_fits

            # sparse random recurrent matrix; with this scaling its
            # spectral radius is roughly g
            scale = 1.0 / np.sqrt(self.pg * self.N)
            M_rvs = stats.norm(loc=0, scale=scale).rvs
            self.M = sp.sparse.random(N, N, pg, data_rvs=M_rvs) * g
            self.M = cm.CUDAMatrix(self.M.toarray())
            # P is initialized to I/alpha (as in RLS/FORCE learning) and
            # stays on the host as a NumPy array
            self.P = (1.0 / self.alpha) * np.identity(N)
            # random feedback weights; the trained readout wo starts at zero
            self.wf = cm.CUDAMatrix(np.random.uniform(-1, 1, (N, num_fits)))
            #self.wo = np.expand_dims(stats.norm(loc=0,scale=(1.0/np.sqrt(N))).rvs(N),num_fits)
            self.wo = cm.CUDAMatrix(np.zeros((N, num_fits)))
            self.dw = np.zeros((N, num_fits))
            self.woc = np.zeros((N, 1))
            self.wfc = np.random.uniform(-1, 1, (N, 1))

            self.x = cm.CUDAMatrix(np.expand_dims(0.5 * np.random.randn(N), 1))
            self.xdt = cm.empty(self.x.shape).assign(0)
            self.r = cm.tanh(self.x)
            self.rdt = cm.empty(self.r.shape).assign(0)
            self.z = cm.CUDAMatrix(
                np.expand_dims(0.5 * np.random.randn(num_fits), 1))
            self.zdt = cm.empty(self.z.shape).assign(0)
            self.z_ctl = np.expand_dims(0.5 * np.random.randn(1), 1)
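
Judging by the matching attribute names (M, wf, wo, x, r, z, DT), this constructor and the step method in Example #5 belong to the same FORCE-style network class, whose name is not shown. A hypothetical driver, with Force a placeholder class name:

net = Force(N=200, num_fits=1, dt=0.1)  # Force is a made-up name
for _ in range(100):
    net.step()
print(net.z.asarray())                  # readout, shape (num_fits, 1)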
Example #7
def d_tanh(self, x):
    # 1 - tanh(x)^2; cm.tanh works in place, so use a separate target
    t = cm.tanh(x, target=cm.empty(x.shape))
    return t.mult(t).mult(-1).add(1)
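
As a sanity check, the in-place version matches numpy's tanh derivative; a small sketch assuming cudamat is available:

import numpy as np
import cudamat as cm

cm.cublas_init()
x = np.random.randn(5, 4)
xm = cm.CUDAMatrix(x)
t = cm.tanh(xm, target=cm.empty(xm.shape))
t.mult(t).mult(-1).add(1)               # 1 - tanh(x)^2 on the GPU
print(np.allclose(t.asarray(), 1 - np.tanh(x)**2, atol=1e-5))
cm.shutdown()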
Example #8
def tanh(self, x):
    # note: cm.tanh overwrites x in place and returns it
    return cm.tanh(x)
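
Because cm.tanh overwrites its argument, the wrapper hands back the same matrix object it was given; pass target= when the input must stay intact. A short illustration:

import numpy as np
import cudamat as cm

cm.cublas_init()
x = cm.CUDAMatrix(np.zeros((2, 2)))
print(cm.tanh(x) is x)                    # True: the result is x itself
y = cm.tanh(x, target=cm.empty(x.shape))  # out-of-place variant
cm.shutdown()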