コード例 #1
0
ファイル: test_conv.py プロジェクト: DEVESHTARASIA/Theano
    def test_bug_josh_reported(self):
        # Regression test for a bug reported by Josh: after a bad merge the
        # three lines below started to fail.  See
        # http://groups.google.com/group/theano-dev/browse_thread/thread/8856e7ca5035eecb
        # Building the graph must simply not raise.
        lhs = theano.tensor.matrix()
        rhs = theano.tensor.matrix()
        conv.conv2d(lhs, rhs)
コード例 #2
0
 def __init__(self, input, depth, length, width, in_l, in_w, first_layer=False):
     """Sum of per-depth-slice 2-d convolutions.

     Each depth slice of ``input`` is convolved with the matching slice of
     the weight tensor W and the results are accumulated into self.output.
     The first layer uses the default ('valid') border mode, deeper layers
     use 'full'.
     """
     bound = 1. / np.sqrt(width * length * depth)
     self.W = theano.shared(
         np.random.uniform(low=-bound, high=bound,
                           size=(depth, length, width)).astype(dtype=theano.config.floatX))
     # Select the border mode once instead of branching at every call site.
     conv_kwargs = {} if first_layer else {'border_mode': 'full'}
     self.output = conv.conv2d(input[0], self.W[0],
                               image_shape=(in_l, in_w),
                               filter_shape=(length, width), **conv_kwargs)
     for d in range(1, depth):
         self.output = self.output + conv.conv2d(input[d], self.W[d],
                                                 image_shape=(in_l, in_w),
                                                 filter_shape=(length, width),
                                                 **conv_kwargs)
コード例 #3
0
ファイル: test_conv.py プロジェクト: hamelphi/Theano
 def test_fail(self):
     """
     Test that conv2d fails for dimensions other than 2 or 3.
     """
     # Bug fix: the previous ``try: conv...; self.fail() except: pass``
     # pattern also swallowed the AssertionError raised by self.fail(),
     # so this test could never actually fail.  assertRaises asserts the
     # intent directly: building the graph with bad ranks must raise.
     self.assertRaises(Exception, conv.conv2d, T.dtensor4(), T.dtensor3())
     self.assertRaises(Exception, conv.conv2d, T.dtensor3(), T.dvector())
コード例 #4
0
    def __dealWithOneDoc(self, DocSentenceCount0, oneDocSentenceCount1, \
                         docs, corpusPos, oneDocSentenceWordCount, docW, docB, sentenceW, sentenceB, posW, posB):
        """Embed a single document from its sentence boundaries.

        Scans consecutive (previous, current) boundary pairs of
        ``oneDocSentenceWordCount`` to embed every sentence twice — once
        from ``docs`` and once from ``corpusPos`` (presumably word and POS
        features; confirm against callers) — concatenates the two embedding
        stacks, then convolves and pools them into one flat doc vector.
        """
#         t = T.and_((shareRandge < oneDocSentenceCount1 + 1),  (shareRandge >= DocSentenceCount0)).nonzero()
        # Restrict to the sentence boundaries belonging to this document.
        oneDocSentenceWordCount = oneDocSentenceWordCount[DocSentenceCount0:oneDocSentenceCount1 + 1]
        
        # taps=[-1, -0] hands each scan step the previous and the current
        # boundary, i.e. one sentence's [start, end) row range.
        sentenceResults0, _ = theano.scan(fn=self.__dealWithSentence,
                            non_sequences=[docs, sentenceW, sentenceB],
                             sequences=[dict(input=oneDocSentenceWordCount, taps=[-1, -0])],
                             strict=True)
        sentenceResults1, _ = theano.scan(fn=self.__dealWithSentence,
                            non_sequences=[corpusPos, posW, posB],
                             sequences=[dict(input=oneDocSentenceWordCount, taps=[-1, -0])],
                             strict=True)
        sentenceResults = T.concatenate([sentenceResults0, sentenceResults1], axis=1)
#         p = printing.Print('docPool')
#         docPool = p(docPool)
#         p = printing.Print('sentenceResults')
#         sentenceResults = p(sentenceResults)
#         p = printing.Print('doc_out')
#         doc_out = p(doc_out)
        doc_out = conv.conv2d(input=sentenceResults, filters=docW)
        # Pool over (up to) the whole document height: __MAXDIM rows, 1 col.
        docPool = downsample.max_pool_2d(doc_out, (self.__MAXDIM, 1), mode=self.__pooling_mode, ignore_border=False)
        docOutput = T.tanh(docPool + docB.dimshuffle([0, 'x', 'x']))
        doc_embedding = docOutput.flatten(1)
        return doc_embedding
コード例 #5
0
ファイル: cnn_layer.py プロジェクト: rocharhs/nlp_nn
    def __init__(self, input, n_in,n_out,filter_size,activation=T.tanh):
        """1-d convolutional layer over a (time, n_in) input matrix.

        Zero-pads the input along the time axis, convolves with ``n_out``
        filters of height ``filter_size``, and applies ``activation`` with
        a per-filter bias.  Output shape: (time, n_out).
        """
        delta = 0.01
        self.input = input
        self.n_in = n_in
        self.n_out = n_out
        n_filters = n_out
        self.name = 'C1D' + str(n_filters) + 'x'+str(filter_size)

        self.W = theano.shared(
              (np.random.uniform(-1,1,(n_filters, filter_size,n_in)) * delta).astype(T.config.floatX))
        # Bug fix: cast the bias to floatX like W — a bare np.zeros() is
        # float64 and silently upcasts the whole graph (breaks GPU use).
        self.b = theano.shared(np.zeros((n_out,)).astype(T.config.floatX))

        self.params = [self.W, self.b]
        # Bug fix: floor division keeps the symbolic pad amount integral;
        # plain '/' yields a float under true division and breaks indexing.
        pad = self.W.shape[1] // 2
        zeros = T.zeros((input.shape[0] + 2*pad, input.shape[1]))
        xPad = T.set_subtensor(zeros[pad:input.shape[0]+pad], input)

        out = conv.conv2d(
                   input=xPad,
                   filters=self.W)

        # 'valid' conv leaves a singleton trailing axis; drop it and put
        # time first: (time, n_filters).
        out = out[:,:,0].dimshuffle(1,0)
        out = activation(self.b + out)

        self.output = out
        self.updates = None
コード例 #6
0
ファイル: impulse.py プロジェクト: mmyros/pyglm
 def filter_spike_train(n,S,taus):
     """ Helper function to filter the spike train
     """
     # Pad both the filter and the n-th spike column to rank 3 so the
     # signal-module conv2d applies, then run a 'full' convolution and
     # strip the singleton leading/trailing axes from the result.
     kernel = T.shape_padright(filt_fn(taus[n]), n_ones=1)
     spikes = T.shape_padright(S[:,n], n_ones=1)
     return conv2d(spikes, kernel, border_mode='full')[0,:,0]
コード例 #7
0
ファイル: conv.py プロジェクト: zeka0/ether
    def connect(self, *layers):
        # Wire this layer to exactly one predecessor and build a strided
        # "identity" convolution: an all-ones (1, 1) filter with the layer's
        # subsample shape simply picks pixels, then scales by coef and adds
        # the bias.
        assert len(layers) == 1
        # NOTE(review): 'intputShape' looks like a typo for 'inputShape' —
        # verify nothing else reads this attribute before renaming it.
        self.intputShape = layers[0].get_outputShape()

        self.set_inputTensor( layers[0].get_outputTensor() )
        filter = np.ones( (1, 1) )  # shadows the builtin 'filter'
        outputTensor = self.coef * conv2d( self.get_inputTensor(), filters=filter, subsample=self.subSampleShape ) + self.bias
        self.set_outputTensor( outputTensor )
コード例 #8
0
ファイル: DocNN.py プロジェクト: shockline/KnowlegeableCNN
 def __dealWithSentence(self, wc0, wc1, docs, W, B):
     """Embed the sentence spanning rows [wc0, wc1) of ``docs``.

     Convolves the word rows with filters ``W``, pools over the whole
     sentence (mode self.__pooling_mode), applies tanh with bias ``B``
     and returns the result as a flat vector.
     """
     rows = docs[wc0:wc1]
     convolved = conv.conv2d(input=rows, filters=W)
     pooled = downsample.max_pool_2d(convolved,
                                     (self.__MAXDIM, 1),
                                     mode=self.__pooling_mode,
                                     ignore_border=False)
     activated = T.tanh(pooled + B.dimshuffle([0, 'x', 'x']))
     return activated.flatten(1)
コード例 #9
0
    def __dealWithSentence(self, sentenceWordCount0, sentenceWordCount1, docs, sentenceW, sentenceB):
        """Embed one sentence: rows [sentenceWordCount0, sentenceWordCount1) of ``docs``."""
#         t = T.and_((shareRandge < sentenceWordCount1), (shareRandge >= sentenceWordCount0)).nonzero()
        sentence = docs[sentenceWordCount0:sentenceWordCount1]
        
        sentence_out = conv.conv2d(input=sentence, filters=sentenceW)
        # NOTE(review): pooling mode is hard-coded to "average_exc_pad" here,
        # unlike sibling code paths that use self.__pooling_mode — confirm
        # this divergence is intentional before unifying.
        sentence_pool = downsample.max_pool_2d(sentence_out, (self.__MAXDIM, 1), mode="average_exc_pad", ignore_border=False)
        
        sentence_output = T.tanh(sentence_pool + sentenceB.dimshuffle([0, 'x', 'x']))
        sentence_embedding = sentence_output.flatten(1)
        return sentence_embedding
コード例 #10
0
ファイル: conv.py プロジェクト: zeka0/ether
    def connect(self, *layers):
        # Connect this convolutional layer to its predecessor layers: one
        # filter per pre-layer, each convolved with that layer's output and
        # summed together with the bias.
        self.set_preLayers(layers)
        self.init_filters()
        # NOTE(review): output shape is derived from layers[0] only — this
        # assumes all pre-layers share the same output shape; confirm.
        self.outputShape = conv2D_shape( layers[0].get_outputShape(), self.get_filterShape() )
        self.init_bias()

        outputTensor = self.get_bias()
        inputTensors = []
        #one filter for every pre-layer
        # zip() truncates to the shorter of (numOfFilters, filters), so at
        # most numOfFilters pre-layers are consumed.
        for i, filter in zip( xrange( self.get_numOfFilters() ), self.get_filters() ):
            outputTensor = outputTensor + conv2d( layers[i].get_outputTensor(), filters=filter )
            inputTensors.append( layers[i].get_outputTensor() )

        self.set_outputTensor( outputTensor )
        self.set_inputTensors( inputTensors )
コード例 #11
0
ファイル: script_test_conv.py プロジェクト: erikvdp/Thesis
def test_conv(X, music_shape, filter_shape, subsample):
    """
    X: Shared variable

    Build a conv2d graph over X with a symbolic filter, compile it, then
    check it evaluates with a concrete random filter of ``filter_shape``.
    Returns 1 on success, 0 on failure (printing the error).
    """
    W = T.tensor3('W')
    output = conv2d(X, W, image_shape=music_shape, filter_shape=filter_shape, subsample=subsample)
    f = theano.function([W], output)

    # now test:
    try:
        # Bug fix: use a distinct name so the symbolic variable W above is
        # not shadowed by the concrete numpy array.
        W_val = np.random.rand(*filter_shape).astype(np.float32)
        f(W_val)
        return 1
    except Exception as err:  # 'as' form: valid on Python 2.6+ and 3
        print('ERROR: %s\n' % str(err))
        return 0
コード例 #12
0
ファイル: test_conv.py プロジェクト: hamelphi/Theano
 def sym_conv2d(input, filters):
     """Thin wrapper: build the symbolic 2-d convolution of ``input`` with ``filters``."""
     return conv.conv2d(input=input, filters=filters)
コード例 #13
0
ファイル: choose_kernel.py プロジェクト: zeka0/ether
# Locations of the pickled MNIST data and the kernel output file.
filePath = r'E:\VirtualDesktop\nnet\minist\double_mnist.pkl.gz'
picPath = r'E:\VirtualDesktop\nnet\minist\kernels.pkl'

xData = readMnist( filePath )
xData10 = xData[1][0]

# Hand-picked example images from the loaded set.
yData = (xData10[3], xData10[2], xData10[1], xData10[44], xData10[4],
         xData10[8], xData10[11], xData10[0], xData10[61], xData10[62])

from theano.tensor.signal.conv import conv2d
import theano
from theano import tensor as T

# A 1x1 all-ones filter with subsample=(2, 2) acts as plain 2x subsampling.
xT = T.matrix()
filter = np.ones( (1, 1) )
outputTensor = conv2d(input=xT, filters=filter, subsample=(2,2) )
fun = theano.function(inputs=[xT], outputs=outputTensor)

tmp = []
tmpImg = None
tmp2 = []

# Subsample, normalise (dropping two rows on each side) and trim each image.
for img in yData:
    tmpImg = norm_img(fun(img))[2:-2]
    tmp.append(trim_img(tmpImg))

# Flatten every trimmed image.
tmp2 = [img.flat[:] for img in tmp]

yyData = tuple(tmp2)
コード例 #14
0
ファイル: inlayers.py プロジェクト: IndujaVJ/theanet
    def __init__(self, inpt, img_sz,
                 num_maps = 1,
                 translation=0,
                 zoom=1,
                 magnitude=0,
                 sigma=1,
                 pflip=0,
                 angle=0,
                 rand_gen=None,
                 invert_image=False,
                 nearest=False):
        """Input layer applying random elastic/affine distortions to images.

        Builds a symbolic pipeline that optionally translates, elastically
        deforms (gaussian-smoothed noise field), zooms, rotates, resamples
        (nearest or bilinear) and pixel-flips the input maps.

        :param inpt: symbolic image batch; indexed inpt[:, :, y, x] below,
            so assumed 4-d (batch, maps, img_sz, img_sz) — TODO confirm.
        :param img_sz: height == width of the square input maps.
        :param translation: max shift in pixels, drawn uniformly per axis.
        :param zoom: max zoom factor; per-axis scale is exp(U(-1,1)*log(zoom)).
        :param magnitude: amplitude of the elastic displacement field.
        :param sigma: std-dev (pixels) of the gaussian smoothing filter.
        :param pflip: probability of flipping each output pixel's intensity.
        :param angle: max rotation in degrees, drawn uniformly.
        :param rand_gen: numpy RandomState used to seed the Theano RNG.
        :param invert_image: operate on 1 - inpt instead of inpt.
        :param nearest: nearest-neighbour instead of bilinear resampling.
        """
        self.inpt = inpt
        self.img_sz = img_sz
        self.translation = translation
        self.zoom = zoom
        self.magnitude = magnitude
        self.sigma = sigma
        self.invert = invert_image
        self.nearest = nearest

        self.out_sz = img_sz
        self.num_maps = num_maps
        self.n_out = self.num_maps * self.out_sz ** 2
        self.params = []
        self.representation = ('Elastic Maps:{:d} Size:{:2d} Translation:{:} '
                               'Zoom:{} Mag:{:d} Sig:{:d} Noise:{} '
                               'Angle:{} Invert:{} '
                               'Interpolation:{}'.format(
            self.num_maps, img_sz,
            translation, zoom, magnitude, sigma,
            pflip, angle, invert_image,
            'Nearest' if nearest else 'Linear'))

        if invert_image:
            inpt = 1 - inpt

        assert zoom > 0
        # Fast path: no distortion requested — pass the input through.
        if not (magnitude or translation or pflip or angle) and zoom == 1:
            self.output = inpt
            self.debugout = [self.output, tt.as_tensor_variable((0, 0))]
            return

        srs = tt.shared_randomstreams.RandomStreams(rand_gen.randint(1e6)
                                                    if rand_gen else None)
        h = w = img_sz

        # Humble as-is beginning
        # 'target' holds, for every output pixel, the (y, x) source
        # coordinate to sample from; it starts as the identity mapping.
        target = tt.as_tensor_variable(np.indices((h, w)))

        # Translate
        if translation:
            transln = translation * srs.uniform((2, 1, 1), -1)
            target += transln

        # Apply elastic transform
        if magnitude:
            # Build a gaussian filter
            var = sigma ** 2
            filt = np.array([[np.exp(-.5 * (i * i + j * j) / var)
                             for i in range(-sigma, sigma + 1)]
                             for j in range(-sigma, sigma + 1)], dtype=float_x)
            filt /= 2 * np.pi * var

            # Elastic
            # Smooth a random normal field, then crop the 'full'
            # convolution's border back to (2, h, w).
            elast = magnitude * srs.normal((2, h, w))
            elast = sigconv.conv2d(elast, filt, (2, h, w), filt.shape, 'full')
            elast = elast[:, sigma:h + sigma, sigma:w + sigma]
            target += elast

        # Center at 'about' half way
        if zoom-1 or angle:
            origin = srs.uniform((2, 1, 1), .25, .75) * \
                     np.array((h, w)).reshape((2, 1, 1))
            target -= origin

            # Zoom
            if zoom-1:
                zoomer = tt.exp(np.log(zoom) * srs.uniform((2, 1, 1), -1))
                target *= zoomer

            # Rotate
            if angle:
                theta = angle * np.pi / 180 * srs.uniform(low=-1)
                c, s = tt.cos(theta), tt.sin(theta)
                rotate = tt.stack(c, -s, s, c).reshape((2,2))
                target = tt.tensordot(rotate, target, axes=((0, 0)))

            # Uncenter
            target += origin

        # Clip the mapping to valid range and linearly interpolate
        # (- .001 keeps the integer part strictly below h-1/w-1 so that
        # the +1 neighbours used below stay in bounds).
        transy = tt.clip(target[0], 0, h - 1 - .001)
        transx = tt.clip(target[1], 0, w - 1 - .001)

        if nearest:
            vert = tt.iround(transy)
            horz = tt.iround(transx)
            output = inpt[:, :, vert, horz]
        else:
            # Bilinear interpolation between the four surrounding pixels.
            topp = tt.cast(transy, 'int32')
            left = tt.cast(transx, 'int32')
            fraction_y = tt.cast(transy - topp, float_x)
            fraction_x = tt.cast(transx - left, float_x)

            output = inpt[:, :, topp, left] * (1 - fraction_y) * (1 - fraction_x) + \
                     inpt[:, :, topp, left + 1] * (1 - fraction_y) * fraction_x + \
                     inpt[:, :, topp + 1, left] * fraction_y * (1 - fraction_x) + \
                     inpt[:, :, topp + 1, left + 1] * fraction_y * fraction_x

        # Now add some noise
        # Salt-and-pepper: invert each pixel's intensity with prob. pflip.
        if pflip:
            mask = srs.binomial(n=1, p=pflip, size=inpt.shape, dtype=float_x)
            output = (1 - output) * mask + output * (1 - mask)

        self.output = output
        self.debugout = [self.output,
                         target - np.indices((h, w)),]

        if translation:
            self.debugout.append(transln)
        if zoom-1 or angle:
            self.debugout.append(origin)
        if angle:
            self.debugout.append(theta*180/np.pi)
        if zoom-1:
            self.debugout.append(zoomer)
コード例 #15
0
def convolve_special_image(channel_index,
                               input4D, 
                               input_images, fU1, fU2, fU3,  flmda,
                               ib, iw, ih):
         """
         Convolve one channel of a batch of images with separable filters.

         The per-channel factors U1/U2/U3 and weights lmda appear to come
         from a separable (rank-decomposed) filter bank: a horizontal pass
         with U1 and a vertical pass with U2 build rank-1 responses, which
         are then mixed by U3 * lmda into the feature maps written into
         input4D[:, f, :, :] via set_subtensor.

         channel_index : index of the input channel to process.
         input4D       : 4-d output tensor, updated and returned.
         input_images  : batch of images, indexed [batch, channel, h, w].
         fU1, fU2, fU3 : per-channel factor matrices of the decomposition.
         flmda         : per-channel per-rank weights.
         ib, iw, ih    : batch size, image width, image height.
         """                        
         U1 = fU1[channel_index,:,:]
         U2 = fU2[channel_index,:,:]
         U3 = fU3[channel_index,:,:]
         lmda = flmda[channel_index,:]
         one_chanel_images = input_images[:,channel_index,:,:];
        ## We look at the composition for the first channel in the beginning  
         
         image_shape = (ib, iw, ih)
         # NOTE(review): 'input_images[1]' here is likely meant to be
         # input_images.shape[1] (the channel count) — confirm.
         filter_shape = (U3.shape[0], input_images[1],U2.shape[0],U1.shape[0])
         rank = U1.shape[1]
         fwidth = filter_shape[2]
         fheight = filter_shape[3]
         
         
         # Construct horizontal filters
         #TODO save the filters in the correct shape
         horizontal_filter_shape = (rank, 1, fwidth)
         horizontal_filters = np.ndarray(shape=horizontal_filter_shape)
         horizontal_filters[:, 0, :] = np.transpose(U1);
        
         # Output is batch size x rank x W x H
         horizontal_conv_out = conv.conv2d(input=one_chanel_images, 
                                           filters = horizontal_filters,
                                           filter_shape = horizontal_filter_shape, 
                                           image_shape = image_shape)
         
         # Construct vertical filters
         vertical_filter_shape = (rank, fheight, 1)
         vertical_filters = np.ndarray(vertical_filter_shape)        
         vertical_filters[:,:, 0] = np.transpose(U2);

         # NOTE(review): rows are reduced by fwidth and cols by fheight —
         # the width/height naming looks crossed; verify against the data.
         initial_n_rows = image_shape[1]
         final_n_rows = initial_n_rows- fwidth + 1
         final_n_cols = image_shape[2] - fheight + 1 
         batch_size = image_shape[0]
         # Shared buffer filled rank-by-rank via set_subtensor below.
         conv_out = theano.shared(np.zeros((batch_size, rank, final_n_rows, final_n_cols)))
         for r in range(rank):
             # output is batch_size x 1 x imageW x imageH
             A = conv.conv2d(input = horizontal_conv_out[:,r,:,:].reshape((batch_size, initial_n_rows, final_n_cols)), 
                             filters = vertical_filters[r,:,:],
                             filter_shape = (1, fheight, 1), 
                             image_shape = (batch_size, initial_n_rows, final_n_cols))
             conv_out = T.set_subtensor(conv_out[:,r,:,:], A[:,:,:])

  
  
         nbr_filters = U3.shape[0]
         # Final number of rows and columns                        
         ## numberof images, number of filters, image width, image height
         # Mix the rank-1 responses into each final filter map.
         alphas = U3 
         for f in range(nbr_filters):            
            temp = theano.shared(np.zeros((batch_size, final_n_rows, final_n_cols)))
            for r in range(rank):
                temp = temp + conv_out[:,r, :,:]* alphas[f, r] * lmda[r]; 
            input4D =T.set_subtensor(input4D[:,f,:,:], temp)
         return input4D   
コード例 #16
0
    images.shape = (images.shape[0], 1, images.shape[1], images.shape[2])
  Wshape = W.shape
  if (len(W.shape) < 4):
    W.shape = (W.shape[0], 1, W.shape[1], W.shape[2])
  tempI = shared(images)
  tempW = shared(W)
  c_out = T.nnet.conv2d(tempI, tempW)
  c_fun = theano.function([], c_out)
  R = c_fun()
  W.shape = Wshape
  images.shape = Ishape
  return R

# Compile a generic 3-d tensor convolution helper reused by the code below.
A = T.dtensor3('W')
B = T.dtensor3('conv_op_input')
co = conv.conv2d(A, B)
conv_fun_T3 = theano.function([A, B], co)


def rot(A):
  """Rotate ``A`` by 180 degrees in its first two axes."""
  # Flipping both axes is equivalent to np.rot90(A, 2).
  return np.flipud(np.fliplr(A))

def average_pool(A, pool_dim):
  """Mean-pool each image of A over non-overlapping pool_dim x pool_dim tiles.

  A is a 4-d array; pooling acts on the last two axes.  Implemented as a
  box-filter convolution followed by strided sampling and normalisation.
  """
  norm = pool_dim * pool_dim
  kernel = np.ones((1, pool_dim, pool_dim)).astype(floatX)
  R = np.zeros((A.shape[0], A.shape[1], A.shape[2]/pool_dim,
                A.shape[3]/pool_dim)).astype(floatX)
  for idx in range(A.shape[0]):
    summed = convolveTH4(A[idx], kernel)[:, 0, 0::pool_dim, 0::pool_dim]
    R[idx] = summed / norm
  return R
コード例 #17
0
ファイル: deep_cnn.py プロジェクト: baccuslab/LNKS-nnet
def filt_app(y):
	"""Smooth ``y`` along axis 1 with a length-``filt_len`` box filter."""
	filt = T.alloc(1., filt_len, 1)/filt_len #filter height = filt_len, filter width = 1
	# 'full' convolution grows axis 1 by filt_len - 1; keep the centred
	# window and truncate back to rec_length samples.
	temp = conv2d(y, filt, border_mode='full')
	# NOTE(review): 'filt_len/2' relies on Python 2 integer division —
	# use filt_len // 2 if this ever runs on Python 3.
	temp = temp[:, filt_len/2:, :]
	temp = temp[:, :rec_length, :]
	return temp
コード例 #18
0
    def convolve_one_image(self,input4D, one_image, image_shape, 
                           Pstruct, filter_shape,
                           image_index,
                           channel_index):
         """
         Convolve one image's channel with separable (rank-decomposed) filters.

         Pstruct[channel_index] appears to hold the factors of a separable
         filter bank ('U1', 'U2', 'U3', 'lmbda'): a horizontal pass with U1
         and a vertical pass with U2 build rank-1 responses, which are mixed
         by U3 * lmbda into feature maps written into
         input4D[image_index, f, :, :] via set_subtensor.

         input4D       : 4-d output tensor, updated and returned.
         one_image     : symbolic image tensor of shape ``image_shape``.
         image_shape   : (nbr channels, image height, image width).
         filter_shape  : (nbr filters, nbr maps, filter height, filter width).
         image_index   : batch position to write into input4D.
         channel_index : which channel's decomposition to use.
         """                        
  
    
        ## We look at the composition for the first channel in the beginning  
         rank = Pstruct[0]['U1'].shape[1]
         fwidth = filter_shape[2]
         fheight = filter_shape[3]
         
         
         # Construct horizontal filters
         #TODO save the filters in the correct shape
         horizontal_filter_shape = (rank, 1, fwidth)
         horizontal_filters = np.ndarray(horizontal_filter_shape)
         horizontal_filters[:, 0, :] = np.transpose(Pstruct[channel_index]['U1']);
        
         # Output is 1 x rank x W x H
         horizontal_conv_out = conv.conv2d(input=one_image, 
                                           filters = horizontal_filters,
                                           filter_shape = horizontal_filter_shape, 
                                           image_shape = image_shape)
         
         # Construct vertical filters
         vertical_filter_shape = (rank, fheight, 1)
         vertical_filters = np.ndarray(vertical_filter_shape)        
         vertical_filters[:,:, 0] = np.transpose(Pstruct[channel_index]['U2']);

         # NOTE(review): rows shrink by fwidth and cols by fheight — the
         # width/height naming looks crossed; verify against the data.
         initial_n_rows = image_shape[1]
         final_n_rows = initial_n_rows- fwidth + 1
         final_n_cols = image_shape[2] - fheight + 1 
         # Shared buffer filled rank-by-rank via set_subtensor below.
         conv_out = theano.shared(np.zeros((rank, final_n_rows, final_n_cols)))
         for r in range(rank):
             # temp is 1x1x imageW x imageH
             A = conv.conv2d(input = horizontal_conv_out[:,r,:,:], 
                             filters = vertical_filters[r,:,:],
                             filter_shape = (1, fheight, 1), 
                             image_shape = (1, initial_n_rows, final_n_cols))
             conv_out = T.set_subtensor(conv_out[r,:,:], A[0,:,:])
  
         nbr_filters = Pstruct[0]['U3'].shape[0]
         # Final number of rows and columns                        
         ## numberof images, number of filters, image width, image height
         # Mix the rank-1 responses into each final filter map.
         alphas = Pstruct[channel_index]['U3']  
         for f in range(nbr_filters):            
            temp = theano.shared(np.zeros((final_n_rows, final_n_cols)))
            for r in range(rank):
                temp = temp + conv_out[r, :,:]* alphas[f, r] * Pstruct[channel_index]['lmbda'][r]; 
            input4D =T.set_subtensor(input4D[image_index,f,:,:], temp)
         return input4D   
コード例 #19
0
    def setup(self, bottom, top):
        """Caffe Python-layer setup: compile a Theano DSSIM-and-gradients fn.

        bottom is [img1, img2] or [img1, img2, mask]; top is the single
        loss blob.  Builds self.fdssim_with_grad, which maps the input
        images (and optional mask) to (dssim, d/dimg1, d/dimg2).
        """
        from caffe_helper.theano_util import init_theano
        init_theano()

        import theano as tn
        import theano.tensor as T
        from theano.tensor.signal.conv import conv2d
        assert len(bottom) >= 2
        assert len(bottom) <= 3
        assert len(top) == 1
        # parameter
        self.K_ = [0.01, 0.03]  # SSIM stabilising constants (k1, k2)
        self.L_ = 1.0           # dynamic range of the input images
        # NOTE(review): eval() of the layer parameter string — fine for
        # trusted prototxt, but do not feed it untrusted input.
        param = eval(self.param_str_)
        self.hsize_ = param.get('hsize', 11)
        self.sigma_ = param.get('sigma', 1.5)
        assert self.hsize_ % 2 == 1
        hsize = self.hsize_
        sigma = self.sigma_
        C1 = (self.K_[0] * self.L_) ** 2
        C2 = (self.K_[1] * self.L_) ** 2
        # Creating gaussian filter (separable outer product, normalised to 1)
        x = np.exp(-0.5 * ((np.arange(hsize) - int(hsize / 2)) ** 2) /
                   (sigma ** 2))
        filt = x.reshape(-1, 1) * x.reshape(1, -1)
        filt /= filt.sum()

        # Build a Theano function which computes SSIM and its gradients wrt two
        # images
        simg1_in = T.ftensor3()
        simg2_in = T.ftensor3()

        if len(bottom) > 2:
            smask = T.ftensor3()
            # sk is the least-squares scale aligning img1's intensity to
            # img2 (argmin_k ||k*img1 - img2||^2 over the masked region).
            sk = T.sum(simg1_in * simg2_in * smask) \
                / T.sum(simg1_in * simg1_in * smask)
            simg1 = sk * simg1_in * smask
            simg2 = simg2_in * smask
        else:
            sk = T.sum(simg1_in * simg2_in) \
                / T.sum(simg1_in * simg1_in)
            simg1 = sk * simg1_in
            simg2 = simg2_in
        sfilt = tn.shared(filt.astype(np.float32))
        # Local means, variances and covariance via gaussian filtering.
        smu1 = conv2d(simg1, sfilt)
        smu2 = conv2d(simg2, sfilt)
        smu1_sq = smu1 * smu1
        smu2_sq = smu2 * smu2
        smu1_mu2 = smu1 * smu2
        ssig1_sq = conv2d(simg1 * simg1, sfilt) - smu1_sq
        ssig2_sq = conv2d(simg2 * simg2, sfilt) - smu2_sq
        ssig12 = conv2d(simg1 * simg2, sfilt) - smu1_mu2
        # Mean SSIM over all windows, mapped to DSSIM = (1 - SSIM) / 2.
        sssim = (
            ((2 * smu1_mu2 + C1) * (2 * ssig12 + C2))
            / ((smu1_sq + smu2_sq + C1) * (ssig1_sq + ssig2_sq + C2))
        ).mean()
        sdssim = (1 - sssim) / 2
        gimg1, gimg2 = tn.grad(sdssim, [simg1_in, simg2_in])
        if len(bottom) > 2:
            self.fdssim_with_grad = tn.function(
                [simg1_in, simg2_in, smask], [sdssim, gimg1, gimg2])
        else:
            self.fdssim_with_grad = tn.function(
                [simg1_in, simg2_in], [sdssim, gimg1, gimg2])
コード例 #20
0
    def __init__(self,
                          corpus,
                          sentenceWordCount,
                          rng,
                          wordEmbeddingDim,
                          sentenceLayerNodesNum=2,
                          sentenceLayerNodesSize=(2, 2),
                          docLayerNodesNum=2,
                          docLayerNodesSize=(2, 3),
                          datatype=theano.config.floatX,
                          pooling_mode="average_exc_pad"):
        """Two-level conv/pool document embedding network.

        Sentences are embedded by scanning __dealWithSentence over the
        boundary indices in ``sentenceWordCount``; the stacked sentence
        embeddings are then convolved and pooled once more into a single
        flat document vector, exposed as self.output.

        corpus            : word-representation matrix of the whole corpus.
        sentenceWordCount : sentence boundary indices into ``corpus``.
        rng               : numpy RandomState for weight initialisation.
        wordEmbeddingDim  : dimensionality of the word embeddings.
        """
        self.__wordEmbeddingDim = wordEmbeddingDim
        self.__sentenceLayerNodesNum = sentenceLayerNodesNum
        self.__sentenceLayerNodesSize = sentenceLayerNodesSize
        self.__docLayerNodesNum = docLayerNodesNum
        self.__docLayerNodesSize = docLayerNodesSize
        self.__WBound = 0.2
        self.__MAXDIM = 10000
        self.__datatype = datatype
        self.sentenceW = None
        self.sentenceB = None
        self.docW = None
        self.docB = None
        self.__pooling_mode = pooling_mode
        
        # For  DomEmbeddingNN optimizer.
#         self.shareRandge = T.arange(maxRandge)
        
        # Get sentence layer W (uniform init in [-WBound, WBound])
        self.sentenceW = theano.shared(
            numpy.asarray(
                rng.uniform(low=-self.__WBound, high=self.__WBound, size=(self.__sentenceLayerNodesNum, self.__sentenceLayerNodesSize[0], self.__sentenceLayerNodesSize[1])),
                dtype=datatype
            ),
            borrow=True
        )
        # Get sentence layer b
        sentenceB0 = numpy.zeros((sentenceLayerNodesNum,), dtype=datatype)
        self.sentenceB = theano.shared(value=sentenceB0, borrow=True)
        
        # Get doc layer W
        self.docW = theano.shared(
            numpy.asarray(
                rng.uniform(low=-self.__WBound, high=self.__WBound, size=(self.__docLayerNodesNum, self.__docLayerNodesSize[0], self.__docLayerNodesSize[1])),
                dtype=datatype
            ),
            borrow=True
        )
        # Get doc layer b
        docB0 = numpy.zeros((docLayerNodesNum,), dtype=datatype)
        self.docB = theano.shared(value=docB0, borrow=True)
        
        # taps=[-1, -0] hands each scan step the previous and current
        # boundary, i.e. one sentence's [start, end) row range.
        self.sentenceResults, _ = theano.scan(fn=self.__dealWithSentence,
                            non_sequences=[corpus, self.sentenceW, self.sentenceB],
                             sequences=[dict(input=sentenceWordCount, taps=[-1, -0])],
                             strict=True)
        
#         p = printing.Print('docPool')
#         docPool = p(docPool)
#         p = printing.Print('sentenceResults')
#         sentenceResults = p(sentenceResults)
#         p = printing.Print('doc_out')
#         doc_out = p(doc_out)
        # Document level: convolve the stacked sentence embeddings, pool
        # over (up to) the whole document, tanh + bias, then flatten.
        doc_out = conv.conv2d(input=self.sentenceResults, filters=self.docW)
        docPool = downsample.max_pool_2d(doc_out, (self.__MAXDIM, 1), mode=self.__pooling_mode, ignore_border=False)
        docOutput = T.tanh(docPool + self.docB.dimshuffle([0, 'x', 'x']))
        self.output = docOutput.flatten(1)
        
        self.params = [self.sentenceW, self.sentenceB, self.docW, self.docB]
        # Width after two 'valid' convolutions (each shrinks by size-1).
        self.outputDimension = self.__docLayerNodesNum * \
                                                  (self.__sentenceLayerNodesNum * (self.__wordEmbeddingDim - self.__sentenceLayerNodesSize[1] + 1) - self.__docLayerNodesSize[1] + 1)
コード例 #21
0
ファイル: Player.py プロジェクト: suryabhupa/MADbot
sys.path.append('./pdnn')
from models.dnn import DNN
from io_func.model_io import _file2nnet
from io_func import smart_open
import cPickle
import theano
from theano.tensor.signal import conv
import theano.tensor as T


numpy_rng = numpy.random.RandomState(101)

# CODE BELOW NEEDED TO MAKE CONVOLUTIONS WITH THEANO
# Compiled helper: 2-d convolution of two dense matrices.
# NOTE(review): 'input' and 'filter' shadow Python builtins at module level.
input = T.dmatrix('input')
filter = T.dmatrix('filter')
conv_out = conv.conv2d(input, filter)
convolution_function = theano.function([input, filter], conv_out)
#################################################
#################################################

"""
Simple example pokerbot, written in python.

This is an example of a bare bones pokerbot. It only sets up the socket
necessary to connect with the engine and then always returns the same action.
It is meant as an example of how a pokerbot should communicate with the engine.
"""

def load_flop_network(nnet_param = 'neural_network/flop_network_params', nnet_cfg = 'neural_network/flop_network_cfg'):
    # Load the pickled network config, initialise its activation, and build
    # the DNN.  NOTE(review): as captured here the function neither loads
    # nnet_param into the model nor returns it — the snippet looks truncated.
    cfg = cPickle.load(smart_open(nnet_cfg,'r'))
    cfg.init_activation()
    model = DNN(numpy_rng=numpy_rng, cfg = cfg)
コード例 #22
0
ファイル: theano.py プロジェクト: ysulsky/eblearn-python
def make_theano_m2_convolve(mattype_inp, mattype_ker):
    """Compile a Theano function convolving a matrix with a kernel.

    mattype_inp / mattype_ker are Theano tensor-type constructors
    (e.g. T.matrix); the compiled function takes (input, kernel) arrays
    and returns their 2-d convolution.
    """
    dtype = np.dtype(rtype).name
    # Fix: avoid shadowing the builtin 'input'; the symbolic variable
    # names passed to the constructors are unchanged.
    inp = mattype_inp('input',  dtype)
    ker = mattype_ker('kernel', dtype)
    return function([inp, ker], conv2d(inp, ker))