Example #1
0
  def __init__(self, collapse='mean', maxout=False, transpose=False, **kwargs):
    """Reduce a 4D (time, batch?, width?, features) source to a 3D output.

    :param collapse: how to reduce the first axis of the source output:
      'sum' or True (sum over axis 0), 'mean' (average over axis 0),
      'conv' (fold via circular convolution scan, keep last step),
      'flatten' (merge the first two axes), 'pad_<N>' (pad/crop axis 0 to
      length N and fold it into the feature dimension, multiplying n_out
      by N), or False (no collapse). Any other value raises AssertionError.
    :param maxout: if True, take the max over the feature axis (axis 3)
      first, keeping a broadcastable singleton feature axis.
    :param transpose: if True, swap the first two axes of the source
      output before any other processing.
    """
    super(TwoDToOneDLayer, self).__init__(1, **kwargs)
    self.set_attr('collapse', collapse)
    self.set_attr('transpose', transpose)
    Y = self.sources[0].output
    if transpose:
      Y = Y.dimshuffle(1, 0, 2, 3)

    #index handling
    # Build a per-sequence validity mask: for each entry, set the first
    # `size` positions to 1 (int8). output_sizes[:, 1] supplies the sizes.
    # NOTE(review): assumes Y is (time, batch, width, feats) so that the
    # mask shape (Y.shape[2], Y.shape[1]) is (width, batch) -- confirm.
    def index_fn(index, size):
      return T.set_subtensor(index[:size], numpy.cast['int8'](1))
    index_init = T.zeros((Y.shape[2],Y.shape[1]), dtype='int8')
    self.index, _ = theano.scan(index_fn, [index_init, T.cast(self.sources[0].output_sizes[:,1],"int32")])
    self.index = self.index.dimshuffle(1, 0)
    n_out = self.sources[0].attrs['n_out']

    if maxout:
      # Max over features; 'x' re-adds a broadcastable feature axis.
      Y = Y.max(axis=3).dimshuffle(0,1,2,'x')

    if collapse == 'sum' or collapse == True:
      Y = Y.sum(axis=0)
    elif collapse == 'mean':
      Y = Y.mean(axis=0)
    elif collapse == 'conv':
      # Accumulate all steps of axis 0 into one via circular convolution,
      # seeded with the first step; only the final accumulation is kept.
      from TheanoUtil import circular_convolution
      Y, _ = theano.scan(lambda x_i,x_p:circular_convolution(x_i,x_p),Y,Y[0])
      Y = Y[-1]
    elif collapse == 'flatten':
      # Merging axes 0 and 1 invalidates the scan-built mask; all merged
      # positions are marked valid instead.
      self.index = T.ones((Y.shape[0] * Y.shape[1], Y.shape[2]), dtype='int8')
      Y = Y.reshape((Y.shape[0]*Y.shape[1],Y.shape[2],Y.shape[3]))
    elif str(collapse).startswith('pad_'):
      # Zero-pad (or crop) axis 0 to exactly `pad` entries, then fold that
      # axis into the feature dimension.
      pad = numpy.int32(collapse.split('_')[-1])
      Y = ifelse(T.lt(Y.shape[0],pad),T.concatenate([Y,T.zeros((pad-Y.shape[0],Y.shape[1],Y.shape[2],Y.shape[3]),'float32')],axis=0),
                 ifelse(T.gt(Y.shape[0],pad),Y[:pad],Y))
      Y = Y.dimshuffle(1,2,3,0).reshape((Y.shape[1],Y.shape[2],Y.shape[3]*Y.shape[0]))
      n_out *= pad
    elif collapse != False:
      assert False, "invalid collapse mode"

    if self.attrs['batch_norm']:
      Y = self.batch_norm(Y, n_out, force_sample=False)
    self.output = Y
    # Both activation slots reference the same symbolic output.
    self.act = [Y, Y]
    self.set_attr('n_out', n_out)
Example #2
0
  def __init__(self, collapse='mean', maxout=False, transpose=False, **kwargs):
    """Reduce a 4D (time, batch?, width?, features) source to a 3D output.

    Identical to the preceding example of this constructor.

    :param collapse: how to reduce the first axis of the source output:
      'sum' or True (sum over axis 0), 'mean' (average over axis 0),
      'conv' (fold via circular convolution scan, keep last step),
      'flatten' (merge the first two axes), 'pad_<N>' (pad/crop axis 0 to
      length N and fold it into the feature dimension, multiplying n_out
      by N), or False (no collapse). Any other value raises AssertionError.
    :param maxout: if True, take the max over the feature axis (axis 3)
      first, keeping a broadcastable singleton feature axis.
    :param transpose: if True, swap the first two axes of the source
      output before any other processing.
    """
    super(TwoDToOneDLayer, self).__init__(1, **kwargs)
    self.set_attr('collapse', collapse)
    self.set_attr('transpose', transpose)
    Y = self.sources[0].output
    if transpose:
      Y = Y.dimshuffle(1, 0, 2, 3)

    #index handling
    # Build a per-sequence validity mask: for each entry, set the first
    # `size` positions to 1 (int8). output_sizes[:, 1] supplies the sizes.
    def index_fn(index, size):
      return T.set_subtensor(index[:size], numpy.cast['int8'](1))
    index_init = T.zeros((Y.shape[2],Y.shape[1]), dtype='int8')
    self.index, _ = theano.scan(index_fn, [index_init, T.cast(self.sources[0].output_sizes[:,1],"int32")])
    self.index = self.index.dimshuffle(1, 0)
    n_out = self.sources[0].attrs['n_out']

    if maxout:
      # Max over features; 'x' re-adds a broadcastable feature axis.
      Y = Y.max(axis=3).dimshuffle(0,1,2,'x')

    if collapse == 'sum' or collapse == True:
      Y = Y.sum(axis=0)
    elif collapse == 'mean':
      Y = Y.mean(axis=0)
    elif collapse == 'conv':
      # Accumulate all steps of axis 0 into one via circular convolution,
      # seeded with the first step; only the final accumulation is kept.
      from TheanoUtil import circular_convolution
      Y, _ = theano.scan(lambda x_i,x_p:circular_convolution(x_i,x_p),Y,Y[0])
      Y = Y[-1]
    elif collapse == 'flatten':
      # Merging axes 0 and 1 invalidates the scan-built mask; all merged
      # positions are marked valid instead.
      self.index = T.ones((Y.shape[0] * Y.shape[1], Y.shape[2]), dtype='int8')
      Y = Y.reshape((Y.shape[0]*Y.shape[1],Y.shape[2],Y.shape[3]))
    elif str(collapse).startswith('pad_'):
      # Zero-pad (or crop) axis 0 to exactly `pad` entries, then fold that
      # axis into the feature dimension.
      pad = numpy.int32(collapse.split('_')[-1])
      Y = ifelse(T.lt(Y.shape[0],pad),T.concatenate([Y,T.zeros((pad-Y.shape[0],Y.shape[1],Y.shape[2],Y.shape[3]),'float32')],axis=0),
                 ifelse(T.gt(Y.shape[0],pad),Y[:pad],Y))
      Y = Y.dimshuffle(1,2,3,0).reshape((Y.shape[1],Y.shape[2],Y.shape[3]*Y.shape[0]))
      n_out *= pad
    elif collapse != False:
      assert False, "invalid collapse mode"

    if self.attrs['batch_norm']:
      Y = self.batch_norm(Y, n_out, force_sample=False)
    self.output = Y
    # Both activation slots reference the same symbolic output.
    self.act = [Y, Y]
    self.set_attr('n_out', n_out)
    def __init__(self,
                 n_out,
                 collapse_output=False,
                 directions=4,
                 projection='average',
                 base=None,
                 **kwargs):
        """Build a 2D (multi-directional) LSTM layer over a 4D input.

        :param n_out: number of output units per direction.
        :param collapse_output: optional reduction of the output's first
          axis: 'sum'/True, 'mean', 'conv', 'flatten', 'pad_<N>', or False.
        :param directions: 1, 2 or 4 scan directions over the 2D input.
        :param projection: 'average' (mean over directions) or 'concat'
          (stack directions into the feature dim, multiplying n_out).
        :param base: optional list whose first element is another layer to
          share parameters with; if given, its weights/biases are re-added
          as params of this layer instead of creating fresh ones.
        """
        if base is None:
            base = []
        super(TwoDLSTMLayer, self).__init__(n_out, **kwargs)
        assert len(self.sources) == 1
        source = self.sources[0]
        n_in = source.attrs['n_out']
        X = source.output
        assert X.ndim == 4
        sizes = source.output_sizes
        self.output_sizes = sizes
        assert directions in [1, 2,
                              4], "only 1, 2 or 4 directions are supported"
        assert projection in ['average', 'concat'], "invalid projection"

        if base:
            # Share parameters with the base layer (re-registered via
            # add_param so they count as params of this layer too).
            self.b1 = self.add_param(base[0].b1)
            self.b2 = self.add_param(base[0].b2)
            # NOTE(review): 'directions >= 1' is always true given the
            # assert above (directions in {1,2,4}); presumably this was
            # meant to gate the 3rd/4th direction (e.g. '> 2') -- confirm.
            if directions >= 1:
                self.b3 = self.add_param(base[0].b3)
                self.b4 = self.add_param(base[0].b4)
            self.W1, self.V_h1, self.V_v1 = self.add_param(
                base[0].W1), self.add_param(base[0].V_h1), self.add_param(
                    base[0].V_v1)
            self.W2, self.V_h2, self.V_v2 = self.add_param(
                base[0].W2), self.add_param(base[0].V_h2), self.add_param(
                    base[0].V_v2)
            if directions >= 1:
                self.W3, self.V_h3, self.V_v3 = self.add_param(
                    base[0].W3), self.add_param(base[0].V_h3), self.add_param(
                        base[0].V_v3)
                self.W4, self.V_h4, self.V_v4 = self.add_param(
                    base[0].W4), self.add_param(base[0].V_h4), self.add_param(
                        base[0].V_v4)
            #self.mass = base[0].mass
            #self.masks = base[0].masks
            #self.b1 = base[0].b1
            #self.b2 = base[0].b2
            #if directions >= 1:
            #  self.b3 = base[0].b3
            #  self.b4 = base[0].b4
            #self.W1, self.V_h1, self.V_v1 = base[0].W1, base[0].V_h1, base[0].V_v1
            #self.W2, self.V_h2, self.V_v2 = base[0].W2, base[0].V_h2, base[0].V_v2
            #if directions >= 1:
            #  self.W3, self.V_h3, self.V_v3 = base[0].W3, base[0].V_h3, base[0].V_v3
            #  self.W4, self.V_h4, self.V_v4 = base[0].W4, base[0].V_h4, base[0].V_v4
            self.mass = base[0].mass
            self.masks = base[0].masks
        else:
            # Create fresh biases and weights (one set per direction).
            self.b1 = self.create_and_add_bias(n_out, "1")
            self.b2 = self.create_and_add_bias(n_out, "2")
            if directions >= 1:
                self.b3 = self.create_and_add_bias(n_out, "3")
                self.b4 = self.create_and_add_bias(n_out, "4")

            self.W1, self.V_h1, self.V_v1 = self.create_and_add_2d_lstm_weights(
                n_in, n_out, "1")
            self.W2, self.V_h2, self.V_v2 = self.create_and_add_2d_lstm_weights(
                n_in, n_out, "2")
            if directions >= 1:
                self.W3, self.V_h3, self.V_v3 = self.create_and_add_2d_lstm_weights(
                    n_in, n_out, "3")
                self.W4, self.V_h4, self.V_v4 = self.create_and_add_2d_lstm_weights(
                    n_in, n_out, "4")

        # dropout
        assert len(self.masks) == 1
        mask = self.masks[0]
        if mask is not None:
            X = self.mass * mask * X

        if str(theano.config.device).startswith('cpu'):
            # The 2D LSTM ops are GPU-only: on CPU, emit a zero placeholder
            # with the right shape so graph construction still succeeds.
            Y = T.zeros_like(X)
            if projection == 'concat':
                Y = Y.repeat(directions, axis=-1)
                n_out *= directions
        else:
            if directions <= 2:
                Y = BidirectionalTwoDLSTMOpInstance(X, self.W1, self.W2,
                                                    self.V_h1, self.V_h2,
                                                    self.V_v1, self.V_v2,
                                                    self.b1, self.b2, sizes)
            else:
                Y = MultiDirectionalTwoDLSTMOpInstance(
                    X, self.W1, self.W2, self.W3, self.W4, self.V_h1,
                    self.V_h2, self.V_h3, self.V_h4, self.V_v1, self.V_v2,
                    self.V_v3, self.V_v4, self.b1, self.b2, self.b3, self.b4,
                    sizes)

            if directions > 1:
                # Combine per-direction outputs along a new trailing axis.
                Y = T.stack(Y[:directions], axis=-1)
                if projection == 'average':
                    Y = Y.mean(axis=-1)
                elif projection == 'concat':
                    Y = Y.reshape((Y.shape[0], Y.shape[1], Y.shape[2],
                                   Y.shape[3] * Y.shape[4]))
                    n_out *= directions
            else:
                Y = Y[0]

        Y.name = 'Y'
        self.set_attr('n_out', n_out)
        self.set_attr('collapse_output', collapse_output)
        self.set_attr('directions', directions)
        self.set_attr('projection', projection)

        #index handling
        # Per-sequence validity mask: ones up to each size in sizes[:, 1].
        def index_fn(index, size):
            return T.set_subtensor(index[:size], numpy.cast['int8'](1))

        index_init = T.zeros((Y.shape[2], Y.shape[1]), dtype='int8')
        self.index, _ = theano.scan(
            index_fn, [index_init, T.cast(sizes[:, 1], "int32")])
        self.index = self.index.dimshuffle(1, 0)

        if collapse_output == 'sum' or collapse_output == True:
            Y = Y.sum(axis=0)
        elif collapse_output == 'mean':
            Y = Y.mean(axis=0)
        elif collapse_output == 'conv':
            # Fold axis 0 via circular convolution, keep the final step.
            from TheanoUtil import circular_convolution
            Y, _ = theano.scan(lambda x_i, x_p: circular_convolution(x_i, x_p),
                               Y, Y[0])
            Y = Y[-1]
        elif collapse_output == 'flatten':
            # Merged axes invalidate the scan-built mask; mark all valid.
            self.index = T.ones((Y.shape[0] * Y.shape[1], Y.shape[2]),
                                dtype='int8')
            Y = Y.reshape((Y.shape[0] * Y.shape[1], Y.shape[2], Y.shape[3]))
        elif str(collapse_output).startswith('pad_'):
            # Zero-pad (or crop) axis 0 to `pad`, then fold it into the
            # feature dimension; n_out grows accordingly.
            pad = numpy.int32(collapse_output.split('_')[-1])
            Y = ifelse(
                T.lt(Y.shape[0], pad),
                T.concatenate([
                    Y,
                    T.zeros(
                        (pad - Y.shape[0], Y.shape[1], Y.shape[2], Y.shape[3]),
                        'float32')
                ],
                              axis=0), ifelse(T.gt(Y.shape[0], pad), Y[:pad],
                                              Y))
            Y = Y.dimshuffle(1, 2, 3, 0).reshape(
                (Y.shape[1], Y.shape[2], Y.shape[3] * Y.shape[0]))
            self.attrs['n_out'] *= pad
        elif collapse_output != False:
            assert False, "invalid collapse mode"

        if self.attrs['batch_norm']:
            # Uncollapsed output normalizes against raw sizes; collapsed
            # output uses the computed index mask.
            Y = self.batch_norm(
                Y,
                self.attrs['n_out'],
                index=sizes if not collapse_output else self.index,
                force_sample=False)

        self.output = Y
Example #4
0
  def __init__(self, n_out, collapse_output=False, directions=4, projection='average', base=None, **kwargs):
    """Build a 2D (multi-directional) LSTM layer over a 4D input.

    Variant of the previous example: shares base-layer parameters by direct
    attribute assignment (not re-registered via add_param), and fixes up the
    sizes layout when the source is a 1Dto2D layer.

    :param n_out: number of output units per direction.
    :param collapse_output: optional reduction of the output's first axis:
      'sum'/True, 'mean', 'conv', 'flatten', 'pad_<N>', or False.
    :param directions: 1, 2 or 4 scan directions over the 2D input.
    :param projection: 'average' (mean over directions) or 'concat'
      (stack directions into the feature dim, multiplying n_out).
    :param base: optional list whose first element is another layer to
      share parameters with.
    """
    if base is None:
      base = []
    super(TwoDLSTMLayer, self).__init__(n_out, **kwargs)
    assert len(self.sources) == 1
    source = self.sources[0]
    n_in = source.attrs['n_out']
    X = source.output
    assert X.ndim == 4
    sizes = source.output_sizes
    if source.layer_class == "1Dto2D":
      #sizes has the wrong layout if coming directly from a 1Dto2D layer
      sizes = sizes.reshape((2, sizes.size // 2)).dimshuffle(1, 0)
    self.output_sizes = sizes
    assert directions in [1,2,4], "only 1, 2 or 4 directions are supported"
    assert projection in ['average', 'concat'], "invalid projection"

    if base:
      #self.b1 = self.add_param(base[0].b1)
      #self.b2 = self.add_param(base[0].b2)
      #if directions >= 1:
      #  self.b3 = self.add_param(base[0].b3)
      #  self.b4 = self.add_param(base[0].b4)
      #self.W1, self.V_h1, self.V_v1 = self.add_param(base[0].W1), self.add_param(base[0].V_h1), self.add_param(base[0].V_v1)
      #self.W2, self.V_h2, self.V_v2 = self.add_param(base[0].W2), self.add_param(base[0].V_h2), self.add_param(base[0].V_v2)
      #if directions >= 1:
      #  self.W3, self.V_h3, self.V_v3 = self.add_param(base[0].W3), self.add_param(base[0].V_h3), self.add_param(base[0].V_v3)
      #  self.W4, self.V_h4, self.V_v4 = self.add_param(base[0].W4), self.add_param(base[0].V_h4), self.add_param(base[0].V_v4)
      #self.mass = base[0].mass
      #self.masks = base[0].masks
      # Share the base layer's parameters by direct reference (they are NOT
      # re-registered as params of this layer -- see commented add_param
      # variant above).
      self.b1 = base[0].b1
      self.b2 = base[0].b2
      # NOTE(review): 'directions >= 1' is always true given the assert
      # above (directions in {1,2,4}); presumably this was meant to gate
      # the 3rd/4th direction (e.g. '> 2') -- confirm.
      if directions >= 1:
        self.b3 = base[0].b3
        self.b4 = base[0].b4
      self.W1, self.V_h1, self.V_v1 = base[0].W1, base[0].V_h1, base[0].V_v1
      self.W2, self.V_h2, self.V_v2 = base[0].W2, base[0].V_h2, base[0].V_v2
      if directions >= 1:
        self.W3, self.V_h3, self.V_v3 = base[0].W3, base[0].V_h3, base[0].V_v3
        self.W4, self.V_h4, self.V_v4 = base[0].W4, base[0].V_h4, base[0].V_v4
      self.mass = base[0].mass
      self.masks = base[0].masks
    else:
      # Create fresh biases and weights (one set per direction).
      self.b1 = self.create_and_add_bias(n_out, "1")
      self.b2 = self.create_and_add_bias(n_out, "2")
      if directions >= 1:
        self.b3 = self.create_and_add_bias(n_out, "3")
        self.b4 = self.create_and_add_bias(n_out, "4")

      self.W1, self.V_h1, self.V_v1 = self.create_and_add_2d_lstm_weights(n_in, n_out, "1")
      self.W2, self.V_h2, self.V_v2 = self.create_and_add_2d_lstm_weights(n_in, n_out, "2")
      if directions >= 1:
        self.W3, self.V_h3, self.V_v3 = self.create_and_add_2d_lstm_weights(n_in, n_out, "3")
        self.W4, self.V_h4, self.V_v4 = self.create_and_add_2d_lstm_weights(n_in, n_out, "4")

    # dropout
    assert len(self.masks) == 1
    mask = self.masks[0]
    if mask is not None:
      X = self.mass * mask * X

    if str(theano.config.device).startswith('cpu'):
      # The 2D LSTM ops are GPU-only: on CPU, emit a zero placeholder with
      # the right shape so graph construction still succeeds.
      Y = T.zeros_like(X)
      if projection == 'concat':
        Y = Y.repeat(directions,axis=-1)
        n_out *= directions
    else:
      if directions <= 2:
        Y = BidirectionalTwoDLSTMOpInstance(X, self.W1, self.W2, self.V_h1, self.V_h2, self.V_v1, self.V_v2, self.b1, self.b2, sizes)
      else:
        Y = MultiDirectionalTwoDLSTMOpInstance(X, self.W1, self.W2, self.W3, self.W4, self.V_h1, self.V_h2, self.V_h3, self.V_h4,
                                               self.V_v1, self.V_v2, self.V_v3, self.V_v4, self.b1, self.b2, self.b3, self.b4, sizes)

      if directions > 1:
        # Combine per-direction outputs along a new trailing axis.
        Y = T.stack(Y[:directions],axis=-1)
        if projection == 'average':
          Y = Y.mean(axis=-1)
        elif projection == 'concat':
          Y = Y.reshape((Y.shape[0],Y.shape[1],Y.shape[2],Y.shape[3]*Y.shape[4]))
          n_out *= directions
      else:
        Y = Y[0]

    Y.name = 'Y'
    self.set_attr('n_out', n_out)
    self.set_attr('collapse_output', collapse_output)
    self.set_attr('directions', directions)
    self.set_attr('projection', projection)

    #index handling
    # Per-sequence validity mask: ones up to each size in sizes[:, 1].
    def index_fn(index, size):
      return T.set_subtensor(index[:size], numpy.cast['int8'](1))
    index_init = T.zeros((Y.shape[2],Y.shape[1]), dtype='int8')
    self.index, _ = theano.scan(index_fn, [index_init, T.cast(sizes[:,1],"int32")])
    self.index = self.index.dimshuffle(1, 0)

    if collapse_output == 'sum' or collapse_output == True:
      Y = Y.sum(axis=0)
    elif collapse_output == 'mean':
      Y = Y.mean(axis=0)
    elif collapse_output == 'conv':
      # Fold axis 0 via circular convolution, keep the final step.
      from TheanoUtil import circular_convolution
      Y, _ = theano.scan(lambda x_i,x_p:circular_convolution(x_i,x_p),Y,Y[0])
      Y = Y[-1]
    elif collapse_output == 'flatten':
      # Merged axes invalidate the scan-built mask; mark all valid.
      self.index = T.ones((Y.shape[0] * Y.shape[1], Y.shape[2]), dtype='int8')
      Y = Y.reshape((Y.shape[0]*Y.shape[1],Y.shape[2],Y.shape[3]))
    elif str(collapse_output).startswith('pad_'):
      # Zero-pad (or crop) axis 0 to `pad`, then fold it into the feature
      # dimension; n_out grows accordingly.
      pad = numpy.int32(collapse_output.split('_')[-1])
      Y = ifelse(T.lt(Y.shape[0],pad),T.concatenate([Y,T.zeros((pad-Y.shape[0],Y.shape[1],Y.shape[2],Y.shape[3]),'float32')],axis=0),
                 ifelse(T.gt(Y.shape[0],pad),Y[:pad],Y))
      Y = Y.dimshuffle(1,2,3,0).reshape((Y.shape[1],Y.shape[2],Y.shape[3]*Y.shape[0]))
      self.attrs['n_out'] *= pad
    elif collapse_output != False:
      assert False, "invalid collapse mode"

    if self.attrs['batch_norm']:
      # Uncollapsed output normalizes against raw sizes; collapsed output
      # uses the computed index mask.
      Y = self.batch_norm(Y,self.attrs['n_out'],index=sizes if not collapse_output else self.index, force_sample=False)

    self.output = Y