def fprop(self, state_below):

        check_cuda(str(type(self)))
        self.input_space.validate(state_below)
        if not hasattr(self, 'input_normalization'):
            self.input_normalization = None

        if self.input_normalization:
            state_below = self.input_normalization(state_below)

        # FFT-based 3D convolution
        z = self.transformer.lmul(state_below)

        # bias addition; z is laid out as ('b', 0, 1, 't', 'c') here
        if not hasattr(self, 'tied_b'):
            self.tied_b = False
        if self.tied_b:
            # one bias per channel, broadcast over batch, space and time
            b = self.b.dimshuffle('x', 'x', 'x', 'x', 0)
        else:
            # one bias per detector unit, broadcast over the batch axis
            b = self.b.dimshuffle('x', 0, 1, 2, 3)
        z = z + b

        if self.layer_name is not None:
            z.name = self.layer_name + '_z'
        self.detector_space.validate(z)
        #assert self.detector_space.num_channels % 16 == 0

        # ReLUs
        z = T.maximum(z, 0)
        if self.output_space.num_channels % 16 != 0:
            raise NotImplementedError(
                "num_channels must be divisible by 16")

        # Alex Krizhevsky's max pool op only works when the number of
        # channels is divisible by 16. We can only do the cross-channel
        # pooling first if it preserves that property.

        # Pooling
        # permute axes ('b', 0, 1, 't', 'c') -> ('c', 0, 1, 't', 'b')
        # (the axis order required by the pooling ops)
        z = z.dimshuffle(4, 1, 2, 3, 0)

        # spatial pooling x/y
        z_shape = z.shape
        z = z.reshape(
            (z_shape[0], z_shape[1], z_shape[2], z_shape[3] * z_shape[4]))
        p = max_pool_c01b(c01b=z,
                          pool_shape=self.pool_shape,
                          pool_stride=self.pool_stride)

        p_shape = p.shape
        p = p.reshape(
            (p_shape[0], p_shape[1], p_shape[2], z_shape[3], z_shape[4]))

        # temporal pooling with overlap (t)
        p_shape = p.shape

        # ('c', 0, 1, 't', 'b') -> ('c', 0*1, 't', 'b'), a c01b-shaped input
        # for temporal_max_pool_c01b with the spatial axes folded together
        p = p.reshape(
            (p_shape[0], p_shape[1] * p_shape[2], p_shape[3], p_shape[4]))
        t = temporal_max_pool_c01b(c01b=p,
                                   pool_shape=self.pool_temporal_shape,
                                   pool_stride=self.pool_temporal_stride,
                                   image_shape=self.temp_pool_input_shape)
        t_shape = t.shape
        t = t.reshape(
            (t_shape[0], p_shape[1], p_shape[2], t_shape[2], t_shape[3]))
        # Permute back axes ['c', 0, 1, 't', 'b'] -> ['b', 0, 1, 't', 'c']
        t = t.dimshuffle(4, 1, 2, 3, 0)
        self.output_space.validate(t)

        if not hasattr(self, 'output_normalization'):
            self.output_normalization = None

        if self.output_normalization:
            t = self.output_normalization(t)

        return t
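
The pooling above is mostly axis bookkeeping: the 3D max pool is decomposed into a spatial 2D pool (with the time axis folded into the batch axis) followed by a temporal pool (with the two spatial axes folded together). A minimal NumPy sketch of that bookkeeping, with made-up shapes and simple non-overlapping stand-ins for max_pool_c01b and temporal_max_pool_c01b:

import numpy as np

b, r, c, t, ch = 2, 8, 8, 6, 16           # ('b', 0, 1, 't', 'c')
x = np.random.randn(b, r, c, t, ch)

z = x.transpose(4, 1, 2, 3, 0)            # -> ('c', 0, 1, 't', 'b')
z = z.reshape(ch, r, c, t * b)            # fold 't' into the batch axis

# non-overlapping 2x2 spatial max pool (stand-in for max_pool_c01b)
p = z.reshape(ch, r // 2, 2, c // 2, 2, t * b).max(axis=(2, 4))

p = p.reshape(ch, r // 2, c // 2, t, b)   # un-fold 't'
p = p.reshape(ch, (r // 2) * (c // 2), t, b)  # fold 0*1 for the temporal pool

# non-overlapping temporal max pool of width 2
q = p.reshape(ch, (r // 2) * (c // 2), t // 2, 2, b).max(axis=3)

q = q.reshape(ch, r // 2, c // 2, t // 2, b).transpose(4, 1, 2, 3, 0)
print(q.shape)                            # (2, 4, 4, 3, 16), back in ('b', 0, 1, 't', 'c')
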
    def set_input_space(self, space):
        """ Note: this resets parameters! """

        # set up detector space and initialize transformer
        setup_detector_layer_b01tc(layer=self,
                                   input_space=space,
                                   rng=self.mlp.rng,
                                   irange=self.irange)
        rng = self.mlp.rng
        detector_shape = self.detector_space.shape

        #def handle_pool_shape(idx):
        #    if self.pool_shape[idx] < 1:
        #        raise ValueError("bad pool shape: " + str(self.pool_shape))
        #    if self.pool_shape[idx] > detector_shape[idx]:
        #        if self.fix_pool_shape:
        #            assert detector_shape[idx] > 0
        #            self.pool_shape[idx] = detector_shape[idx]
        #        else:
        #            raise ValueError("Pool shape exceeds detector layer shape on axis %d" % idx)
        #map(handle_pool_shape, [0, 1, 2])

        ### Check some preconditions
        assert self.pool_shape[0] == self.pool_shape[1]
        assert self.pool_stride[0] == self.pool_stride[1]
        assert all(
            isinstance(elem, py_integer_types) for elem in self.pool_stride)
        for i in xrange(0, 2):
            assert self.pool_stride[i] <= self.pool_shape[i]

        dummy_shape = [self.input_space.shape[0], self.input_space.shape[1]]

        # find the output space shape after spatial pooling ("max_pool_c01b")
        # and temporal pooling by running the ops on dummy data below
        dummy_output_shape = [
            int(np.ceil((i_sh + 2. * self.pad - k_sh) / float(k_st))) + 1
            for i_sh, k_sh, k_st in zip(dummy_shape, self.kernel_shape,
                                        self.kernel_stride)
        ]
        dummy_detector_space = Conv2DSpace(shape=dummy_output_shape,
                                           num_channels=self.detector_channels,
                                           axes=('c', 0, 1, 'b'))

        # use only 16 channels and a tiny batch for a fast dummy max pool
        # (16 because Alex Krizhevsky's code needs at least 16 channels)
        dummy_detector = sharedX(
            dummy_detector_space.get_origin_batch(2)[0:16, :, :, :])

        dummy_p = max_pool_c01b(c01b=dummy_detector,
                                pool_shape=self.pool_shape,
                                pool_stride=self.pool_stride)
        dummy_p = dummy_p.eval()

        # set space after temporal pooling with overlap
        if self.pool_temporal_stride[1] > self.pool_temporal_shape[1]:
            if self.fix_pool_stride:
                warnings.warn("Fixing the pool stride")
                ps = self.pool_temporal_shape[1]
                assert isinstance(ps, py_integer_types)
                # clamp the temporal stride to the temporal pool shape
                self.pool_temporal_stride = [1, ps]
            else:
                raise ValueError("Stride too big.")
        # fold the spatial axes together: (0*1, 't')
        dummy_temp_image = [(dummy_p.shape[1] * dummy_p.shape[2]),
                            self.detector_space.shape[2]]
        # image_shape for the overlapped temporal max pooling
        self.temp_pool_input_shape = dummy_temp_image
        dummy_temp_space = Conv2DSpace(shape=dummy_temp_image,
                                       num_channels=self.detector_channels,
                                       axes=('c', 0, 1, 'b'))
        temp_input = sharedX(
            dummy_temp_space.get_origin_batch(2)[0:16, :, :, :])
        dummy_temp_p = temporal_max_pool_c01b(
            c01b=temp_input,
            pool_shape=self.pool_temporal_shape,
            pool_stride=self.pool_temporal_stride,
            image_shape=dummy_temp_image)
        dummy_temp_p = dummy_temp_p.eval()

        self.output_space = Conv3DSpace(
            shape=[dummy_p.shape[1], dummy_p.shape[2], dummy_temp_p.shape[2]],
            num_channels=self.num_channels,
            axes=('b', 0, 1, 't', 'c'))

        # Print spaces
        print "Input shape: ", self.input_space.shape
        print "Detector space: ", self.detector_space.shape
        print "Output space: ", self.output_space.shape
    def fprop(self, state_below):

        check_cuda(str(type(self)))
        self.input_space.validate(state_below)
        if not hasattr(self, 'input_normalization'):
            self.input_normalization = None

        if self.input_normalization:
            state_below = self.input_normalization(state_below)

        # FFT-based 3D convolution
        z = self.transformer.lmul(state_below)

        # bias addition; z is laid out as ('b', 0, 1, 't', 'c') here
        if not hasattr(self, 'tied_b'):
            self.tied_b = False
        if self.tied_b:
            # one bias per channel, broadcast over batch, space and time
            b = self.b.dimshuffle('x', 'x', 'x', 'x', 0)
        else:
            # one bias per detector unit, broadcast over the batch axis
            b = self.b.dimshuffle('x', 0, 1, 2, 3)
        z = z + b

        if self.layer_name is not None:
            z.name = self.layer_name + '_z'
        self.detector_space.validate(z)
        #assert self.detector_space.num_channels % 16 == 0

        # ReLUs
        z = T.maximum(z, 0)
        if self.output_space.num_channels % 16 != 0:
            raise NotImplementedError(
                "num_channels must be divisible by 16")

        # Alex Krizhevsky's max pool op only works when the number of
        # channels is divisible by 16. We can only do the cross-channel
        # pooling first if it preserves that property.

        # Pooling
        # permute axes ('b', 0, 1, 't', 'c') -> ('c', 0, 1, 't', 'b')
        # (the axis order required by the pooling ops)
        z = z.dimshuffle(4, 1, 2, 3, 0)

        # spatial pooling x/y
        z_shape = z.shape
        z = z.reshape(
            (z_shape[0], z_shape[1], z_shape[2], z_shape[3] * z_shape[4]))
        p = max_pool_c01b(c01b=z,
                          pool_shape=self.pool_shape[0:2],
                          pool_stride=self.pool_stride[0:2])
        p = p.reshape(
            (p.shape[0], p.shape[1], p.shape[2], z_shape[3], z_shape[4]))

        # temporal pooling with overlap (t)
        p_shape = p.shape
        # ('c', 0, 1, 't', 'b') -> ('c', 0*1, 't', 'b'), a c01b-shaped input
        # for temporal_max_pool_c01b with the spatial axes folded together
        p = p.reshape(
            (p_shape[0], p_shape[1] * p_shape[2], p_shape[3], p_shape[4]))
        t = temporal_max_pool_c01b(c01b=p,
                                   pool_shape=[1, self.pool_shape[2]],
                                   pool_stride=[1, self.pool_stride[2]],
                                   image_shape=self.temp_pool_input_shape)
        t_shape = t.shape
        t = t.reshape(
            (t_shape[0], p_shape[1], p_shape[2], t_shape[2], t_shape[3]))
        # Permute back axes ['c', 0, 1, 't', 'b'] -> ['b', 0, 1, 't', 'c']
        t = t.dimshuffle(4, 1, 2, 3, 0)
        self.output_space.validate(t)

        if not hasattr(self, 'output_normalization'):
            self.output_normalization = None

        if self.output_normalization:
            t = self.output_normalization(t)

        return t
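
A NumPy sketch (illustrative shapes, not the pylearn2 API) of the two bias layouts that the tied_b flag switches between for a ('b', 0, 1, 't', 'c') activation tensor:

import numpy as np

b_sz, r, c, t, ch = 2, 4, 4, 3, 16
z = np.zeros((b_sz, r, c, t, ch))            # ('b', 0, 1, 't', 'c')

tied = np.random.randn(ch)                   # tied_b: one bias per channel
z_tied = z + tied.reshape(1, 1, 1, 1, ch)    # broadcasts over 'b', 0, 1, 't'

untied = np.random.randn(r, c, t, ch)        # one bias per detector unit
z_untied = z + untied[np.newaxis, ...]       # broadcasts over 'b' only

print(z_tied.shape)                          # (2, 4, 4, 3, 16)
print(z_untied.shape)                        # (2, 4, 4, 3, 16)
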
    def set_input_space(self, space):
        """ Note: this resets parameters! """

        # setup_detector_layer_bct01(layer=self,
        #                            input_space=space,
        #                            rng=self.mlp.rng,
        #                            irange=self.irange)
        # Use theano conv3d instead
        setup_detector_layer_b01tc(layer=self,
                                   input_space=space,
                                   rng=self.mlp.rng,
                                   irange=self.irange)
        rng = self.mlp.rng
        detector_shape = self.detector_space.shape

        def handle_pool_shape(idx):
            if self.pool_shape[idx] < 1:
                raise ValueError("bad pool shape: " + str(self.pool_shape))
            if self.pool_shape[idx] > detector_shape[idx]:
                if self.fix_pool_shape:
                    assert detector_shape[idx] > 0
                    self.pool_shape[idx] = detector_shape[idx]
                else:
                    raise ValueError("Pool shape exceeds detector layer shape on axis %d" % idx)
        for idx in [0, 1, 2]:
            handle_pool_shape(idx)


        ### Check some preconditions
        assert self.pool_shape[0] == self.pool_shape[1]
        assert self.pool_stride[0] == self.pool_stride[1]
        assert all(
            isinstance(elem, py_integer_types) for elem in self.pool_stride)
        for i in xrange(0, 2):
            assert self.pool_stride[i] <= self.pool_shape[i]

        # Find space shape after convolution
        dummy_output_shape = [int(np.ceil((i_sh + 2. * self.pad - k_sh) / float(k_st))) + 1
                              for i_sh, k_sh, k_st in zip(self.input_space.shape,
                                                          self.kernel_shape,
                                                          self.kernel_stride)]
        dummy_output_sequence_length = dummy_output_shape[2]

        ### Find the space shape after spatial pooling
        dummy_output_shape = [dummy_output_shape[0],
                              dummy_output_shape[1]]
        dummy_detector_space = Conv2DSpace(shape=dummy_output_shape,
                                           num_channels=self.detector_channels,
                                           axes=('c', 0, 1, 'b'))
        # use only 16 channels and a tiny batch for a fast dummy max pool
        dummy_detector = sharedX(
            dummy_detector_space.get_origin_batch(2)[0:16, :, :, :])
        dummy_p = max_pool_c01b(c01b=dummy_detector,
                                pool_shape=self.pool_shape[0:2],  # spatial part only
                                pool_stride=self.pool_stride[0:2])
        dummy_p = dummy_p.eval()

        ### Find the space shape after temporal pooling
        # fold the spatial axes together: (0*1, 't')
        dummy_temp_image = [dummy_p.shape[1] * dummy_p.shape[2],
                            dummy_output_sequence_length]
        self.temp_pool_input_shape = dummy_temp_image
        dummy_temp_space = Conv2DSpace(shape=dummy_temp_image,
                                       num_channels=self.detector_channels,
                                       axes=('c', 0, 1, 'b'))
        temp_input = sharedX(
            dummy_temp_space.get_origin_batch(2)[0:16, :, :, :])
        dummy_temp_p = temporal_max_pool_c01b(
            c01b=temp_input,
            pool_shape=[1, self.pool_shape[2]],
            pool_stride=[1, self.pool_stride[2]],
            image_shape=dummy_temp_image)
        dummy_temp_p = dummy_temp_p.eval()
        self.output_space = Conv3DSpace(
            shape=[dummy_p.shape[1], dummy_p.shape[2], dummy_temp_p.shape[2]],
            num_channels=self.num_channels,
            axes=('b', 0, 1, 't', 'c'))

        # Print spaces
        print "Input shape: ", self.input_space.shape
        print "Detector space: ", self.detector_space.shape
        print "Output space: ", self.output_space.shape
    def set_input_space(self, space):
        """ Note: this resets parameters! """

        # set up detector space and initialize transformer
        setup_detector_layer_b01tc(layer=self,
                                   input_space=space,
                                   rng=self.mlp.rng,
                                   irange=self.irange)
        rng = self.mlp.rng
        detector_shape = self.detector_space.shape

        #def handle_pool_shape(idx):
        #    if self.pool_shape[idx] < 1:
        #        raise ValueError("bad pool shape: " + str(self.pool_shape))
        #    if self.pool_shape[idx] > detector_shape[idx]:
        #        if self.fix_pool_shape:
        #            assert detector_shape[idx] > 0
        #            self.pool_shape[idx] = detector_shape[idx]
        #        else:
        #            raise ValueError("Pool shape exceeds detector layer shape on axis %d" % idx)
        #map(handle_pool_shape, [0, 1, 2])


        ### Check some preconditions
        assert self.pool_shape[0] == self.pool_shape[1]
        assert self.pool_stride[0] == self.pool_stride[1]
        assert all(
            isinstance(elem, py_integer_types) for elem in self.pool_stride)
        for i in xrange(0, 2):
            assert self.pool_stride[i] <= self.pool_shape[i]

        dummy_shape = [self.input_space.shape[0], self.input_space.shape[1]]

        # find the output space shape after spatial pooling ("max_pool_c01b")
        # and temporal pooling by running the ops on dummy data below
        dummy_output_shape = [
            int(np.ceil((i_sh + 2. * self.pad - k_sh) / float(k_st))) + 1
            for i_sh, k_sh, k_st in zip(dummy_shape, self.kernel_shape,
                                        self.kernel_stride)
        ]

        dummy_detector_space = Conv2DSpace(shape=dummy_output_shape,
                                           num_channels=self.detector_channels,
                                           axes=('c', 0, 1, 'b'))

        # use only 16 channels and a tiny batch for a fast dummy max pool
        # (16 because Alex Krizhevsky's code needs at least 16 channels)
        dummy_detector = sharedX(
            dummy_detector_space.get_origin_batch(2)[0:16, :, :, :])

        dummy_p = max_pool_c01b(c01b=dummy_detector,
                                pool_shape=self.pool_shape,
                                pool_stride=self.pool_stride)
        dummy_p = dummy_p.eval()
     
        # set space after temporal pooling with overlap
        if self.pool_temporal_stride[1] > self.pool_temporal_shape[1]:
            if self.fix_pool_stride:
                warnings.warn("Fixing the pool stride")
                ps = self.pool_temporal_shape[1]
                assert isinstance(ps, py_integer_types)
                # clamp the temporal stride to the temporal pool shape
                self.pool_temporal_stride = [1, ps]
            else:
                raise ValueError("Stride too big.")

        # fold the spatial axes together: (0*1, 't')
        dummy_temp_image = [(dummy_p.shape[1] * dummy_p.shape[2]),
                            self.detector_space.shape[2]]
        # image_shape for the overlapped temporal max pooling
        self.temp_pool_input_shape = dummy_temp_image
        dummy_temp_space = Conv2DSpace(shape=dummy_temp_image,
                                       num_channels=self.detector_channels,
                                       axes=('c', 0, 1, 'b'))
        temp_input = sharedX(
            dummy_temp_space.get_origin_batch(2)[0:16, :, :, :])
        dummy_temp_p = temporal_max_pool_c01b(
            c01b=temp_input,
            pool_shape=self.pool_temporal_shape,
            pool_stride=self.pool_temporal_stride,
            image_shape=dummy_temp_image)
        dummy_temp_p = dummy_temp_p.eval()

        self.output_space = Conv3DSpace(
            shape=[dummy_p.shape[1], dummy_p.shape[2], dummy_temp_p.shape[2]],
            num_channels=self.num_channels,
            axes=('b', 0, 1, 't', 'c'))

        # Print spaces
        print "Input shape: ", self.input_space.shape
        print "Detector space: ", self.detector_space.shape
        print "Output space: ", self.output_space.shape
    def set_input_space(self, space):
        """ Note: this resets parameters! """

        # setup_detector_layer_bct01(layer=self,
        #                            input_space=space,
        #                            rng=self.mlp.rng,
        #                            irange=self.irange)
        # Use theano conv3d instead
        setup_detector_layer_b01tc(layer=self,
                                   input_space=space,
                                   rng=self.mlp.rng,
                                   irange=self.irange)
        rng = self.mlp.rng
        detector_shape = self.detector_space.shape

        def handle_pool_shape(idx):
            if self.pool_shape[idx] < 1:
                raise ValueError("bad pool shape: " + str(self.pool_shape))
            if self.pool_shape[idx] > detector_shape[idx]:
                if self.fix_pool_shape:
                    assert detector_shape[idx] > 0
                    self.pool_shape[idx] = detector_shape[idx]
                else:
                    raise ValueError(
                        "Pool shape exceeds detector layer shape on axis %d" %
                        idx)

        for idx in [0, 1, 2]:
            handle_pool_shape(idx)

        ### Check some preconditions
        assert self.pool_shape[0] == self.pool_shape[1]
        assert self.pool_stride[0] == self.pool_stride[1]
        assert all(
            isinstance(elem, py_integer_types) for elem in self.pool_stride)
        for i in xrange(0, 2):
            assert self.pool_stride[i] <= self.pool_shape[i]

        # Find space shape after convolution
        dummy_output_shape = [
            int(np.ceil((i_sh + 2. * self.pad - k_sh) / float(k_st))) + 1
            for i_sh, k_sh, k_st in zip(self.input_space.shape,
                                        self.kernel_shape, self.kernel_stride)
        ]
        dummy_output_sequence_length = dummy_output_shape[2]

        ### Find the space shape after spatial pooling
        dummy_output_shape = [dummy_output_shape[0], dummy_output_shape[1]]
        dummy_detector_space = Conv2DSpace(shape=dummy_output_shape,
                                           num_channels=self.detector_channels,
                                           axes=('c', 0, 1, 'b'))
        dummy_detector = sharedX(
            dummy_detector_space.get_origin_batch(2)[0:16, :, :, :])
        dummy_p = max_pool_c01b(c01b=dummy_detector,
                                pool_shape=self.pool_shape[0:2],  # spatial part only
                                pool_stride=self.pool_stride[0:2])
        dummy_p = dummy_p.eval()

        ### Find the space shape after temporal pooling
        # fold the spatial axes together: (0*1, 't')
        dummy_temp_image = [
            dummy_p.shape[1] * dummy_p.shape[2], dummy_output_sequence_length
        ]
        self.temp_pool_input_shape = dummy_temp_image
        dummy_temp_space = Conv2DSpace(shape=dummy_temp_image,
                                       num_channels=self.detector_channels,
                                       axes=('c', 0, 1, 'b'))
        temp_input = sharedX(
            dummy_temp_space.get_origin_batch(2)[0:16, :, :, :])
        dummy_temp_p = temporal_max_pool_c01b(
            c01b=temp_input,
            pool_shape=[1, self.pool_shape[2]],
            pool_stride=[1, self.pool_stride[2]],
            image_shape=dummy_temp_image)
        dummy_temp_p = dummy_temp_p.eval()
        self.output_space = Conv3DSpace(
            shape=[dummy_p.shape[1], dummy_p.shape[2], dummy_temp_p.shape[2]],
            num_channels=self.num_channels,
            axes=('b', 0, 1, 't', 'c'))

        # Print spaces
        print "Input shape: ", self.input_space.shape
        print "Detector space: ", self.detector_space.shape
        print "Output space: ", self.output_space.shape