Example #1
def setup_detector_layer_b01tc(layer, input_space, rng, irange, stride):
    """
    Takes steps to set up an object for use as a convolutional layer.
    This function sets up only the detector layer.
    Parameters
    ----------
    layer: Any Python object that allows the modifications described below and has
    the following attributes:
         pad: int describing amount of zero padding to add
         kernel_shape: 3-element tuple or list describing shape of kernel
         fix_kernel_shape: bool, if true, will shrink the kernel shape to make it
         feasible, as needed (useful for hyperparameter searchers)
         detector_channels: The number of channels in the detector layer
         init_bias: A numeric constant added to a tensor of zeros to initialize the
         bias
         tied_b: If true, biases are shared across all spatial locations

    input_space: A Conv3DSpace to be used as input to the layer

    rng: a numpy RandomState or equivalent

    irange: float. Kernel elements are initialized randomly from U(-irange, irange)

    stride: kernel stride to fall back on when the layer does not define its
    own `kernel_stride` attribute

    Does the following:
        raises a RuntimeError if cuda is not available
        sets layer.input_space to input_space
        sets up addition of dummy channels for compatibility with cuda-convnet:
            layer.dummy_channels: # of dummy channels that need to be added
                (You might want to check this and raise an Exception if it's not 0)
            layer.dummy_space: The Conv3DSpace representing the input with dummy channels
                added
        sets layer.detector_space to the space for the detector layer
        sets layer.transformer to be a Conv3DB01TC instance
        sets layer.b to the right value
    """

    # Use "self" to refer to layer from now on, so we can pretend we're just running
    # in the set_input_space method of the layer
    self = layer

    # Make sure cuda is available
    check_cuda(str(type(self)))

    # Validate input
    if not isinstance(input_space, Conv3DSpace):
        raise TypeError(
            "The input to a convolutional layer should be a Conv3DSpace, "
            "but layer " + self.layer_name + " got " +
            str(type(input_space)))

    if not hasattr(self, 'detector_channels'):
        raise ValueError(
            'layer argument must have a "detector_channels" attribute specifying how many channels to put in the convolution kernel stack.'
        )

    # Store the input space
    self.input_space = input_space

    #self.dummy_space = Conv3DSpace(shape=input_space.shape,
    #                               channels=input_space.num_channels + self.dummy_channels,
    #                               axes=('b', 'c', 't', 0, 1))

    # Use the layer's own kernel stride if it defines one; otherwise fall
    # back on the stride argument
    if hasattr(self, 'kernel_stride'):
        kernel_stride = self.kernel_stride
    else:
        kernel_stride = stride

    # Standard convolution output size: floor((i + 2*pad - k) / stride) + 1
    output_shape = [
        int((i_sh + 2. * self.pad - k_sh) / float(k_st)) + 1 for i_sh, k_sh,
        k_st in zip(self.input_space.shape, self.kernel_shape, kernel_stride)
    ]

    def handle_kernel_shape(idx):
        if self.kernel_shape[idx] < 1:
            raise ValueError(
                "kernel must have strictly positive size on all axes but has shape: "
                + str(self.kernel_shape))
        if output_shape[idx] <= 0:
            if self.fix_kernel_shape:
                self.kernel_shape[
                    idx] = self.input_space.shape[idx] + 2 * self.pad
                assert self.kernel_shape[idx] != 0
                output_shape[idx] = 1
                warnings.warn(
                    "Had to change the kernel shape to make network feasible")
            else:
                raise ValueError(
                    "kernel too big for input (even with zero padding)")

    for idx in [0, 1, 2]:
        handle_kernel_shape(idx)

    # space required for 3dconv
    self.detector_space = Conv3DSpace(shape=output_shape,
                                      num_channels=self.detector_channels,
                                      axes=('b', 0, 1, 't', 'c'))

    if hasattr(self, 'partial_sum'):
        partial_sum = self.partial_sum
    else:
        partial_sum = 1
    # filter shape for the 3D convolution: ('c_detector', 0, 1, 't', 'c')
    filter_shape = (
        self.detector_space.num_channels,
        self.kernel_shape[0],
        self.kernel_shape[1],
        self.kernel_shape[2],
        self.input_space.num_channels,
    )

    # signal shape for the 3D convolution: ('b', 0, 1, 't', 'c')
    signal_shape = (
        self.mlp.batch_size,
        self.input_space.shape[0],
        self.input_space.shape[1],
        self.input_space.shape[2],
        self.input_space.num_channels,
    )

    self.transformer = make_random_conv3D(irange=self.irange,
                                          input_axes=('b', 0, 1, 't', 'c'),
                                          output_axes=self.detector_space.axes,
                                          signal_shape=signal_shape,
                                          filter_shape=filter_shape,
                                          pad=self.pad,
                                          partial_sum=partial_sum,
                                          kernel_stride=kernel_stride,
                                          rng=rng)

    W, = self.transformer.get_params()
    W.name = 'W'

    if self.tied_b:
        self.b = sharedX(
            np.zeros((self.detector_space.num_channels)) + self.init_bias)
    else:
        self.b = sharedX(self.detector_space.get_origin() + self.init_bias)
    self.b.name = 'b'
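
A minimal usage sketch, not part of the original source: the stub classes below are hypothetical and carry only the attributes the function reads, and the import paths are assumed pylearn2-style locations (the helpers check_cuda, make_random_conv3D, and sharedX must also be importable for this to run).

# Hypothetical usage sketch for setup_detector_layer_b01tc (assumptions noted above).
import numpy as np
from pylearn2.space import Conv3DSpace  # assumed import location

class StubMLP(object):
    batch_size = 16

class StubLayer(object):
    layer_name = 'conv3d_0'
    pad = 1
    kernel_shape = [3, 3, 3]
    fix_kernel_shape = True
    detector_channels = 32
    init_bias = 0.0
    tied_b = True
    irange = 0.05
    mlp = StubMLP()

layer = StubLayer()
space = Conv3DSpace(shape=[32, 32, 16], num_channels=3,
                    axes=('b', 0, 1, 't', 'c'))
# StubLayer has no kernel_stride attribute, so the stride argument is used
setup_detector_layer_b01tc(layer=layer, input_space=space,
                           rng=np.random.RandomState(42),
                           irange=layer.irange, stride=[1, 1, 1])
print layer.detector_space.shape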
Example #2
    def set_input_space(self, space):
        """ Note: this resets parameters! """

        # setup_detector_layer_bct01(layer=self,
        #                            input_space=space,
        #                            rng=self.mlp.rng,
        #                            irange=self.irange)
        # Use theano conv3d instead
        setup_detector_layer_b01tc(layer=self,
                                   input_space=space,
                                   rng=self.mlp.rng,
                                   irange=self.irange)
        rng = self.mlp.rng
        detector_shape = self.detector_space.shape

        def handle_pool_shape(idx):
            if self.pool_shape[idx] < 1:
                raise ValueError("bad pool shape: " + str(self.pool_shape))
            if self.pool_shape[idx] > detector_shape[idx]:
                if self.fix_pool_shape:
                    assert detector_shape[idx] > 0
                    self.pool_shape[idx] = detector_shape[idx]
                else:
                    raise ValueError(
                        "Pool shape exceeds detector layer shape on axis %d" %
                        idx)

        for idx in [0, 1, 2]:
            handle_pool_shape(idx)

        ### Check some preconditions
        assert self.pool_shape[0] == self.pool_shape[1]
        assert self.pool_stride[0] == self.pool_stride[1]
        assert all(
            isinstance(elem, py_integer_types) for elem in self.pool_stride)
        for i in xrange(0, 2):
            assert self.pool_stride[i] <= self.pool_shape[i]

        # Find space shape after convolution
        dummy_output_shape = [
            int(np.ceil((i_sh + 2. * self.pad - k_sh) / float(k_st))) + 1
            for i_sh, k_sh, k_st in zip(self.input_space.shape,
                                        self.kernel_shape, self.kernel_stride)
        ]
        dummy_output_sequence_length = dummy_output_shape[2]

        ### Find the space shape after spatial pooling
        dummy_output_shape = [dummy_output_shape[0], dummy_output_shape[1]]
        dummy_detector_space = Conv2DSpace(shape=dummy_output_shape,
                                           num_channels=self.detector_channels,
                                           axes=('c', 0, 1, 'b'))
        dummy_detector = sharedX(
            dummy_detector_space.get_origin_batch(2)[0:16, :, :, :])
        dummy_p = max_pool_c01b(c01b=dummy_detector,
                                pool_shape=self.pool_shape,
                                pool_stride=self.pool_stride)
        dummy_p = dummy_p.eval()

        ### Find the space shape after temporal pooling
        # 2D layout: axis 0 = flattened spatial (0*1), axis 1 = time ('t')
        dummy_temp_image = [
            dummy_p.shape[1] * dummy_p.shape[2], dummy_output_sequence_length
        ]
        self.temp_pool_input_shape = dummy_temp_image
        dummy_temp_space = Conv2DSpace(shape=dummy_temp_image,
                                       num_channels=self.detector_channels,
                                       axes=('c', 0, 1, 'b'))
        temp_input = sharedX(
            dummy_temp_space.get_origin_batch(2)[0:16, :, :, :])
        dummy_temp_p = temporal_max_pool_c01b(
            c01b=temp_input,
            pool_shape=[1, self.pool_shape[2]],
            pool_stride=[1, self.pool_stride[2]],
            image_shape=dummy_temp_image)
        dummy_temp_p = dummy_temp_p.eval()
        self.output_space = Conv3DSpace(
            shape=[dummy_p.shape[1], dummy_p.shape[2], dummy_temp_p.shape[2]],
            num_channels=self.num_channels,
            axes=('b', 0, 1, 't', 'c'))

        # Print spaces
        print "Input shape: ", self.input_space.shape
        print "Detector space: ", self.detector_space.shape
        print "Output space: ", self.output_space.shape
Example #3
    def set_input_space(self, space):
        """ Note: this resets parameters! """

        # set up detector space and initialize transformer
        setup_detector_layer_b01tc(layer=self,
                                   input_space=space,
                                   rng=self.mlp.rng,
                                   irange=self.irange)
        rng = self.mlp.rng
        detector_shape = self.detector_space.shape

        #def handle_pool_shape(idx):
        #    if self.pool_shape[idx] < 1:
        #        raise ValueError("bad pool shape: " + str(self.pool_shape))
        #    if self.pool_shape[idx] > detector_shape[idx]:
        #        if self.fix_pool_shape:
        #            assert detector_shape[idx] > 0
        #            self.pool_shape[idx] = detector_shape[idx]
        #        else:
        #            raise ValueError("Pool shape exceeds detector layer shape on axis %d" % idx)
        #map(handle_pool_shape, [0, 1, 2])

        ### Check some preconditions
        assert self.pool_shape[0] == self.pool_shape[1]
        assert self.pool_stride[0] == self.pool_stride[1]
        assert all(
            isinstance(elem, py_integer_types) for elem in self.pool_stride)
        for i in xrange(0, 2):
            assert self.pool_stride[i] <= self.pool_shape[i]

        dummy_shape = [self.input_space.shape[0], self.input_space.shape[1]]

        # find the output shape after convolution, used below to size the
        # spatial and temporal pooling (max_pool_c01b)
        dummy_output_shape = [
            int(np.ceil((i_sh + 2. * self.pad - k_sh) / float(k_st))) + 1
            for i_sh, k_sh, k_st in zip(dummy_shape, self.kernel_shape,
                                        self.kernel_stride)
        ]

        dummy_output_shape = [dummy_output_shape[0], dummy_output_shape[1]]
        dummy_detector_space = Conv2DSpace(shape=dummy_output_shape,
                                           num_channels=self.detector_channels,
                                           axes=('c', 0, 1, 'b'))

        # pick only 16 channels and 1 image for a fast dummy max-pooling
        # (16 because Alex's cuda-convnet code needs at least 16 channels)
        dummy_detector = sharedX(
            dummy_detector_space.get_origin_batch(2)[0:16, :, :, :])

        dummy_p = max_pool_c01b(c01b=dummy_detector,
                                pool_shape=self.pool_shape,
                                pool_stride=self.pool_stride)
        dummy_p = dummy_p.eval()

        # fix the temporal pooling stride if it exceeds the pooling window
        if self.pool_temporal_stride[1] > self.pool_temporal_shape[1]:
            if self.fix_pool_stride:
                warnings.warn("Fixing the pool stride")
                ps = self.pool_temporal_shape[1]
                assert isinstance(ps, py_integer_types)
                self.pool_temporal_stride = [1, ps]
            else:
                raise ValueError("Stride too big.")
        # 2D layout: axis 0 = flattened spatial (0*1), axis 1 = time ('t')
        dummy_temp_image = [(dummy_p.shape[1] * dummy_p.shape[2]),
                            self.detector_space.shape[2]]
        # image_shape for the overlapped temporal max pooling
        self.temp_pool_input_shape = dummy_temp_image
        dummy_temp_space = Conv2DSpace(shape=dummy_temp_image,
                                       num_channels=self.detector_channels,
                                       axes=('c', 0, 1, 'b'))
        temp_input = sharedX(
            dummy_temp_space.get_origin_batch(2)[0:16, :, :, :])
        dummy_temp_p = temporal_max_pool_c01b(
            c01b=temp_input,
            pool_shape=self.pool_temporal_shape,
            pool_stride=self.pool_temporal_stride,
            image_shape=dummy_temp_image)
        dummy_temp_p = dummy_temp_p.eval()

        self.output_space = Conv3DSpace(
            shape=[dummy_p.shape[1], dummy_p.shape[2], dummy_temp_p.shape[2]],
            num_channels=self.num_channels,
            axes=('b', 0, 1, 't', 'c'))

        # Print spaces
        print "Input shape: ", self.input_space.shape
        print "Detector space: ", self.detector_space.shape
        print "Output space: ", self.output_space.shape
Example #4
    def set_input_space(self, space):
        """ Note: this resets parameters! """

        # set up detector space and initialize transformer
        setup_detector_layer_btc01(layer=self,
                                   input_space=space,
                                   rng=self.mlp.rng,
                                   irange=self.irange)
        rng = self.mlp.rng
        detector_shape = self.detector_space.shape

        print "detector shape:", self.detector_space.shape

        #def handle_pool_shape(idx):
        #    if self.pool_shape[idx] < 1:
        #        raise ValueError("bad pool shape: " + str(self.pool_shape))
        #    if self.pool_shape[idx] > detector_shape[idx]:
        #        if self.fix_pool_shape:
        #            assert detector_shape[idx] > 0
        #            self.pool_shape[idx] = detector_shape[idx]
        #        else:
        #            raise ValueError("Pool shape exceeds detector layer shape on axis %d" % idx)
        #map(handle_pool_shape, [0, 1, 2])

        ### Check some preconditions
        assert all(
            isinstance(elem, py_integer_types) for elem in self.pool_stride)
        for i in xrange(0, 3):
            assert self.pool_stride[i] <= self.pool_shape[i]

        dummy_shape = [
            self.input_space.shape[0], self.input_space.shape[1],
            self.input_space.shape[2]
        ]
        dummy_output_shape = [
            int((i_sh + 2. * p_sh - k_sh) / float(k_st)) + 1
            for i_sh, p_sh, k_sh, k_st in zip(
                dummy_shape, self.pad, self.kernel_shape, self.kernel_stride)
        ]

        dummy_output_shape = [dummy_output_shape[0], dummy_output_shape[1]]
        dummy_detector_space = Conv2DSpace(shape=dummy_output_shape,
                                           num_channels=self.detector_channels,
                                           axes=('b', 'c', 0, 1))
        dummy_detector = sharedX(
            dummy_detector_space.get_origin_batch(2)[:, :, :, :])
        dummy_p = dnn_pool(img=dummy_detector,
                           ws=tuple(self.pool_shape[:2]),
                           stride=tuple(self.pool_stride[:2]))

        print "bshape", dummy_detector_space.get_origin_batch(
            2)[:, :, :, :].shape, self.pool_shape[:2], self.pool_stride[:2]
        dummy_p = dummy_p.eval()

        print "ashape", dummy_p.shape, [1, self.pool_shape[2]
                                        ], [1, self.pool_stride[2]]
        dummy_temp_image = [(dummy_p.shape[2] * dummy_p.shape[3]),
                            self.detector_space.shape[2]]
        self.temp_pool_input_shape = dummy_temp_image
        dummy_temp_space = Conv2DSpace(shape=dummy_temp_image,
                                       num_channels=self.detector_channels,
                                       axes=('b', 'c', 0, 1))
        temp_input = sharedX(dummy_temp_space.get_origin_batch(2)[:, :, :, :])
        dummy_temp_p = dnn_pool(img=temp_input,
                                ws=tuple([1, self.pool_shape[2]]),
                                stride=tuple([1, self.pool_stride[2]]))
        dummy_temp_p = dummy_temp_p.eval()
        print "ashape2:", dummy_temp_p.shape

        self.output_space = Conv3DSpace(
            shape=[dummy_p.shape[2], dummy_p.shape[3], dummy_temp_p.shape[3]],
            num_channels=self.num_channels,
            axes=('b', 'c', 0, 1, 't'))

        # Print spaces
        print "Input shape: ", self.input_space.shape
        print "Detector space: ", self.detector_space.shape
        print "Output space: ", self.output_space.shape