Example #1
    def compute_net_in(self):
        '''Computes fast 2D max pooling net-in using reshaping (partitioning input into small windows).
        '''
        mini_batch_sz, n_chans, img_y, img_x = self.input.shape

        out_x = filter_ops.get_pooling_out_shape(img_x, self.pool_size,
                                                 self.strides)
        out_y = filter_ops.get_pooling_out_shape(img_y, self.pool_size,
                                                 self.strides)

        # Partition the input into pool_size x pool_size windows, then take the max within each window.
        # NOTE: this reshape trick is only valid when strides == pool_size and the
        # image dimensions divide evenly by pool_size.
        self.input_reshaped = self.input.reshape(mini_batch_sz, n_chans, out_y,
                                                 self.pool_size, out_x,
                                                 self.pool_size)
        # Max over the within-window rows (axis 3), then the within-window
        # columns (axis 4 of the reduced array)
        self.net_in = self.input_reshaped.max(axis=3).max(axis=4)
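
For reference, the same reshape trick can be reproduced in plain NumPy. The sketch below assumes, as the method above does, that the stride equals the pooling extent and that the image dimensions divide evenly by it; `get_pooling_out_shape` is assumed to compute `(img_dim - pool_size) // stride + 1`.

import numpy as np

def maxpool2d_reshape(x, pool_size):
    '''Max pooling via reshaping. Assumes stride == pool_size and that the
    spatial dims of x (shape: N, C, H, W) divide evenly by pool_size.'''
    n, c, img_y, img_x = x.shape
    out_y, out_x = img_y // pool_size, img_x // pool_size
    windows = x.reshape(n, c, out_y, pool_size, out_x, pool_size)
    # Reduce within-window rows first, then within-window columns
    return windows.max(axis=3).max(axis=4)

x = np.arange(2 * 3 * 4 * 4, dtype=float).reshape(2, 3, 4, 4)
print(maxpool2d_reshape(x, 2).shape)  # (2, 3, 2, 2)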
Example #2
    def __init__(self,
                 input_shape=(3, 32, 32),
                 n_kers=(32, ),
                 ker_sz=(7, ),
                 dense_interior_units=(100, ),
                 pooling_sizes=(2, ),
                 pooling_strides=(2, ),
                 n_classes=10,
                 wt_scale=1e-3,
                 reg=0,
                 verbose=True):
        '''
        Parameters:
        -----------
        input_shape: tuple. Shape of a SINGLE input sample (no mini-batch). By default: (n_chans, img_y, img_x)
        n_kers: tuple. Number of kernels/units in the 1st convolution layer. Format is (32,), which is a tuple
            rather than just an int. The reasoning is that if you wanted to create another Conv2D layer, say with 16
            units, n_kers would then be (32, 16). Thus, this format easily allows us to make the net deeper.
        ker_sz: tuple. x/y size of each convolution filter. Format is (7,), which means make 7x7 filters in the FIRST
            Conv2D layer. If we had another Conv2D layer with filter size 5x5, it would be ker_sz=(7, 5)
        dense_interior_units: tuple. Number of hidden units in each dense layer. Same format as above.
            NOTE: Does NOT include the output layer, which has # units = # classes.
        pooling_sizes: tuple. Pooling extent in the i-th MaxPooling2D layer.  Same format as above.
        pooling_strides: tuple. Pooling stride in the i-th MaxPooling2D layer.  Same format as above.
        n_classes: int. Number of classes in the dataset. This will become the number of units in the output Dense layer.
        wt_scale: float. Global weight scaling to use for all layers with weights.
        reg: float. Regularization strength.
        verbose: bool. Do we want network-related debug printouts turned on?
            NOTE: This is different from the per-layer verbose settings, which are turned on manually below.

        TODO:
        1. Assemble the layers of the network and add them (in order) to `self.layers`.
        2. Remember to define self.wt_layer_inds as the indices of the layers in `self.layers` that have weights.
        '''

        super().__init__(reg, verbose)

        n_chans, h, w = input_shape
        # 1) Input convolutional layer
        self.layers.append(
            accelerated_layer.Conv2DAccel(len(self.layers),
                                          'Conv2',
                                          n_kers[0],
                                          ker_sz[0],
                                          n_chans,
                                          wt_scale,
                                          activation='relu',
                                          reg=reg,
                                          verbose=verbose))
        # 2) 2x2 max pooling layer
        self.layers.append(
            accelerated_layer.MaxPooling2DAccel(len(self.layers), 'MaxPool',
                                                pooling_sizes[0],
                                                pooling_strides[0], 'linear',
                                                reg, verbose))
        # 3) Dense layer. Fan-in is the flattened pooled volume;
        # note that squaring the x extent assumes a square input (h == w).
        self.layers.append(
            layer.Dense(len(self.layers), 'DenseRelu', dense_interior_units[0],
                        (filter_ops.get_pooling_out_shape(
                            w, pooling_sizes[0], pooling_strides[0])**2) *
                        n_kers[0], wt_scale, 'relu', reg, verbose))

        # 3.5) Another dense layer, why not?
        self.layers.append(
            layer.Dense(len(self.layers), 'DenseRelu2',
                        dense_interior_units[0], self.layers[-1].get_units(),
                        wt_scale, 'relu', reg, verbose))

        # 4) Dense softmax output layer
        self.layers.append(
            layer.Dense(len(self.layers), 'DenseSoftMax', n_classes,
                        self.layers[-1].get_units(), wt_scale, 'softmax', reg,
                        verbose))

        # Only the indices of the layers that have weights
        self.wt_layer_inds = [0, 2, 3, 4]
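
As a sanity check on the fan-in passed to the first Dense layer above: with the default 32x32 input, pooling size 2, and stride 2, the pooled map is 16x16 per kernel, so the Dense layer receives 16 * 16 * 32 = 8192 inputs. A minimal sketch, assuming `get_pooling_out_shape` computes `(img_dim - pool_size) // stride + 1` (the real `filter_ops` version may differ):

def get_pooling_out_shape(img_dim, pool_size, stride):
    # Assumed definition, for illustration only
    return (img_dim - pool_size) // stride + 1

out_xy = get_pooling_out_shape(32, 2, 2)  # 16
print(out_xy ** 2 * 32)                   # 8192 inputs to the first Dense layer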
Example #3
    def __init__(self,
                 input_shape=(3, 32, 32),
                 n_kers=(32, ),
                 ker_sz=(9, ),
                 dense_interior_units=(100, ),
                 pooling_sizes=(2, ),
                 pooling_strides=(2, ),
                 n_classes=10,
                 wt_scale=1e-3,
                 reg=0,
                 verbose=True):
        '''
        Parameters:
        -----------
        input_shape: tuple. Shape of a SINGLE input sample (no mini-batch). By default: (n_chans, img_y, img_x)
        n_kers: tuple. Number of kernels/units in the 1st convolution layer. Format is (32,), which is a tuple
            rather than just an int. The reasoning is that if you wanted to create another Conv2D layer, say with 16
            units, n_kers would then be (32, 16). Thus, this format easily allows us to make the net deeper.
        ker_sz: tuple. x/y size of each convolution filter. Format is (9,), which means make 9x9 filters in the FIRST
            Conv2D layer. If we had another Conv2D layer with filter size 5x5, it would be ker_sz=(9, 5)
        dense_interior_units: tuple. Number of hidden units in each dense layer. Same format as above.
            NOTE: Does NOT include the output layer, which has # units = # classes.
        pooling_sizes: tuple. Pooling extent in the i-th MaxPooling2D layer.  Same format as above.
        pooling_strides: tuple. Pooling stride in the i-th MaxPooling2D layer.  Same format as above.
        n_classes: int. Number of classes in the dataset. This will become the number of units in the output Dense layer.
        wt_scale: float. Global weight scaling to use for all layers with weights.
        reg: float. Regularization strength.
        verbose: bool. Do we want network-related debug printouts turned on?
            NOTE: This is different from the per-layer verbose settings, which are turned on manually below.

        TODO:
        1. Assemble the layers of the network and add them (in order) to `self.layers`.
        2. Remember to define self.wt_layer_inds as the indices of the layers in `self.layers` that have weights.
        '''
        super().__init__(reg, verbose)

        n_chans, h, w = input_shape

        # 1) Input convolutional layer

        C = layer.Conv2DAccel(number=0,
                              name="Conv",
                              n_kers=n_kers[0],
                              ker_sz=ker_sz[0],
                              n_chans=n_chans,
                              wt_scale=wt_scale,
                              activation="relu",
                              reg=reg,
                              verbose=False)

        self.layers.append(C)

        # 2) 2x2 max pooling layer

        P = layer.MaxPooling2D(number=1,
                               name="Pool",
                               pool_size=pooling_sizes[0],
                               strides=pooling_strides[0],
                               activation="linear",
                               reg=reg,
                               verbose=False)

        self.layers.append(P)

        # 3) Dense layer

        pool_net_act_size_x = filter_ops.get_pooling_out_shape(
            w, pooling_sizes[0], pooling_strides[0])
        pool_net_act_size_y = filter_ops.get_pooling_out_shape(
            h, pooling_sizes[0], pooling_strides[0])
        # Flattened pooled volume: y extent * x extent * number of kernels
        pool_net_act_size = (pool_net_act_size_y * pool_net_act_size_x *
                             n_kers[0])

        D = layer.Dense(number=2,
                        name="Dense",
                        units=dense_interior_units[0],
                        n_units_prev_layer=pool_net_act_size,
                        wt_scale=wt_scale,
                        activation="relu",
                        reg=reg,
                        verbose=False)

        self.layers.append(D)

        # 4) Dense softmax output layer

        O = layer.Dense(number=3,
                        name="Output",
                        units=n_classes,
                        n_units_prev_layer=dense_interior_units[0],
                        wt_scale=wt_scale,
                        activation="softmax",
                        reg=reg,
                        verbose=False)

        self.layers.append(O)

        self.wt_layer_inds = [0, 2, 3]
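
A hypothetical usage sketch follows. The enclosing class is not shown in the excerpt, so the name `ConvNet4Accel` is assumed here, as is the availability of the `layer` and `filter_ops` modules it depends on:

# Hypothetical: the actual class wrapping the __init__ above is not shown
net = ConvNet4Accel(input_shape=(3, 32, 32), n_kers=(32,), ker_sz=(9,),
                    dense_interior_units=(100,), pooling_sizes=(2,),
                    pooling_strides=(2,), n_classes=10, wt_scale=1e-3,
                    reg=0, verbose=False)
print([type(lay).__name__ for lay in net.layers])
# Expected: ['Conv2DAccel', 'MaxPooling2D', 'Dense', 'Dense']
print(net.wt_layer_inds)  # [0, 2, 3]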