Example #1
    def __init__(self,
                 input_shape=(3, 32, 32),
                 n_kers=(32, ),
                 ker_sz=(9, ),
                 dense_interior_units=(100, ),
                 pooling_sizes=(2, ),
                 pooling_strides=(2, ),
                 n_classes=10,
                 wt_scale=1e-3,
                 reg=0,
                 verbose=True):
        '''
        Parameters:
        -----------
        input_shape: tuple. Shape of a SINGLE input sample (no mini-batch). By default: (n_chans, img_y, img_x)
        n_kers: tuple. Number of kernels/units in the 1st convolution layer. Format is (32,), which is a tuple
            rather than just an int. The reasoning is that if you wanted to create another Conv2D layer, say with 16
            units, n_kers would then be (32, 16). Thus, this format easily allows us to make the net deeper.
        ker_sz: tuple. x/y size of each convolution filter. Format is (9,), which means make 9x9 filters in the FIRST
            Conv2D layer. If we had another Conv2D layer with filter size 5x5, it would be ker_sz=(9, 5)
        dense_interior_units: tuple. Number of hidden units in each dense layer. Same format as above.
            NOTE: Does NOT include the output layer, which has # units = # classes.
        pooling_sizes: tuple. Pooling extent in the i-th MaxPooling2D layer.  Same format as above.
        pooling_strides: tuple. Pooling stride in the i-th MaxPooling2D layer.  Same format as above.
        n_classes: int. Number of classes in the input. This will become the number of units in the Output Dense layer.
        wt_scale: float. Global weight scaling to use for all layers with weights
        reg: float. Regularization strength
        verbose: bool. Do we want to turn network-related debug printouts on?
            NOTE: This is different from the per-layer verbose settings, which are turned on manually below.

        TODO:
        1. Assemble the layers of the network and add them (in order) to `self.layers`.
        2. Remember to define self.wt_layer_inds as the list of indices in self.layers that have weights.
        '''
        super().__init__(reg, verbose)

        n_chans, h, w = input_shape

        # 1) Input convolutional layer

        C = accelerated_layer.Conv2DAccel(number=0,
                                          name="Conv",
                                          n_kers=n_kers[0],
                                          ker_sz=ker_sz[0],
                                          n_chans=n_chans,
                                          wt_scale=wt_scale,
                                          activation="relu",
                                          reg=reg,
                                          verbose=False)

        self.layers.append(C)

        # 2) 2x2 max pooling layer

        P = accelerated_layer.MaxPooling2DAccel(number=1,
                                                name="Pool",
                                                pool_size=pooling_sizes[0],
                                                strides=pooling_strides[0],
                                                activation="linear",
                                                reg=reg,
                                                verbose=False)

        self.layers.append(P)

        # 3) Dense layer

        # Flattened size of the pooling layer's output, which feeds the dense layer.
        pool_net_act_size_x = filter_ops.get_pooling_out_shape(
            w, pooling_sizes[0], pooling_strides[0])
        pool_net_act_size_y = filter_ops.get_pooling_out_shape(
            h, pooling_sizes[0], pooling_strides[0])
        pool_net_act_size = pool_net_act_size_x * pool_net_act_size_y * n_kers[0]

        D = layer.Dense(number=2,
                        name="Dense",
                        units=dense_interior_units[0],
                        n_units_prev_layer=pool_net_act_size,
                        wt_scale=wt_scale,
                        activation="relu",
                        reg=reg,
                        verbose=False)

        self.layers.append(D)

        # 4) Dense softmax output layer

        O = layer.Dense(number=3,
                        name="Output",
                        units=n_classes,
                        n_units_prev_layer=dense_interior_units[0],
                        wt_scale=wt_scale,
                        activation="softmax",
                        reg=reg,
                        verbose=False)

        self.layers.append(O)

        # Indices of the layers that have weights (pooling at index 1 has none)
        self.wt_layer_inds = [0, 2, 3]
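
The shape bookkeeping above hinges on the pooling output formula. Here is a minimal sketch of what filter_ops.get_pooling_out_shape presumably computes, assuming non-padded ("valid") pooling; the name and signature come from the calls above, but the body is an assumption, not the actual filter_ops implementation:

def get_pooling_out_shape(img_dim, pool_size, strides):
    # Number of pool_size windows that fit along one spatial dimension
    # when sliding by `strides` ("valid" pooling, no padding).
    return (img_dim - pool_size) // strides + 1

With the defaults, get_pooling_out_shape(32, 2, 2) returns 16, matching the 16 * 16 * 32 = 8192 flattened units computed in the code.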
Example #2
    def __init__(self,
                 input_shape=(3, 32, 32),
                 n_kers=(32, ),
                 ker_sz=(7, ),
                 dense_interior_units=(100, ),
                 pooling_sizes=(2, ),
                 pooling_strides=(2, ),
                 n_classes=10,
                 wt_scale=1e-3,
                 reg=0,
                 verbose=True):
        '''
        Parameters:
        -----------
        input_shape: tuple. Shape of a SINGLE input sample (no mini-batch). By default: (n_chans, img_y, img_x)
        n_kers: tuple. Number of kernels/units in the 1st convolution layer. Format is (32,), which is a tuple
            rather than just an int. The reasoning is that if you wanted to create another Conv2D layer, say with 16
            units, n_kers would then be (32, 16). Thus, this format easily allows us to make the net deeper.
        ker_sz: tuple. x/y size of each convolution filter. Format is (7,), which means make 7x7 filters in the FIRST
            Conv2D layer. If we had another Conv2D layer with filters size 5x5, it would be ker_sz=(7,5)
        dense_interior_units: tuple. Number of hidden units in each dense layer. Same format as above.
            NOTE: Does NOT include the output layer, which has # units = # classes.
        pooling_sizes: tuple. Pooling extent in the i-th MaxPooling2D layer.  Same format as above.
        pooling_strides: tuple. Pooling stride in the i-th MaxPooling2D layer.  Same format as above.
        n_classes: int. Number of classes in the input. This will become the number of units in the Output Dense layer.
        wt_scale: float. Global weight scaling to use for all layers with weights
        reg: float. Regularization strength
        verbose: bool. Do we want to turn network-related debug printouts on?
            NOTE: This is different from the per-layer verbose settings, which are turned on manually below.

        TODO:
        1. Assemble the layers of the network and add them (in order) to `self.layers`.
        2. Remember to define self.wt_layer_inds as the list of indices in self.layers that have weights.
        '''

        super().__init__(reg, verbose)

        n_chans, h, w = input_shape
        # 1) Input convolutional layer
        self.layers.append(
            accelerated_layer.Conv2DAccel(len(self.layers),
                                          'Conv2',
                                          n_kers[0],
                                          ker_sz[0],
                                          n_chans,
                                          wt_scale,
                                          activation='relu',
                                          reg=reg,
                                          verbose=verbose))
        # 2) 2x2 max pooling layer
        self.layers.append(
            accelerated_layer.MaxPooling2DAccel(len(self.layers), 'MaxPool',
                                                pooling_sizes[0],
                                                pooling_strides[0], 'linear',
                                                reg, verbose))
        # 3) Dense layer
        self.layers.append(
            layer.Dense(len(self.layers), 'DenseRelu', dense_interior_units[0],
                        (filter_ops.get_pooling_out_shape(
                            w, pooling_sizes[0], pooling_strides[0])**2) *
                        n_kers[0], wt_scale, 'relu', reg, verbose))
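        # NOTE: the `**2` above assumes square inputs (h == w); for
        # rectangular images, compute the pooled height and width
        # separately (as Example #1 does) and multiply them.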

        # 3.5) Another dense layer, why not?
        self.layers.append(
            layer.Dense(len(self.layers), 'DenseRelu2',
                        dense_interior_units[0], self.layers[-1].get_units(),
                        wt_scale, 'relu', reg, verbose))

        # 4) Dense softmax output layer
        self.layers.append(
            layer.Dense(len(self.layers), 'DenseSoftMax', n_classes,
                        self.layers[-1].get_units(), wt_scale, 'softmax', reg,
                        verbose))

        # Only the indices of layers that have weights (pooling at index 1 has none)
        self.wt_layer_inds = [0, 2, 3, 4]
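
Hardcoding wt_layer_inds is easy to knock out of sync when layers are added, as the extra dense layer at step 3.5 shows (the list had to grow to [0, 2, 3, 4] by hand). A sketch of deriving the indices instead, assuming each layer object exposes a wts attribute that is None for weightless layers such as pooling; that attribute convention is an assumption about the layer classes, which are not shown here:

        self.wt_layer_inds = [i for i, lay in enumerate(self.layers)
                              if getattr(lay, 'wts', None) is not None]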
Example #3
    def load_model(self, filename):
        # TODO: parse the text written by layer.save_model, rebuild each
        # layer, and append it to self.layers. Work in progress.
        assert filename[-4:] == ".txt", "filename must end in .txt"

        with open(filename, 'r') as f:
            print("loading ", f.readline())

            self.layers = []

            # The save format appears to end each layer record with '\t',
            # so the last split element is empty and gets dropped.
            layer_data = f.readline().split('\t')
            for i, layer_info in enumerate(layer_data[:-1]):
                layer_info = layer_info.split('@')
                if layer_info[0] == "Dense":
                    # Shape args (units, n_units_prev_layer, wt_scale) are
                    # placeholders; the saved weights/biases are restored below.
                    L = layer.Dense(number=i,
                                    name="Dense" + str(i),
                                    units=1,
                                    n_units_prev_layer=1,
                                    wt_scale=1,
                                    activation=layer_info[1],
                                    reg=float(layer_info[2]),
                                    verbose=eval(layer_info[3]))
                    L.wts = np.asarray(eval(layer_info[4]))
                    L.b = np.asarray(eval(layer_info[5]))
                elif layer_info[0] == "Conv2D":
                    # Same placeholder trick; kernel shape args are dummies.
                    L = accelerated_layer.Conv2DAccel(number=i,
                                                      name="Conv" + str(i),
                                                      n_kers=1,
                                                      ker_sz=1,
                                                      n_chans=1,
                                                      wt_scale=1,
                                                      activation=layer_info[1],
                                                      reg=float(layer_info[2]),
                                                      verbose=eval(
                                                          layer_info[3]))
                    L.wts = np.asarray(eval(layer_info[4]))
                    L.b = np.asarray(eval(layer_info[5]))
                elif layer_info[0] == "MaxPooling2D":
                    L = accelerated_layer.MaxPooling2DAccel(
                        number=i,
                        name="Pool" + str(i),
                        pool_size=int(layer_info[1]),
                        strides=int(layer_info[2]),
                        activation=layer_info[3],
                        reg=float(layer_info[4]),
                        verbose=eval(layer_info[5]))
                self.layers.append(L)

            self.wt_layer_inds = eval(f.readline())
            self.reg = float(f.readline())
            self.verbose = eval(f.readline())
            self.loss_history = eval(f.readline())
            self.train_acc_history = eval(f.readline())
            self.validation_acc_history = eval(f.readline())
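
A caveat on the parsing above: eval() executes arbitrary code from the file. For the Python literals being read here (lists, numbers, booleans), ast.literal_eval from the standard library is a non-executing drop-in; a minimal sketch, where read_literal is a hypothetical helper:

import ast

def read_literal(f):
    # Parse one line of the save file as a Python literal (list,
    # number, bool, ...) without executing arbitrary code.
    return ast.literal_eval(f.readline())

For example, self.wt_layer_inds = read_literal(f) would replace the eval(f.readline()) call above.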