Example #1
    def __init__(self, name, load_layers=True):
        super().__init__(name)
        if load_layers:
            # 0 Spatial (225, 225) --> (112, 112)
            self.add_layer(ConvLayer("conv0",
                                     filter_block_shape=(64, 3, 5, 5),
                                     with_bias=False, stride=2, padding=1,
                                     weight_regulariser=l2(0.0001)))
            self.add_layer(BatchNormLayer("conv0_bn", input_dimension=4, incoming_chans=64))
            self.add_layer(ReLu("conv0_relu"))

            # 1 Spatial (112, 112) --> (56, 56)
            self.add_layer(PointwiseConvLayer("pw0",
                                              filter_block_shape=(64, 64),
                                              with_bias=False, stride=2,
                                              weight_regulariser=l2(0.0001)))
            self.add_layer(BatchNormLayer("pw0_bn", input_dimension=4, incoming_chans=64))
            self.add_layer(ReLu("pw0_relu"))

            # 2 Spatial (56, 56) --> (56, 56)
            self.add_res_block("res1", (64, 64, 3, 3), depthwise_sep=True)

            # 3 Spatial (56, 56) --> (56, 56)
            self.add_res_block("res2", (64, 64, 3, 3), depthwise_sep=True)

            # 4 Spatial (56, 56) --> (28, 28)
            self.add_res_block("res3", (128, 64, 3, 3), downsample=True, depthwise_sep=True)

            # 5 Spatial (28, 28) --> (28, 28)
            self.add_res_block("res4", (128, 128, 3, 3), depthwise_sep=True)

            # 6 Spatial (28, 28) --> (14, 14)
            self.add_res_block("res5", (256, 128, 3, 3), downsample=True, depthwise_sep=True)

            # 7 Spatial (14, 14) --> (14, 14)
            self.add_res_block("res6", (256, 256, 3, 3), depthwise_sep=True)

            # 8 Spatial (14, 14) --> (7, 7)
            self.add_res_block("res7", (512, 256, 3, 3), downsample=True, depthwise_sep=True)

            # 9 Spatial (7, 7) --> (7, 7)
            self.add_res_block("res8", (512, 512, 3, 3), depthwise_sep=True)

            # 10 Spatial (7, 7) --> (1,)
            self.add_layer(GlobalAveragePoolingLayer("global_pool1"))

            # 11
            self.add_layer(DenseLayer("dense1",
                                    incoming_chans=512,
                                    output_dim=120,
                                    weight_regulariser=l2(0.0001)))
            self.set_loss_layer(SoftmaxWithCrossEntropy("softmax1"))
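
The constructor above builds a ResNet-style stack from depthwise-separable residual blocks. A minimal instantiation sketch; the enclosing class is not shown in the snippet, so DepthwiseResNet is a placeholder name:

    # Hypothetical class name -- the snippet only shows __init__.
    model = DepthwiseResNet("resnet_ds")                      # load_layers=True builds the full stack
    shell = DepthwiseResNet("resnet_ds", load_layers=False)   # empty shell, e.g. before restoring from h5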
Example #2
    def load_from_h5(self, open_f, load_grads=True):
        """Restore hyperparameters, weights and (optionally) gradients from an open h5 file."""
        self.incoming_chans = open_f[self.layer_name +
                                     '/layer_info'].attrs['incoming_chans']
        self.output_dim = open_f[self.layer_name +
                                 '/layer_info'].attrs['output_dim']
        self.with_bias = open_f[self.layer_name +
                                '/layer_info'].attrs['with_bias']

        weight_regulariser_type = open_f[self.layer_name +
                                         '/weights'].attrs.get(
                                             "weight_regulariser_type", None)
        if weight_regulariser_type:
            weight_regulariser_strength = open_f[
                self.layer_name +
                '/weights'].attrs["weight_regulariser_strength"]
            # h5py can return string attributes as bytes, hence the b"l2" comparison.
            if weight_regulariser_type == b"l2":
                self.weight_regulariser = l2.l2(
                    strength=float(weight_regulariser_strength))

        self.learned_params['weights'] = open_f[self.layer_name +
                                                '/weights'][:]
        if self.with_bias:
            self.learned_params['bias'] = open_f[self.layer_name + '/bias'][:]
        if load_grads:
            self.grads['weights'] = open_f[self.layer_name +
                                           '/grads/weights'][:]
            if self.with_bias:
                self.grads['bias'] = open_f[self.layer_name + '/grads/bias'][:]
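
A sketch of driving this loader with h5py, assuming the checkpoint was written by a matching save routine; the layer variable and file name here are hypothetical:

    import h5py

    # Hypothetical driver: dense is a DenseLayer whose layer_name matches
    # a group in the checkpoint file.
    with h5py.File("checkpoint.h5", "r") as open_f:
        dense.load_from_h5(open_f, load_grads=False)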
Example #3
    def load_from_h5(self, open_f, load_grads=True):
        """Restore conv hyperparameters, filters and (optionally) gradients from an open h5 file."""
        self.num_filters = open_f[self.layer_name +
                                  '/layer_info'].attrs['num_filters']
        self.filter_chans = open_f[self.layer_name +
                                   '/layer_info'].attrs['filter_chans']
        self.with_bias = open_f[self.layer_name +
                                '/layer_info'].attrs['with_bias']
        self.f_rows = open_f[self.layer_name + '/layer_info'].attrs['f_rows']
        self.f_cols = open_f[self.layer_name + '/layer_info'].attrs['f_cols']
        self.stride = open_f[self.layer_name + '/layer_info'].attrs['stride']
        self.padding = open_f[self.layer_name + '/layer_info'].attrs['padding']

        weight_regulariser_type = open_f[self.layer_name +
                                         '/weights'].attrs.get(
                                             "weight_regulariser_type", None)
        if weight_regulariser_type:
            weight_regulariser_strength = open_f[
                self.layer_name +
                '/weights'].attrs["weight_regulariser_strength"]
            if weight_regulariser_type == b"l2":
                self.weight_regulariser = l2.l2(
                    strength=float(weight_regulariser_strength))

        self.learned_params['weights'] = open_f[self.layer_name +
                                                '/weights'][:]
        if self.with_bias:
            self.learned_params['bias'] = open_f[self.layer_name + '/bias'][:]
        if load_grads:
            self.grads['weights'] = open_f[self.layer_name +
                                           '/grads/weights'][:]
            if self.with_bias:
                self.grads['bias'] = open_f[self.layer_name + '/grads/bias'][:]
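
For context, a hedged sketch of the save-side counterpart implied by this reader; the method name, the group layout, and the strength attribute are assumptions mirrored from the reads above, not code from the source:

    def save_to_h5(self, open_f):
        # Hypothetical counterpart: writes exactly the attributes the loader reads.
        info = open_f.require_group(self.layer_name + '/layer_info')
        for key in ('num_filters', 'filter_chans', 'with_bias',
                    'f_rows', 'f_cols', 'stride', 'padding'):
            info.attrs[key] = getattr(self, key)
        weights = open_f.create_dataset(self.layer_name + '/weights',
                                        data=self.learned_params['weights'])
        if self.weight_regulariser is not None:
            weights.attrs['weight_regulariser_type'] = 'l2'
            # The 'strength' attribute name is assumed from l2.l2(strength=...).
            weights.attrs['weight_regulariser_strength'] = self.weight_regulariser.strength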
Example #4
    def add_res_block(self, layer_name, first_filter_block_shape,
                      downsample=False, weight_regulariser_strength=0.0001,
                      depthwise_sep=False):
        num_filters, incoming_chans, f_rows, f_cols = first_filter_block_shape
        layer_list = []
        # First conv of the block: stride 2 when downsampling, ReLU after BN.
        if depthwise_sep:
            layer_list += self.depthwise_sep_layer(layer_name + "_dw1", incoming_chans,
                                                   first_filter_block_shape,
                                                   stride=2 if downsample else 1, padding=1,
                                                   depthwise_weight_regulariser=None,
                                                   pointwise_weight_regulariser=l2(strength=weight_regulariser_strength),
                                                   final_relu=True, add_layers=False)
        else:
            layer_list.append(ConvLayer(layer_name + "_conv1",
                                        filter_block_shape=first_filter_block_shape,
                                        stride=2 if downsample else 1, padding=1,
                                        with_bias=False,
                                        weight_regulariser=l2(strength=weight_regulariser_strength)))
            layer_list.append(BatchNormLayer(layer_name + "_bn1", input_dimension=4,
                                             incoming_chans=num_filters))
            layer_list.append(ReLu(layer_name + "_relu1"))
        # Second conv: stride 1 and no ReLU here -- the activation is applied
        # after the skip connection is added back in.
        if depthwise_sep:
            layer_list += self.depthwise_sep_layer(layer_name + "_dw2", num_filters,
                                                   (num_filters, num_filters, f_rows, f_cols),
                                                   stride=1, padding=1,
                                                   depthwise_weight_regulariser=None,
                                                   pointwise_weight_regulariser=l2(strength=weight_regulariser_strength),
                                                   final_relu=False, add_layers=False)
        else:
            layer_list.append(ConvLayer(layer_name + "_conv2",
                                        filter_block_shape=(num_filters, num_filters, f_rows, f_cols),
                                        stride=1, padding=1, with_bias=False,
                                        weight_regulariser=l2(strength=weight_regulariser_strength)))
            layer_list.append(BatchNormLayer(layer_name + "_bn2", input_dimension=4,
                                             incoming_chans=num_filters))
        # Downsampling changes the output shape, so the skip path needs a
        # stride-2 1x1 projection to match; otherwise the identity skip is used.
        if downsample:
            skip_proj = PointwiseConvLayer(layer_name + "_pw_skip",
                                           filter_block_shape=(num_filters, incoming_chans),
                                           stride=2, with_bias=False,
                                           weight_regulariser=l2(strength=weight_regulariser_strength))
        else:
            skip_proj = None
        relu2 = ReLu(layer_name + "_relu2")
        self.add_layer(ResidualBlock(layer_name, layer_list=layer_list,
                                     skip_projection=skip_proj,
                                     post_skip_activation=relu2))
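
As a shape check on this helper: with 3x3 filters and padding 1, the first conv maps H to (H + 2 - 3) // 2 + 1 when downsample=True (so 56 -> 28), and the stride-2 pointwise projection halves the identity path to match. Call sketches inside a network __init__, mirrored from Example #1:

    # Identity block: channels and spatial dims unchanged, plain skip connection.
    self.add_res_block("res4", (128, 128, 3, 3), depthwise_sep=True)
    # Downsampling block: spatial dims halve, channels 64 -> 128, projected skip.
    self.add_res_block("res3", (128, 64, 3, 3), downsample=True, depthwise_sep=True)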
Example #5
    def __init__(self, name, load_layers=True):
        super().__init__(name)
        if load_layers:
            # 0 Spatial (28, 28) --> (28, 28)
            self.add_layer(
                ConvLayer("conv_1",
                          filter_block_shape=(32, 1, 3, 3),
                          with_bias=False,
                          weight_regulariser=l2(0.0001)))
            self.add_layer(BatchNormLayer("bn_1", incoming_chans=32))
            self.add_layer(ReLu("relu_1"))
            # 1 Spatial (28, 28) --> (28, 28)
            self.add_layer(
                ConvLayer("conv_2",
                          filter_block_shape=(32, 32, 3, 3),
                          with_bias=False,
                          weight_regulariser=l2(0.0001)))
            self.add_layer(BatchNormLayer("bn_2", incoming_chans=32))
            self.add_layer(ReLu("relu_2"))
            # 2 Spatial (28, 28) --> (14, 14)
            self.add_layer(
                ConvLayer("conv_3",
                          filter_block_shape=(64, 32, 4, 4),
                          with_bias=False,
                          stride=2,
                          weight_regulariser=l2(0.0001)))
            self.add_layer(BatchNormLayer("bn_3", incoming_chans=64))
            self.add_layer(ReLu("relu_3"))
            # 3 Spatial (14, 14) --> (14, 14)
            self.add_layer(
                ConvLayer("conv_4",
                          filter_block_shape=(64, 64, 3, 3),
                          with_bias=False,
                          weight_regulariser=l2(0.0001)))
            self.add_layer(BatchNormLayer("bn_4", incoming_chans=64))
            self.add_layer(ReLu("relu_4"))
            # 4 Spatial (14, 14) --> (7, 7)
            self.add_layer(
                ConvLayer("conv_5",
                          filter_block_shape=(128, 64, 4, 4),
                          with_bias=False,
                          stride=2,
                          weight_regulariser=l2(0.0001)))
            self.add_layer(BatchNormLayer("bn_5", incoming_chans=128))
            self.add_layer(ReLu("relu_4"))
            # 5 Spatial (7, 7) --> (1,)
            self.add_layer(GlobalAveragePoolingLayer("global_pool"))

            self.add_layer(
                DenseLayer("dense_1",
                           incoming_chans=128,
                           output_dim=10,
                           weight_regulariser=l2(0.0005)))
            self.set_loss_layer(SoftmaxWithCrossEntropy("softmax"))
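
The spatial comments above are consistent with ConvLayer defaulting to padding=1: a 3x3 stride-1 conv preserves H, and a 4x4 stride-2 conv halves it. A quick trace of the usual output-size formula under that assumption:

    # out = (in + 2*padding - kernel) // stride + 1, assuming padding defaults to 1
    assert (28 + 2 - 3) // 1 + 1 == 28   # conv_1, conv_2
    assert (28 + 2 - 4) // 2 + 1 == 14   # conv_3
    assert (14 + 2 - 3) // 1 + 1 == 14   # conv_4
    assert (14 + 2 - 4) // 2 + 1 == 7    # conv_5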