Example #1
 def generate_random_sub_block(self, input_shape, layer_type):
     if layer_type == self.SUB_BLOCK_TYPES.FLATTEN:
         return [
             LayerBlock(
                 input_shape=input_shape,
                 parent_block=self,
                 layer_type=SupportedLayers.FLATTEN,
             )
         ]
     elif layer_type == self.SUB_BLOCK_TYPES.HIDDENDENSE:
         return [
             LayerBlock(
                 input_shape=input_shape,
                 parent_block=self,
                 layer_type=SupportedLayers.HIDDENDENSE,
             )
         ]
     elif layer_type == self.SUB_BLOCK_TYPES.DROPOUT:
         return [
             LayerBlock(
                 input_shape=input_shape,
                 parent_block=self,
                 layer_type=SupportedLayers.DROPOUT,
                 args={dropout_args.RATE: self.DROPOUT_RATE_MAX},
             )
         ]
     return []
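A note on the dispatch above: the if/elif chain repeats an identical LayerBlock call and differs only in the mapped layer type (plus args for DROPOUT). A table-driven sketch of the same logic, assuming only the enum members shown above and nothing else about the framework:

 def generate_random_sub_block(self, input_shape, layer_type):
     # Hypothetical rewrite: map each sub-block type to its layer type
     # and optional args, then build the LayerBlock once.
     layer_map = {
         self.SUB_BLOCK_TYPES.FLATTEN: (SupportedLayers.FLATTEN, None),
         self.SUB_BLOCK_TYPES.HIDDENDENSE: (SupportedLayers.HIDDENDENSE, None),
         self.SUB_BLOCK_TYPES.DROPOUT: (
             SupportedLayers.DROPOUT,
             {dropout_args.RATE: self.DROPOUT_RATE_MAX},
         ),
     }
     if layer_type not in layer_map:
         return []
     layer, args = layer_map[layer_type]
     # Pass args only when present, in case LayerBlock rejects args=None.
     kwargs = {} if args is None else {"args": args}
     return [
         LayerBlock(
             input_shape=input_shape,
             parent_block=self,
             layer_type=layer,
             **kwargs,
         )
     ]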
Example #2
    def generate_constrained_input_sub_blocks(self, input_shape):
        import random

        layers = []
        # Randomly decide whether to prepend a same-padded max-pool layer.
        if random.randint(0, 1):
            layers.append(
                LayerBlock(
                    input_shape=input_shape,
                    parent_block=self,
                    layer_type=SupportedLayers.SAMEMAXPOOL2D,
                ))
        # The pointwise conv follows the pool when one was added, so it takes
        # the pool's output shape; otherwise it takes the block's input shape.
        layers.append(
            LayerBlock(
                input_shape=layers[-1].get_output_shape()
                if layers else input_shape,
                parent_block=self,
                layer_type=SupportedLayers.POINTWISECONV2D,
            ))
        return layers
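Examples #2, #3, and #11 all repeat the same wiring step: each appended LayerBlock takes the previous block's output shape, falling back to the block's own input shape for the first layer. A sketch of a hypothetical helper capturing that pattern (the name append_chained is ours, not part of the framework):

 def append_chained(layers, input_shape, parent_block, layer_type, args=None):
     # Hypothetical helper: wire a new LayerBlock to the previous entry,
     # or to the block-level input shape when the list is still empty.
     shape = layers[-1].get_output_shape() if layers else input_shape
     kwargs = {} if args is None else {"args": args}
     layers.append(
         LayerBlock(
             input_shape=shape,
             parent_block=parent_block,
             layer_type=layer_type,
             **kwargs,
         ))
     return layers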
Example #3
    def generate_constrained_input_sub_blocks(self, input_shape):
        from random import randrange

        # Pick random power-of-two channel counts. Note that Python's
        # exponentiation operator is **; ^ is bitwise XOR.
        channel_in = 2 ** randrange(10)
        channel_out = 2 ** randrange(10)
        # Only insert the bottleneck convs when the input channel count
        # is even.
        if (input_shape[-1] % 2) == 0:
            bottleneck_factor = 2
        else:
            bottleneck_factor = 1

        layers = []
        if bottleneck_factor != 1:
            layers.append(
                LayerBlock(
                    input_shape=input_shape,
                    parent_block=self,
                    layer_type=SupportedLayers.POINTWISECONV2D,
                    # Alternatively, filters could be derived as
                    # input_shape[-1] // bottleneck_factor.
                    args={conv_args.FILTERS: channel_in},
                ))
        layers.append(
            LayerBlock(
                input_shape=layers[-1].get_output_shape()
                if bottleneck_factor != 1 else input_shape,
                parent_block=self,
                layer_type=SupportedLayers.DEPTHWISECONV2D,
                args={conv_args.KERNEL_SIZE: (1, 3)},
            ))
        layers.append(
            LayerBlock(
                input_shape=layers[-1].get_output_shape(),
                parent_block=self,
                layer_type=SupportedLayers.MAXPOOL2D,
                args={
                    pool_args.POOL_SIZE: (2, 1),
                    pool_args.STRIDES: (2, 1),
                    pool_args.PADDING: ArgPadding.SAME,
                },
            ))
        layers.append(
            LayerBlock(
                input_shape=layers[-1].get_output_shape(),
                parent_block=self,
                layer_type=SupportedLayers.DEPTHWISECONV2D,
                args={conv_args.KERNEL_SIZE: (3, 1)},
            ))
        if bottleneck_factor != 1:
            layers.append(
                LayerBlock(
                    input_shape=layers[-1].get_output_shape(),
                    parent_block=self,
                    layer_type=SupportedLayers.POINTWISECONV2D,
                    args={
                        conv_args.KERNEL_SIZE: (bottleneck_factor, 1),
                        conv_args.FILTERS: channel_out,
                    },
                ))
        return layers
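On the operator fix above: in Python, ^ is bitwise XOR rather than exponentiation, so the original 2 ^ Channel_in produced small XOR results instead of powers of two:

 2 ^ 3    # == 1  (0b10 XOR 0b11)
 2 ** 3   # == 8  (exponentiation)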
Example #4
 def generate_random_sub_block(self, input_shape, layer_type):
     pwconv_block = LayerBlock(
         input_shape=input_shape,
         parent_block=self,
         layer_type=SupportedLayers.POINTWISECONV2D,
     )
     # The depthwise conv runs after the pointwise conv, so it is wired to
     # the pointwise conv's output shape rather than the block's input shape.
     dwconv_block = LayerBlock(
         input_shape=pwconv_block.get_output_shape(),
         parent_block=self,
         layer_type=SupportedLayers.DEPTHWISECONV2D,
     )
     return [pwconv_block, dwconv_block]
Example #5
 def generate_constrained_input_sub_blocks(self, input_shape):
     # input_shape=None: the shape is resolved from the previous layer's
     # output (see Example #12).
     return [
         LayerBlock(input_shape=None,
                    parent_block=self,
                    layer_type=SupportedLayers.FLATTEN)
     ]
Example #6
 def generate_constrained_input_sub_blocks(self, input_shape):
     return [
         LayerBlock(
             input_shape=input_shape,
             parent_block=self,
             layer_type=SupportedLayers.CONV2D,
         )
     ]
Example #7
 def generate_random_sub_block(self, input_shape, layer_type):
     # TODO figure out how to do non-sequential models
     """
     Fire blocks consist of a pointwise conv layer followed by any number of
     parallel separable 2D conv layers; this collection of separable conv
     layers is known as an expansion block.
     """
     pwconv_block = LayerBlock(
         input_shape=input_shape,
         parent_block=self,
         layer_type=SupportedLayers.POINTWISECONV2D,
     )
     expand_block = ExpandBlock(
         input_shape=pwconv_block.get_output_shape(),
         parent_block=self,
         layer_type=layer_type,
     )
     return [pwconv_block, expand_block]
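For orientation, the squeeze-then-parallel-expand structure the docstring describes, sketched in plain tf.keras. This is our illustration only (layer sizes are arbitrary), not the framework's implementation:

 import tensorflow as tf

 def fire_block_sketch(x, squeeze_filters=16, expand_filters=64, branches=2):
     # Squeeze: a pointwise (1x1) conv reduces the channel count.
     squeezed = tf.keras.layers.Conv2D(squeeze_filters, 1, activation="relu")(x)
     # Expand: several separable convs applied in parallel to the same input.
     outputs = [
         tf.keras.layers.SeparableConv2D(
             expand_filters, 3, padding="same", activation="relu")(squeezed)
         for _ in range(branches)
     ]
     return tf.keras.layers.Concatenate()(outputs)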
Example #8
 def generate_random_sub_block(self, input_shape, layer_type):
     if layer_type == self.SUB_BLOCK_TYPES.SEPARABLE_CONV:
         return [
             LayerBlock(
                 input_shape=input_shape,
                 parent_block=self,
                 layer_type=SupportedLayers.SEPARABLECONV2D,
             )
         ]
     return []
Example #9
 def generate_random_sub_block(self, input_shape, layer_type):
     if layer_type == self.SUB_BLOCK_TYPES.CONV2D:
         return [
             LayerBlock(
                 input_shape=input_shape,
                 parent_block=self,
                 layer_type=SupportedLayers.CONV2D,
             )
         ]
     elif layer_type == self.SUB_BLOCK_TYPES.MAXPOOL2D:
         return [
             LayerBlock(
                 input_shape=input_shape,
                 parent_block=self,
                 layer_type=SupportedLayers.MAXPOOL2D,
             )
         ]
     return []
Example #10
 def generate_random_sub_block(self, input_shape, layer_type):
     # Dense layer sized to the class count; input_shape=None is
     # resolved from the previous layer (see Example #12).
     return [
         LayerBlock(
             input_shape=None,
             parent_block=self,
             layer_type=SupportedLayers.HIDDENDENSE,
             args={dense_args.UNITS: self.class_count},
         )
     ]
Example #11
    def generate_constrained_output_sub_blocks(self, input_shape):
        # ShuffleNet-style bottleneck: grouped pointwise conv, channel
        # shuffle, depthwise conv, then a second grouped pointwise conv
        # restoring the residual channel depth.
        residual_channel_depth = input_shape[-1]
        bottleneck_filters = residual_channel_depth // 4

        layers = []
        layers.append(
            LayerBlock(
                input_shape=input_shape,
                parent_block=self,
                layer_type=SupportedLayers.GROUPEDPOINTWISECONV2D,
                args={
                    conv_args.FILTERS: bottleneck_filters,
                    conv_args.ACTIVATION: ArgActivations.RELU,
                    conv_args.PADDING: ArgPadding.SAME,
                },
            ))
        layers.append(
            LayerBlock(
                input_shape=layers[-1].get_output_shape(),
                parent_block=self,
                layer_type=SupportedLayers.SHUFFLE,
            ))
        layers.append(
            LayerBlock(
                input_shape=layers[-1].get_output_shape(),
                parent_block=self,
                layer_type=SupportedLayers.DEPTHWISECONV2D,
                args={
                    conv_args.KERNEL_SIZE: (3, 3),
                    conv_args.PADDING: ArgPadding.SAME,
                },
            ))
        layers.append(
            LayerBlock(
                input_shape=layers[-1].get_output_shape(),
                parent_block=self,
                layer_type=SupportedLayers.GROUPEDPOINTWISECONV2D,
                args={
                    conv_args.FILTERS: residual_channel_depth,
                    conv_args.ACTIVATION: ArgActivations.RELU,
                },
            ))
        return layers
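The SHUFFLE layer between the two grouped pointwise convs is presumably a ShuffleNet-style channel shuffle, which lets information cross group boundaries. A minimal NumPy sketch of that operation (assuming channels-last tensors whose channel count divides evenly into groups):

 import numpy as np

 def channel_shuffle(x, groups):
     # x: (height, width, channels), channels divisible by groups.
     h, w, c = x.shape
     # Reshape to (h, w, groups, channels_per_group), swap the two channel
     # axes, then flatten back: channels are now interleaved across groups.
     x = x.reshape(h, w, groups, c // groups)
     x = x.transpose(0, 1, 3, 2)
     return x.reshape(h, w, c)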
Example #12
 def generate_constrained_output_sub_blocks(self, input_shape):
     """Use of input_shape=None causes the input shape to be resolved from the previous layer."""
     return [
         LayerBlock(
             input_shape=None,
             parent_block=self,
             layer_type=SupportedLayers.OUTPUTDENSE,
             args={dense_args.UNITS: self.class_count},
         )
     ]
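Examples #5, #10, and #12 all pass input_shape=None. Based on the docstring above, the framework resolves such shapes from the preceding layer; a sketch of how that resolution might look, assuming LayerBlock exposes a mutable input_shape attribute (our guess at the mechanism, not the framework's actual code):

 def resolve_shapes(layers, block_input_shape):
     # Hypothetical resolver: a None input shape inherits the previous
     # layer's output shape, or the block's input shape for the first layer.
     previous_shape = block_input_shape
     for layer in layers:
         if layer.input_shape is None:
             layer.input_shape = previous_shape
         previous_shape = layer.get_output_shape()
     return layers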
Example #13
 def generate_constrained_input_sub_blocks(self, input_shape):
     return [
         LayerBlock(
             input_shape=input_shape,
             parent_block=self,
             layer_type=SupportedLayers.CONV2D,
             args={
                 conv_args.FILTERS: 16,
                 conv_args.PADDING: ArgPadding.SAME
             },
         )
     ]
Example #14
 def generate_random_sub_block(self, input_shape, layer_type):
     """
     Care must be taken here that the input shape is the input to the Expand block as all layers are in parallel
     and thus take the same input, ie. the input to the block.
     """
     if layer_type == self.SUB_BLOCK_TYPES.CONV2D:
         return [
             LayerBlock(
                 input_shape=self.input_shape,
                 parent_block=self,
                 layer_type=SupportedLayers.SAMECONV2D,
             )
         ]
     return []
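Tying Examples #7 and #14 together: sequential children chain shapes while parallel children share the block's input shape. A schematic sketch with hypothetical helper names, assuming a block exposes an input_shape attribute (not framework code):

 def wire_sequential(block, child_types):
     # Sequential: each child consumes its predecessor's output shape.
     layers, shape = [], block.input_shape
     for layer_type in child_types:
         layer = LayerBlock(input_shape=shape, parent_block=block,
                            layer_type=layer_type)
         layers.append(layer)
         shape = layer.get_output_shape()
     return layers

 def wire_parallel(block, child_types):
     # Parallel: every child consumes the same block-level input shape.
     return [
         LayerBlock(input_shape=block.input_shape, parent_block=block,
                    layer_type=layer_type)
         for layer_type in child_types
     ]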