Example #1
    def create_layers(self, shape, crop_size=8, conv_args=None, fc_args=None):
        '''Creates layers

        conv_args entries are tuples of the form (dim_h, f_size, stride, pad, batch_norm, dropout, nonlinearity, pool).
        fc_args entries are tuples of the form (dim_h, batch_norm, dropout, nonlinearity).

        Args:
            shape: Shape of input.
            crop_size: Size of the crops.
            conv_args: List of tuples of convolutional arguments.
            fc_args: List of tuples of fully-connected arguments.
        '''

        self.crop_size = crop_size

        dim_x, dim_y, dim_in = shape
        if dim_x != dim_y:
            raise ValueError('x and y dimensions must be the same to use Folded encoders.')

        self.final_size = 2 * (dim_x // self.crop_size) - 1

        self.unfold = Unfold(dim_x, self.crop_size)
        self.refold = Fold(dim_x, self.crop_size)

        shape = (self.crop_size, self.crop_size, dim_in)

        self.conv_layers, self.conv_shape = self.create_conv_layers(shape, conv_args)

        dim_x, dim_y, dim_out = self.conv_shape
        dim_r = dim_x * dim_y * dim_out
        self.reshape = View(-1, dim_r)
        self.fc_layers, _ = self.create_linear_layers(dim_r, fc_args)
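
The final_size formula above implies that the crops are taken with 50% overlap, i.e. a stride of crop_size // 2. A minimal sketch of that count, using illustrative sizes (the 32x32 input is an assumption, not a DIM default):

# Illustrative sizes: a square 32x32 input, default crop_size of 8.
dim_x, crop_size = 32, 8

# The crop grid has 2 * (dim_x // crop_size) - 1 positions per side, which
# is exactly the sliding-window count for a stride of crop_size // 2.
final_size = 2 * (dim_x // crop_size) - 1   # 7
stride = crop_size // 2
assert (dim_x - crop_size) // stride + 1 == final_size
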
Example #2
File: resnet.py Project: thu-jw/DIM
    def create_layers(self,
                      shape,
                      conv_before_args=None,
                      res_args=None,
                      conv_after_args=None,
                      fc_args=None):
        '''Creates layers

        Args:
            shape: Shape of the input.
            conv_before_args: Arguments for convolutional layers before residuals.
            res_args: Arguments for the residual layers.
            conv_after_args: Arguments for convolutional layers after residuals.
            fc_args: Arguments for the fully-connected layers.

        '''

        dim_x, dim_y, dim_in = shape
        shape = (dim_x, dim_y, dim_in)
        self.conv_before_layers, self.conv_before_shape = self.create_conv_layers(
            shape, conv_before_args)
        self.res_layers, self.res_shape = self.create_res_layers(
            self.conv_before_shape, res_args)
        self.conv_after_layers, self.conv_after_shape = self.create_conv_layers(
            self.res_shape, conv_after_args)

        dim_x, dim_y, dim_out = self.conv_after_shape
        dim_r = dim_x * dim_y * dim_out
        self.reshape = View(-1, dim_r)
        self.fc_layers, _ = self.create_linear_layers(dim_r, fc_args)
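
The closing lines follow the same flatten-then-FC pattern as the other encoders. A short PyTorch sketch of the flatten step, assuming View(-1, dim_r) behaves like Tensor.view and using an illustrative conv_after_shape of (4, 4, 256):

import torch

dim_x, dim_y, dim_out = 4, 4, 256     # hypothetical conv_after_shape
dim_r = dim_x * dim_y * dim_out       # 4096 features per example

x = torch.randn(10, dim_out, dim_y, dim_x)   # NCHW batch from the conv stack
flat = x.view(-1, dim_r)                     # what View(-1, dim_r) computes
assert flat.shape == (10, dim_r)
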
Example #3
    def create_layers(self, shape, conv_args=None, fc_args=None):
        '''Creates layers

        conv_args entries are tuples of the form (dim_h, f_size, stride, pad, batch_norm, dropout, nonlinearity, pool).
        fc_args entries are tuples of the form (dim_h, batch_norm, dropout, nonlinearity).

        Args:
            shape: Shape of input.
            conv_args: List of tuples of convolutional arguments.
            fc_args: List of tuples of fully-connected arguments.
        '''

        self.conv_layers, self.conv_shape = self.create_conv_layers(shape, conv_args)

        dim_x, dim_y, dim_out = self.conv_shape
        dim_r = dim_x * dim_y * dim_out
        self.reshape = View(-1, dim_r)
        self.fc_layers, _ = self.create_linear_layers(dim_r, fc_args)
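
For reference, argument lists in the tuple formats the docstring describes might look like the following; the concrete values are illustrative, not taken from any DIM configuration:

conv_args = [
    # (dim_h, f_size, stride, pad, batch_norm, dropout, nonlinearity, pool)
    (64, 4, 2, 1, True, False, 'ReLU', None),
    (128, 4, 2, 1, True, False, 'ReLU', None),
]
fc_args = [
    # (dim_h, batch_norm, dropout, nonlinearity)
    (1024, True, False, 'ReLU'),
]
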
Example #4
File: resnet.py Project: thu-jw/DIM
    def create_layers(self,
                      shape,
                      crop_size=8,
                      conv_before_args=None,
                      res_args=None,
                      conv_after_args=None,
                      fc_args=None):
        '''Creates layers

        Args:
            shape: Shape of the input.
            crop_size: Size of the crops.
            conv_before_args: Arguments for convolutional layers before residuals.
            res_args: Arguments for the residual layers.
            conv_after_args: Arguments for convolutional layers after residuals.
            fc_args: Arguments for the fully-connected layers.

        '''
        self.crop_size = crop_size

        dim_x, dim_y, dim_in = shape
        self.final_size = 2 * (dim_x // self.crop_size) - 1

        self.unfold = Unfold(dim_x, self.crop_size)
        self.refold = Fold(dim_x, self.crop_size)

        shape = (self.crop_size, self.crop_size, dim_in)
        self.conv_before_layers, self.conv_before_shape = self.create_conv_layers(
            shape, conv_before_args)

        self.res_layers, self.res_shape = self.create_res_layers(
            self.conv_before_shape, res_args)
        self.conv_after_layers, self.conv_after_shape = self.create_conv_layers(
            self.res_shape, conv_after_args)

        dim_x, dim_y, dim_out = self.conv_after_shape
        dim_r = dim_x * dim_y * dim_out
        self.reshape = View(-1, dim_r)
        self.fc_layers, _ = self.create_linear_layers(dim_r, fc_args)
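
A rough standalone analogue of the unfold step using torch.nn.functional.unfold, assuming the custom Unfold(dim_x, crop_size) extracts 50%-overlapping square crops; that stride is an inference from the final_size formula, not taken from the DIM source:

import torch
import torch.nn.functional as F

dim_x, crop_size = 32, 8              # illustrative sizes
x = torch.randn(1, 3, dim_x, dim_x)

# Each column of `patches` is one flattened crop_size x crop_size crop.
patches = F.unfold(x, kernel_size=crop_size, stride=crop_size // 2)
final_size = 2 * (dim_x // crop_size) - 1
assert patches.shape == (1, 3 * crop_size ** 2, final_size ** 2)
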
Example #5
File: convnet.py Project: zizai/DIM
    def handle_layer(self, block, shape, layer, layer_type):
        '''Handles the layer arguments and adds the layer to the block.

        Args:
            block: nn.Sequential to add modules to.
            shape: Shape of the input.
            layer: Layer arguments.
            layer_type: Type of layer.

        Returns:
            tuple: Output shape.

        '''
        args = layer.pop('args', None)
        if layer_type == 'linear':
            if len(shape) == 3:
                dim_x, dim_y, dim_out = shape
                shape = (dim_x * dim_y * dim_out, )
                block.add_module('flatten', View(-1, shape[0]))
            bn = layer.get('bn', False)
            bias = layer.pop('bias', None)
            init = layer.pop('init', None)
            init_args = layer.pop('init_args', {})
            shape = self.add_linear_layer(block,
                                          shape,
                                          args=args,
                                          bn=bn,
                                          bias=bias,
                                          init=init,
                                          init_args=init_args)
        elif layer_type == 'conv':
            if len(shape) == 1:
                shape = (1, 1, shape[0])
                block.add_module('expand', Expand2d())
            bn = layer.get('bn', False)
            bias = layer.pop('bias', None)
            init = layer.pop('init', None)
            init_args = layer.pop('init_args', {})
            shape = self.add_conv_layer(block,
                                        shape,
                                        args=args,
                                        bn=bn,
                                        bias=bias,
                                        init=init,
                                        init_args=init_args)
        elif layer_type == 'tconv':
            if len(shape) == 1:
                raise ValueError('Transpose conv needs 4d input')
            bn = layer.get('bn', False)
            bias = layer.pop('bias', True)
            shape = self.add_tconv_layer(block,
                                         shape,
                                         args=args,
                                         bn=bn,
                                         bias=bias)
        elif layer_type == 'flatten':
            if len(shape) == 3:
                dim_x, dim_y, dim_out = shape
                shape = (dim_x * dim_y * dim_out, )
            block.add_module(layer_type, View(-1, shape[0]))
        elif layer_type == 'reshape':
            if args is None:
                raise ValueError('reshape needs args')
            new_shape = args
            dim_new = 1
            dim_out = 1
            for s in new_shape:
                dim_new *= s
            for s in shape:
                dim_out *= s
            if dim_new != dim_out:
                raise ValueError(
                    'New shape {} not compatible with old shape {}.'.format(
                        new_shape, shape))
            block.add_module(layer_type, View((-1, ) + new_shape))
            shape = new_shape[::-1]
        elif layer_type is None:
            pass
        else:
            raise NotImplementedError('Layer {} not supported. Use {}'.format(
                layer_type, self._supported_types))
        return shape
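
The 'reshape' branch only verifies that the old and new shapes describe the same number of elements. A standalone restatement of that check with illustrative shapes:

import math

old_shape = (4, 4, 64)    # hypothetical incoming shape
new_shape = (8, 8, 16)    # hypothetical 'args' for the reshape layer

# Mirrors the compatibility check above: total element counts must agree.
assert math.prod(new_shape) == math.prod(old_shape) == 1024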