Example #1
    def __init__(self,
                 basic_cell,
                 input_size,
                 hidden_size,
                 bias=True,
                 num_layers=1):
        super(BaseCell, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        self.num_layers = num_layers

        self.cells = nn.ModuleList()
        for i in range(num_layers):
            if i == 0:
                self.cells.append(
                    basic_cell(input_size=input_size,
                               hidden_size=hidden_size,
                               bias=bias))
            else:
                self.cells.append(
                    basic_cell(input_size=hidden_size,
                               hidden_size=hidden_size,
                               bias=bias))
        init_weights(self.modules())
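
The stacking pattern above generalizes to any cell class. A minimal runnable sketch, using nn.LSTMCell as a stand-in for basic_cell and purely illustrative sizes:

import torch
import torch.nn as nn

# Layer 0 maps input_size -> hidden_size; every later layer maps
# hidden_size -> hidden_size, exactly as in the loop above.
cells = nn.ModuleList([
    nn.LSTMCell(input_size=32, hidden_size=64),
    nn.LSTMCell(input_size=64, hidden_size=64),
])
x = torch.randn(8, 32)          # (batch, input_size)
h = c = torch.zeros(8, 64)
for cell in cells:
    h, c = cell(x, (h, c))
    x = h                       # the hidden state feeds the next layer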
Example #2
    def __init__(self,
                 in_channels,
                 out_channels,
                 num_class,
                 batch_max_length,
                 from_layer,
                 inner_channels=None,
                 bias=True,
                 activation='relu',
                 inplace=True,
                 dropouts=None,
                 num_fcs=0,
                 pool=None):
        super(FCHead, self).__init__()

        self.num_class = num_class
        self.batch_max_length = batch_max_length
        self.from_layer = from_layer

        if num_fcs > 0:
            inter_fc = FCModules(in_channels, inner_channels, bias, activation,
                                 inplace, dropouts, num_fcs)
            fc = nn.Linear(inner_channels, out_channels)
        else:
            inter_fc = nn.Sequential()
            fc = nn.Linear(in_channels, out_channels)

        if pool is not None:
            self.pool = build_torch_nn(pool)

        self.inter_fc = inter_fc
        self.fc = fc

        logger.info('FCHead init weights')
        init_weights(self.modules())
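
A quick sketch of the num_fcs branch with illustrative sizes: an empty nn.Sequential() acts as an identity, so with num_fcs == 0 the head reduces to a single Linear projection:

import torch
import torch.nn as nn

inter_fc = nn.Sequential()          # identity placeholder (num_fcs == 0)
fc = nn.Linear(256, 37)             # in_channels -> out_channels
x = torch.randn(4, 256)
print(fc(inter_fc(x)).shape)        # torch.Size([4, 37])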
Example #3
    def __init__(self, spin, k):
        super(SPIN, self).__init__()
        self.body = build_feature_extractor(spin['feature_extractor'])
        self.spn = SPN(spin['spn'])
        self.ain = AIN(spin['ain'])
        self.betas = generate_beta(k)
        init_weights(self.modules())
Example #4
    def __init__(self,
                 cell,
                 generator,
                 num_steps,
                 num_class,
                 input_attention_block=None,
                 output_attention_block=None,
                 text_transform=None,
                 holistic_input_from=None):
        super(AttHead, self).__init__()
        # from vedastr import utils
        # utils.set_random_seed(1)
        if input_attention_block is not None:
            self.input_attention_block = build_brick(input_attention_block)

        self.cell = build_sequence_decoder(cell)
        self.generator = build_torch_nn(generator)
        self.num_steps = num_steps
        self.num_class = num_class

        if output_attention_block is not None:
            self.output_attention_block = build_brick(output_attention_block)

        if text_transform is not None:
            self.text_transform = build_torch_nn(text_transform)

        if holistic_input_from is not None:
            self.holistic_input_from = holistic_input_from

        self.register_buffer('embeddings',
                             torch.diag(torch.ones(self.num_class)))
        logger.info('AttHead init weights')
        init_weights(self.modules())
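
The embeddings buffer registered above is the num_class x num_class identity matrix: a one-hot lookup table that follows the module across devices but is never trained. A small sketch (num_class = 5 is illustrative):

import torch

embeddings = torch.diag(torch.ones(5))   # equivalent to torch.eye(5)
targets = torch.tensor([0, 3, 1])
print(embeddings[targets])               # three one-hot rows of length 5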
Example #5
    def __init__(self, layers, zero_init_residual=False,
                 groups=1, width_per_group=64, norm_layer=None):
        super(GResNet, self).__init__()

        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer

        self.dilation = 1
        self.groups = groups
        self.base_width = width_per_group

        self.layers = nn.ModuleList()
        stage_layers = []
        for layer_name, layer_cfg in layers:
            if layer_name == 'conv':
                layer = build_module(layer_cfg)
                self.inplanes = layer_cfg['out_channels']
            elif layer_name == 'pool':
                layer = build_torch_nn(layer_cfg)
            elif layer_name == 'block':
                layer = self._make_layer(**layer_cfg)
            else:
                raise ValueError('Unknown layer name {}'.format(layer_name))
            stride = layer_cfg.get('stride', 1)
            max_stride = stride if isinstance(stride, int) else max(stride)
            if max_stride > 1:
                self.layers.append(nn.Sequential(*stage_layers))
                stage_layers = []
            stage_layers.append(layer)
        self.layers.append(nn.Sequential(*stage_layers))

        logger.info('GResNet init weights')
        init_weights(self.modules())
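
The loop splits the backbone into stages: any layer whose (possibly tuple-valued) stride exceeds 1 closes the current stage before being added to a new one. A standalone sketch of just that rule, with an illustrative config:

layers = [('conv', {'stride': 1}),
          ('conv', {'stride': 2}),
          ('pool', {'stride': (2, 1)})]
stages, stage_layers = [], []
for name, cfg in layers:
    stride = cfg.get('stride', 1)
    max_stride = stride if isinstance(stride, int) else max(stride)
    if max_stride > 1:               # a downsampling layer starts a new stage
        stages.append(stage_layers)
        stage_layers = []
    stage_layers.append(name)
stages.append(stage_layers)
print(stages)                        # [['conv'], ['conv'], ['pool']]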
Example #6
    def __init__(self,
                 in_channels,
                 out_channels,
                 atrous_rates,
                 from_layer,
                 to_layer,
                 dropout=None):
        super(ASPP, self).__init__()
        self.from_layer = from_layer
        self.to_layer = to_layer

        modules = []
        modules.append(
            nn.Sequential(nn.Conv2d(in_channels, out_channels, 1, bias=False),
                          nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True)))

        rate1, rate2, rate3 = tuple(atrous_rates)
        modules.append(ASPPConv(in_channels, out_channels, rate1))
        modules.append(ASPPConv(in_channels, out_channels, rate2))
        modules.append(ASPPConv(in_channels, out_channels, rate3))
        modules.append(ASPPPooling(in_channels, out_channels))

        self.convs = nn.ModuleList(modules)

        self.project = nn.Sequential(
            nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True))
        self.with_dropout = dropout is not None
        if self.with_dropout:
            self.dropout = nn.Dropout(dropout)

        logger.info('ASPP init weights')
        init_weights(self.modules())
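
The projection's 5 * out_channels input comes from concatenating the five parallel branches (the 1x1 conv, three atrous convs, and global pooling) along the channel axis. A shape-only sketch with illustrative sizes:

import torch
import torch.nn as nn

out_channels = 8
branches = [torch.randn(1, out_channels, 16, 16) for _ in range(5)]
fused = torch.cat(branches, dim=1)          # (1, 5 * out_channels, 16, 16)
project = nn.Conv2d(5 * out_channels, out_channels, 1, bias=False)
print(project(fused).shape)                 # torch.Size([1, 8, 16, 16])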
Example #7
    def __init__(self, neck, fusion=None):
        super().__init__()
        self.neck = build_bricks(neck)
        if fusion:
            self.fusion = build_brick(fusion)
        else:
            self.fusion = None
        logger.info('GFPN init weights')
        init_weights(self.modules())
Example #8
    def __init__(self, decoder_layer, num_layers, position_encoder=None):
        super(TransformerDecoder, self).__init__()

        if position_encoder is not None:
            self.pos_encoder = build_position_encoder(position_encoder)

        self.layers = nn.ModuleList([build_decoder_layer(decoder_layer) for _ in range(num_layers)])

        logger.info('TransformerDecoder init weights')
        init_weights(self.modules())
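
Because each decoder layer is built from the config inside the comprehension, the num_layers copies have independent weights. The same idea expressed with PyTorch's own nn.TransformerDecoderLayer and explicit deep copies (sizes are illustrative):

import copy
import torch.nn as nn

decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)
layers = nn.ModuleList(
    [copy.deepcopy(decoder_layer) for _ in range(6)])  # untied weights per layer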
Example #9
    def __init__(self, from_layer, generator):
        super(Head, self).__init__()

        self.from_layer = from_layer
        self.generator = build_module(generator)

        logger.info('Head init weights')
        init_weights(self.modules())
Example #10
    def __init__(self, in_channels, num_class, from_layer, pool=None):
        super(CTCHead, self).__init__()

        self.num_class = num_class
        self.from_layer = from_layer
        fc = nn.Linear(in_channels, num_class)

        # if pool is not None:
        #     self.pool = build_torch_nn(pool)
        self.fc = fc

        logger.info('CTCHead init weights')
        init_weights(self.modules())
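
A sketch of what the head computes: a per-timestep linear projection from feature channels to class logits, as consumed by CTC loss and decoding (sizes are illustrative):

import torch
import torch.nn as nn

fc = nn.Linear(256, 37)             # in_channels=256, num_class=37
feats = torch.randn(4, 25, 256)     # (batch, time steps, in_channels)
logits = fc(feats)                  # (4, 25, 37): one class distribution per step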
Example #11
    def __init__(self, in_channels, out_channels, bins, from_layer, to_layer):
        super(PPM, self).__init__()
        self.from_layer = from_layer
        self.to_layer = to_layer

        self.blocks = nn.ModuleList()
        for bin_ in bins:
            self.blocks.append(
                nn.Sequential(
                    nn.AdaptiveAvgPool2d(bin_),
                    nn.Conv2d(in_channels, out_channels, 1, bias=False),
                    nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True)))
        logger.info('PPM init weights')
        init_weights(self.modules())
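
Each pyramid level pools the input down to a fixed bin x bin grid before the 1x1 convolution. A shape sketch with illustrative bins:

import torch
import torch.nn as nn

x = torch.randn(1, 64, 32, 32)
for bin_ in (1, 2, 3, 6):
    pooled = nn.AdaptiveAvgPool2d(bin_)(x)
    print(pooled.shape)              # (1, 64, bin_, bin_)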
Example #12
    def __init__(self, num_steps, in_channels, embedding_channels=512, inner_channels=512):
        super(PVABlock, self).__init__()

        self.num_steps = num_steps
        self.in_channels = in_channels
        self.inner_channels = inner_channels
        self.embedding_channels = embedding_channels

        self.order_embeddings = nn.Parameter(torch.randn(self.num_steps, self.embedding_channels), requires_grad=True)

        self.v_linear = nn.Linear(self.in_channels, self.inner_channels, bias=False)
        self.o_linear = nn.Linear(self.embedding_channels, self.inner_channels, bias=False)
        self.e_linear = nn.Linear(self.inner_channels, 1, bias=False)

        init_weights(self.modules())
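
The snippet omits forward(), so the following is an assumption rather than the module's actual code: a plausible additive-attention use of the three projections, scoring each learned order embedding against every spatial feature:

import torch
import torch.nn as nn

B, T, N = 2, 25, 100                        # batch, decode steps, spatial positions
c_in, c_emb, c_inner = 256, 512, 512
order = nn.Parameter(torch.randn(T, c_emb))
v_linear = nn.Linear(c_in, c_inner, bias=False)
o_linear = nn.Linear(c_emb, c_inner, bias=False)
e_linear = nn.Linear(c_inner, 1, bias=False)

feats = torch.randn(B, N, c_in)
scores = e_linear(torch.tanh(
    o_linear(order)[None, :, None, :] +     # (1, T, 1, c_inner)
    v_linear(feats)[:, None, :, :]          # (B, 1, N, c_inner)
)).squeeze(-1)                              # (B, T, N) attention logits
attn = scores.softmax(dim=-1)
glimpses = attn @ feats                     # (B, T, c_in) attended features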
Example #13
    def __init__(self, input_pool, layers, keep_order=False):
        super(RNN, self).__init__()
        self.keep_order = keep_order

        if input_pool:
            self.input_pool = build_torch_nn(input_pool)

        self.layers = nn.ModuleList()
        for i, (layer_name, layer_cfg) in enumerate(layers):
            if layer_name in ['rnn', 'fc']:
                self.layers.add_module('{}_{}'.format(i, layer_name),
                                       build_torch_nn(layer_cfg))
            else:
                raise ValueError('Unknown layer name {}'.format(layer_name))

        init_weights(self.modules())
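
Note that layers is populated through add_module, so the children get '{index}_{name}' keys on the ModuleList yet still iterate in insertion order. A minimal sketch:

import torch.nn as nn

layers = nn.ModuleList()
layers.add_module('0_rnn', nn.LSTM(input_size=64, hidden_size=64, batch_first=True))
layers.add_module('1_fc', nn.Linear(64, 32))
for layer in layers:
    print(type(layer).__name__)      # LSTM, then Linear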
Example #14
    def __init__(self, arch, replace_stride_with_dilation=None, multi_grid=None, pretrain=True):
        cfg = MODEL_CFGS[arch]
        super().__init__(
            cfg['block'],
            cfg['layer'],
            replace_stride_with_dilation=replace_stride_with_dilation,
            multi_grid=multi_grid,
        )

        if pretrain:
            logger.info('ResNet init weights from pretrain')
            state_dict = load_state_dict_from_url(cfg['weights_url'])
            self.load_state_dict(state_dict, strict=False)
        else:
            logger.info('ResNet init weights')
            init_weights(self.modules())

        del self.fc, self.avgpool
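
strict=False makes the load tolerant of key mismatches between the checkpoint and the customized model: missing and unexpected keys are returned instead of raising. A toy sketch:

import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 4))
state = {'0.weight': model[0].weight.data.clone()}   # '0.bias' deliberately absent
missing, unexpected = model.load_state_dict(state, strict=False)
print(missing)                       # ['0.bias'], tolerated rather than an error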
Example #15
    def __init__(self, layers):
        super(GVGG, self).__init__()

        self.layers = nn.ModuleList()
        stage_layers = []
        for layer_name, layer_cfg in layers:
            if layer_name == 'conv':
                layer = build_module(layer_cfg)
            elif layer_name == 'pool':
                layer = build_torch_nn(layer_cfg)
            else:
                raise ValueError('Unknown layer name {}'.format(layer_name))
            stride = layer_cfg.get('stride', 1)
            max_stride = stride if isinstance(stride, int) else max(stride)
            if max_stride > 1:
                self.layers.append(nn.Sequential(*stage_layers))
                stage_layers = []
            stage_layers.append(layer)
        self.layers.append(nn.Sequential(*stage_layers))

        init_weights(self.modules())
Example #16
    def __init__(
        self,
        decoder,
        generator,
        embedding,
        num_steps,
        pad_id,
        src_from,
        src_mask_from=None,
    ):
        super(TransformerHead, self).__init__()

        self.decoder = build_sequence_decoder(decoder)
        self.generator = build_torch_nn(generator)
        self.embedding = build_torch_nn(embedding)
        self.num_steps = num_steps
        self.pad_id = pad_id
        self.src_from = src_from
        self.src_mask_from = src_mask_from

        logger.info('TransformerHead init weights')
        init_weights(self.modules())