Example #1
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 dilation=1,
                 stride=2,
                 padding=1,
                 output_padding=1,
                 activation='PReLU',
                 bias=False,
                 dropout_prob=0.1):
        super(UpsamplingBottleNeck, self).__init__()
        internal_channels = in_channels // 4
        self.conv_down = Sequential(
            Conv2d(in_channels,
                   internal_channels,
                   kernel_size=1,
                   stride=1,
                   padding=0,
                   bias=bias), BatchNorm2d(internal_channels),
            PReLU() if activation == 'PReLU' else ReLU())
        self.conv_main = Sequential(
            ConvTranspose2d(internal_channels,
                            internal_channels,
                            kernel_size=kernel_size,
                            stride=stride,
                            padding=padding,
                            output_padding=output_padding,
                            dilation=dilation,
                            bias=bias), BatchNorm2d(internal_channels),
            PReLU() if activation == 'PReLU' else ReLU())
        self.conv_up = Sequential(
            Conv2d(internal_channels,
                   out_channels,
                   kernel_size=1,
                   stride=1,
                   padding=0,
                   bias=bias), BatchNorm2d(out_channels),
            PReLU() if activation == 'PReLU' else ReLU())
        self.main_conv = Sequential(
            Conv2d(in_channels,
                   out_channels,
                   kernel_size=1,
                   stride=1,
                   padding=0,
                   bias=bias), BatchNorm2d(out_channels))
        self.mainmaxunpool = MaxUnpool2d(kernel_size=2, stride=2, padding=0)
        self.regularizer = Dropout2d(p=dropout_prob)
        self.out_activation = PReLU() if activation == 'PReLU' else ReLU()
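A minimal forward sketch for this block (an assumption; the source shows only __init__), following the ENet upsampling-bottleneck pattern: the main branch is projected by main_conv and unpooled with the encoder's saved pooling indices, while the extension branch runs through conv_down, conv_main, and conv_up; the two are summed before the shared output activation. The max_indices argument is hypothetical.

    def forward(self, x, max_indices, output_size=None):
        # Main branch: 1x1 projection, then unpool with the encoder's indices.
        main = self.main_conv(x)
        main = self.mainmaxunpool(main, max_indices, output_size=output_size)
        # Extension branch: reduce -> transposed-conv upsample -> expand.
        ext = self.conv_down(x)
        ext = self.conv_main(ext)
        ext = self.conv_up(ext)
        ext = self.regularizer(ext)
        # Residual sum, then the shared output activation.
        return self.out_activation(main + ext)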
Example #2
    def __init__(self,
                 vocab: Vocabulary,
                 sentence_encoder: SentenceEncoder,
                 tan_ffnn: FeedForward,
                 inject_predicate: bool = False,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None):
        super(SpanToTanModel, self).__init__(vocab, regularizer)
        self._sentence_encoder = sentence_encoder
        self._tan_ffnn = tan_ffnn
        self._inject_predicate = inject_predicate

        self._span_extractor = EndpointSpanExtractor(
            input_dim=self._sentence_encoder.get_output_dim(),
            combination="x,y")
        prediction_input_dim = ((3 if self._inject_predicate else 2) *
                                self._sentence_encoder.get_output_dim())
        self._tan_pred = TimeDistributed(
            Sequential(
                Linear(prediction_input_dim, self._tan_ffnn.get_input_dim()),
                ReLU(), self._tan_ffnn,
                Linear(self._tan_ffnn.get_output_dim(),
                       self.vocab.get_vocab_size("tan-string-labels"))))
        self._metric = BinaryF1()
Example #3
    def __init__(self,
                 input_dim=13,
                 num_classes=9,
                 d_model=64,
                 n_head=2,
                 n_layers=5,
                 d_inner=128,
                 activation="relu",
                 dropout=0.017998950510888446,
                 max_len=200):

        super(PETransformerModel, self).__init__()
        self.modelname = f"PeTransformerEncoder_input-dim={input_dim}_num-classes={num_classes}_" \
                         f"d-model={d_model}_d-inner={d_inner}_n-layers={n_layers}_n-head={n_head}_" \
                         f"dropout={dropout}"

        encoder_layer = TransformerEncoderLayer(d_model, n_head, d_inner,
                                                dropout, activation)
        encoder_norm = LayerNorm(d_model)

        self.inlinear = Linear(input_dim, d_model)
        self.relu = ReLU()
        self.transformerencoder = TransformerEncoder(encoder_layer, n_layers,
                                                     encoder_norm)
        self.flatten = Flatten()
        self.outlinear = Linear(d_model, num_classes)
        self.pe = PositionalEncoding(d_model, max_len=max_len)
        """
Example #4
    def __init__(self,
                 vocab: Vocabulary,
                 sentence_encoder: SentenceEncoder,
                 question_encoder: SlotSequenceEncoder,
                 span_selector: PruningSpanSelector,
                 classify_invalids: bool = True,
                 invalid_hidden_dim: int = 100,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None):
        super(QuestionToSpanModel, self).__init__(vocab, regularizer)
        self._sentence_encoder = sentence_encoder
        self._question_encoder = question_encoder
        self._span_selector = span_selector
        self._classify_invalids = classify_invalids
        self._invalid_hidden_dim = invalid_hidden_dim

        injected_embedding_dim = (self._sentence_encoder.get_output_dim() +
                                  self._question_encoder.get_output_dim())
        extra_input_dim = self._span_selector.get_extra_input_dim()
        if injected_embedding_dim != extra_input_dim:
            raise ConfigurationError(
                "Sum of pred rep and question embedding dim %s did not match span selector injection dim of %s"
                % (injected_embedding_dim, extra_input_dim))

        if self._classify_invalids:
            self._invalid_pred = Sequential(
                Linear(extra_input_dim, self._invalid_hidden_dim), ReLU(),
                Linear(self._invalid_hidden_dim, 1))
            self._invalid_metric = BinaryF1()
Example #5
def mini_vgg_features():
    model = Sequential(
        # 32 x 32
        Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        ReLU(),
        Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        ReLU(),
        MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False),

        # 16 x 16
        Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        ReLU(),
        Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        ReLU(),
        MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False),

        # 8 x 8
        Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        ReLU(),
        Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        ReLU(),
        Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        ReLU(),
        MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False),

        # 4 x 4
    )
    return model
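A quick shape check (a sketch assuming 32 x 32 RGB input, as the inline comments suggest): three stride-2 poolings reduce 32 x 32 to 4 x 4, with 256 output channels.

import torch

features = mini_vgg_features()
x = torch.randn(1, 3, 32, 32)
print(features(x).shape)  # torch.Size([1, 256, 4, 4])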
Example #6
def mini_vgg_decoder():
    model = Sequential(
        # 8 x 8
        UpsamplingBilinear2d(scale_factor=2),
        ConvTranspose2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        ReLU(),
        ConvTranspose2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        ReLU(),
        ConvTranspose2d(256, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        ReLU(),

        # 16 x 16
        UpsamplingBilinear2d(scale_factor=2),
        ConvTranspose2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        ReLU(),
        ConvTranspose2d(128, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        ReLU(),

        # 32 x 32
        UpsamplingBilinear2d(scale_factor=2),
        ConvTranspose2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        ReLU(),
        ConvTranspose2d(64, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        Tanh(),
    )
    return model
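Chaining the two functions gives an autoencoder-style round trip (a sketch; the pairing is an assumption based on the matching channel counts): each UpsamplingBilinear2d doubles the spatial size, taking the 4 x 4 code back to 32 x 32, and the final Tanh bounds outputs to [-1, 1].

import torch

encoder, decoder = mini_vgg_features(), mini_vgg_decoder()
recon = decoder(encoder(torch.randn(2, 3, 32, 32)))
print(recon.shape)  # torch.Size([2, 3, 32, 32])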
Example #7
    def __init__(self,
                 input_dim: int,
                 extra_input_dim: int = 0,
                 span_hidden_dim: int = 100,
                 span_ffnn: FeedForward = None,
                 classifier: SetClassifier = SetBinaryClassifier(),
                 span_decoding_threshold: float = 0.05,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None):
        super(SpanSelector, self).__init__()

        self._input_dim = input_dim
        self._extra_input_dim = extra_input_dim
        self._span_hidden_dim = span_hidden_dim
        self._span_ffnn = span_ffnn
        self._classifier = classifier
        self._span_decoding_threshold = span_decoding_threshold

        self._span_hidden = SpanRepAssembly(self._input_dim, self._input_dim, self._span_hidden_dim)
        if self._span_ffnn is not None:
            if self._span_ffnn.get_input_dim() != self._span_hidden_dim:
                raise ConfigurationError(
                    "Span hidden dim %s must match span classifier FFNN input dim %s" % (
                        self._span_hidden_dim, self._span_ffnn.get_input_dim()
                    )
                )
            self._span_scorer = TimeDistributed(
                torch.nn.Sequential(
                    ReLU(),
                    self._span_ffnn,
                    Linear(self._span_ffnn.get_output_dim(), 1)))
        else:
            self._span_scorer = TimeDistributed(
                torch.nn.Sequential(
                    ReLU(),
                    Linear(self._span_hidden_dim, 1)))
        self._span_pruner = Pruner(self._span_scorer)

        if self._extra_input_dim > 0:
            self._extra_input_lin = Linear(self._extra_input_dim, self._span_hidden_dim)
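A usage sketch for the default scorer (an assumption; the dimensions are illustrative): with span_ffnn left as None, TimeDistributed applies Sequential(ReLU(), Linear(span_hidden_dim, 1)) to every span independently, mapping (batch, num_spans, span_hidden_dim) to one score per span.

# Hypothetical shapes; span_hidden_dim defaults to 100.
import torch

selector = SpanSelector(input_dim=300)
span_hidden = torch.randn(2, 10, 100)   # (batch, num_spans, span_hidden_dim)
scores = selector._span_scorer(span_hidden)
print(scores.shape)                     # torch.Size([2, 10, 1])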
Example #8
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 dilation=1,
                 stride=1,
                 padding=1,
                 activation='PReLU',
                 bias=False,
                 asymmetric=False,
                 dropout_prob=0):
        super(RegularBottleNeck, self).__init__()
        internal_channels = in_channels // 4
        self.conv_down = Sequential(
            Conv2d(in_channels,
                   internal_channels,
                   kernel_size=1,
                   stride=1,
                   padding=0,
                   bias=bias), BatchNorm2d(internal_channels),
            PReLU() if activation == 'PReLU' else ReLU())
        if asymmetric is False:
            self.conv_main = Sequential(
                Conv2d(internal_channels,
                       internal_channels,
                       kernel_size=kernel_size,
                       dilation=dilation,
                       stride=stride,
                       padding=padding,
                       bias=bias), BatchNorm2d(internal_channels),
                PReLU() if activation == 'PReLU' else ReLU())
        else:
            self.conv_main = Sequential(
                Conv2d(internal_channels,
                       internal_channels,
                       kernel_size=(kernel_size, 1),
                       dilation=dilation,
                       stride=stride,
                       padding=(padding, 0),
                       bias=bias), BatchNorm2d(internal_channels),
                PReLU() if activation == 'PReLU' else ReLU(),
                Conv2d(internal_channels,
                       internal_channels,
                       kernel_size=(1, kernel_size),
                       dilation=dilation,
                       stride=stride,
                       padding=(0, padding),
                       bias=bias), BatchNorm2d(internal_channels),
                PReLU() if activation == 'PReLU' else ReLU())
        self.conv_up = Sequential(
            Conv2d(internal_channels,
                   out_channels,
                   kernel_size=1,
                   stride=1,
                   padding=0,
                   bias=bias), BatchNorm2d(out_channels),
            PReLU() if activation == 'PReLU' else ReLU())
        self.regularizer = Dropout2d(p=dropout_prob)
        self.out_activation = PReLU() if activation == 'PReLU' else ReLU()
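A plausible forward for this bottleneck (an assumption, following ENet's regular-bottleneck pattern): the extension branch output is added to the untouched input, so the residual sum requires in_channels == out_channels.

    def forward(self, x):
        # Extension branch: 1x1 reduce -> main conv(s) -> 1x1 expand.
        ext = self.conv_up(self.conv_main(self.conv_down(x)))
        ext = self.regularizer(ext)
        # Residual connection; assumes in_channels == out_channels.
        return self.out_activation(x + ext)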
Example #9
    def __init__(self,
                 in_channels,
                 out_channels,
                 activation='PReLU',
                 bias=False):
        super(InitialBlock, self).__init__()
        self.conv = Conv2d(in_channels=in_channels,
                           out_channels=out_channels - 3,
                           kernel_size=3,
                           stride=2,
                           padding=1,
                           bias=bias)
        self.maxpooling = MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.bnActivate = Sequential(
            BatchNorm2d(out_channels),
            PReLU() if activation == 'PReLU' else ReLU())
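A forward sketch (an assumption, ENet-style): the conv branch (out_channels - 3 channels) and the max-pooled input (3 channels, assuming RGB) are concatenated on the channel axis to give out_channels channels, then normalized and activated.

    def forward(self, x):
        # Assumes: import torch
        branches = torch.cat([self.conv(x), self.maxpooling(x)], dim=1)
        return self.bnActivate(branches)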
Example #10
    def __init__(self,
                 input_dim=13,
                 num_classes=9,
                 sequencelength=13,
                 d_model=64,
                 n_head=1,
                 n_layers=3,
                 d_inner=256,
                 activation="relu",
                 dropout=0.39907201621346594):

        super(TransformerModel, self).__init__()
        self.modelname = f"TransformerEncoder_input-dim={input_dim}_num-classes={num_classes}_" \
                         f"d-model={d_model}_d-inner={d_inner}_n-layers={n_layers}_n-head={n_head}_" \
                         f"dropout={dropout}"

        encoder_layer = TransformerEncoderLayer(d_model, n_head, d_inner,
                                                dropout, activation)
        encoder_norm = LayerNorm(d_model)

        self.sequential = Sequential(
            Linear(input_dim, d_model), ReLU(),
            TransformerEncoder(encoder_layer, n_layers, encoder_norm),
            Flatten(), ReLU(), Linear(d_model * sequencelength, num_classes))
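Because the whole pipeline is one Sequential, a shape check is straightforward (a sketch under the default hyperparameters; Flatten() means the input sequence length must equal sequencelength=13):

import torch

model = TransformerModel()
x = torch.randn(4, 13, 13)        # (batch, sequencelength, input_dim)
print(model.sequential(x).shape)  # torch.Size([4, 9]) -- one logit per class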
Example #11
    def __init__(self, features):
        super(bna, self).__init__()
        self.batchnorm = BatchNorm2d(features)
        self.activate = ReLU(inplace=True)
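The module just chains its two children; a one-line forward (an assumption, not shown in the source) completes it:

    def forward(self, x):
        # Batch-norm followed by in-place ReLU.
        return self.activate(self.batchnorm(x))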
Example #12
    def __init__(
            self,
            input_dim: int,
            extra_input_dim: int = 0,
            span_hidden_dim: int = 100,
            span_ffnn: FeedForward = None,
            objective: str = "binary",
            gold_span_selection_policy: str = "union",
            pruning_ratio: float = 2.0,
            skip_metrics_during_training: bool = True,
            # metric: SpanMetric = SpanMetric(),
            initializer: InitializerApplicator = InitializerApplicator(),
            regularizer: Optional[RegularizerApplicator] = None):
        super(PruningSpanSelector, self).__init__()

        self._input_dim = input_dim
        self._span_hidden_dim = span_hidden_dim
        self._extra_input_dim = extra_input_dim
        self._span_ffnn = span_ffnn
        self._pruning_ratio = pruning_ratio
        self._objective = objective
        self._gold_span_selection_policy = gold_span_selection_policy
        self._skip_metrics_during_training = skip_metrics_during_training

        if objective not in objective_values:
            raise ConfigurationError(
                "QA objective must be one of the following: " +
                str(objective_values))

        if gold_span_selection_policy not in gold_span_selection_policy_values:
            raise ConfigurationError(
                "QA span selection policy must be one of the following: " +
                str(gold_span_selection_policy_values))

        if objective == "multinomial" and gold_span_selection_policy == "weighted":
            raise ConfigurationError(
                "Cannot use weighted span selection policy with multinomial objective."
            )

        # self._metric = metric

        self._span_hidden = SpanRepAssembly(input_dim, input_dim,
                                            self._span_hidden_dim)

        if self._span_ffnn is not None:
            if self._span_ffnn.get_input_dim() != self._span_hidden_dim:
                raise ConfigurationError(
                    "Span hidden dim %s must match span classifier FFNN input dim %s"
                    % (self._span_hidden_dim, self._span_ffnn.get_input_dim()))
            self._span_scorer = TimeDistributed(
                torch.nn.Sequential(
                    ReLU(), self._span_ffnn,
                    Linear(self._span_ffnn.get_output_dim(), 1)))
        else:
            self._span_scorer = TimeDistributed(
                torch.nn.Sequential(ReLU(), Linear(self._span_hidden_dim, 1)))

        self._span_pruner = Pruner(self._span_scorer)

        if self._extra_input_dim > 0:
            self._extra_input_lin = Linear(self._extra_input_dim,
                                           self._span_hidden_dim)
Example #13
    def __init__(self, c_in, c_out, filter_size, stride=1, padding=0, **kwargs):
        super(Conv2dBN, self).__init__()
        self.conv = Conv2d(c_in, c_out, filter_size, stride=stride,
                           padding=padding, **kwargs)
        self.bn = BatchNorm2d(c_out)
        self.relu = ReLU()
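As with the previous block, a conv -> batch-norm -> ReLU forward (an assumption) is all that is missing:

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))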