Example #1
def create_mobilenetv2_ssd_lite(num_classes,
                                width_mult=1.0,
                                use_batch_norm=True,
                                onnx_compatible=False,
                                is_test=False,
                                quantize=False):
    base_net = MobileNetV2(width_mult=width_mult,
                           use_batch_norm=use_batch_norm,
                           onnx_compatible=onnx_compatible).features

    source_layer_indexes = [
        GraphPath(14, 'conv', 3),
        19,
    ]
    extras = ModuleList([
        InvertedResidual(1280, 512, stride=2, expand_ratio=0.2),
        InvertedResidual(512, 256, stride=2, expand_ratio=0.25),
        InvertedResidual(256, 256, stride=2, expand_ratio=0.5),
        InvertedResidual(256, 64, stride=2, expand_ratio=0.25)
    ])

    regression_headers = ModuleList([
        SeperableConv2d(in_channels=round(576 * width_mult),
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1,
                        onnx_compatible=False),
        SeperableConv2d(in_channels=1280,
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1,
                        onnx_compatible=False),
        SeperableConv2d(in_channels=512,
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1,
                        onnx_compatible=False),
        SeperableConv2d(in_channels=256,
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1,
                        onnx_compatible=False),
        SeperableConv2d(in_channels=256,
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1,
                        onnx_compatible=False),
        Conv2d(in_channels=64, out_channels=6 * 4, kernel_size=1),
    ])

    classification_headers = ModuleList([
        SeperableConv2d(in_channels=round(576 * width_mult),
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=1280,
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=512,
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=256,
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=256,
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        Conv2d(in_channels=64, out_channels=6 * num_classes, kernel_size=1),
    ])
    if quantize:
        from utils.quantize import quantize
        config.priors = quantize(config.priors,
                                 num_bits=8,
                                 min_value=float(config.priors.min()),
                                 max_value=float(config.priors.max()))
    return SSD(num_classes,
               base_net,
               source_layer_indexes,
               extras,
               classification_headers,
               regression_headers,
               is_test=is_test,
               config=config)
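For reference, a minimal usage sketch of the factory above (an assumption based on its signature; the SSD, MobileNetV2 and config symbols come from the surrounding pytorch-ssd module and are not shown here):

import torch

# hypothetical call; num_classes=21 corresponds to a VOC-style label set
net = create_mobilenetv2_ssd_lite(num_classes=21, width_mult=1.0, is_test=True)
net.eval()

dummy = torch.randn(1, 3, 300, 300)  # a 300x300 input resolution is assumed here
with torch.no_grad():
    confidences, boxes = net(dummy)  # in test mode the SSD is expected to return scores and boxes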
Example #2
class TabTransformerCombiner(Combiner):
    def __init__(self,
                 input_features: Dict[str, "InputFeature"] = None,
                 config: TabTransformerCombinerConfig = None,
                 **kwargs):
        super().__init__(input_features)
        self.name = "TabTransformerCombiner"
        logger.debug(f"Initializing {self.name}")

        if config.reduce_output is None:
            raise ValueError("TabTransformer requires the `reduce_output` "
                             "parameter")
        self.reduce_output = config.reduce_output
        self.reduce_sequence = SequenceReducer(
            reduce_mode=config.reduce_output,
            max_sequence_length=len(input_features),
            encoding_size=config.hidden_size)
        self.supports_masking = True

        self.embed_input_feature_name = config.embed_input_feature_name
        if self.embed_input_feature_name:
            vocab = [
                i_f for i_f in input_features
                if input_features[i_f].type() not in {NUMBER, BINARY}
            ]
            if self.embed_input_feature_name == "add":
                self.embed_i_f_name_layer = Embed(vocab,
                                                  config.hidden_size,
                                                  force_embedding_size=True)
                projector_size = config.hidden_size
            elif isinstance(self.embed_input_feature_name, int):
                if self.embed_input_feature_name > config.hidden_size:
                    raise ValueError("TabTransformer parameter "
                                     "`embed_input_feature_name` "
                                     "specified integer value ({}) "
                                     "needs to be smaller than "
                                     "`hidden_size` ({}).".format(
                                         self.embed_input_feature_name,
                                         config.hidden_size))
                self.embed_i_f_name_layer = Embed(
                    vocab,
                    self.embed_input_feature_name,
                    force_embedding_size=True,
                )
                projector_size = config.hidden_size - self.embed_input_feature_name
            else:
                raise ValueError("TabTransformer parameter "
                                 "`embed_input_feature_name` "
                                 "should be either None, an integer or `add`, "
                                 "the current value is "
                                 "{}".format(self.embed_input_feature_name))
        else:
            projector_size = config.hidden_size

        logger.debug("  Projectors")
        self.unembeddable_features = []
        self.embeddable_features = []
        for i_f in input_features:
            if input_features[i_f].type() in {NUMBER, BINARY}:
                self.unembeddable_features.append(i_f)
            else:
                self.embeddable_features.append(i_f)

        self.projectors = ModuleList()
        for i_f in self.embeddable_features:
            flatten_size = self.get_flatten_size(
                input_features[i_f].output_shape)
            self.projectors.append(Linear(flatten_size[0], projector_size))

        # The inputs to layer_norm are the encoder outputs for unembeddable features,
        # i.e. number or binary features.  These should be 2-dim tensors, and the
        # layer_norm size is the size of their concatenation.
        concatenated_unembeddable_encoders_size = 0
        for i_f in self.unembeddable_features:
            concatenated_unembeddable_encoders_size += input_features[
                i_f].output_shape[0]

        self.layer_norm = torch.nn.LayerNorm(
            concatenated_unembeddable_encoders_size)

        logger.debug("  TransformerStack")
        self.transformer_stack = TransformerStack(
            input_size=config.hidden_size,
            max_sequence_length=len(self.embeddable_features),
            hidden_size=config.hidden_size,
            # todo: can we just use projector_size? # hidden_size,
            num_heads=config.num_heads,
            output_size=config.transformer_output_size,
            num_layers=config.num_layers,
            dropout=config.dropout,
        )

        logger.debug("  FCStack")

        # determine input size to fully connected layer based on reducer
        if config.reduce_output == "concat":
            fc_input_size = len(self.embeddable_features) * config.hidden_size
        else:
            fc_input_size = self.reduce_sequence.output_shape[-1] if len(
                self.embeddable_features) > 0 else 0
        self.fc_stack = FCStack(
            fc_input_size + concatenated_unembeddable_encoders_size,
            layers=config.fc_layers,
            num_layers=config.num_fc_layers,
            default_output_size=config.output_size,
            default_use_bias=config.use_bias,
            default_weights_initializer=config.weights_initializer,
            default_bias_initializer=config.bias_initializer,
            default_norm=config.norm,
            default_norm_params=config.norm_params,
            default_activation=config.fc_activation,
            default_dropout=config.fc_dropout,
            fc_residual=config.fc_residual,
        )

        self._empty_hidden = torch.empty([1, 0])
        self._embeddable_features_indices = torch.arange(
            0, len(self.embeddable_features))

        # Create empty tensor of shape [1, 0] to use as hidden in case there are no category or numeric/binary features.
        self.register_buffer("empty_hidden", self._empty_hidden)
        self.register_buffer("embeddable_features_indices",
                             self._embeddable_features_indices)

    @staticmethod
    def get_flatten_size(output_shape: torch.Size) -> torch.Size:
        size = torch.prod(torch.Tensor([*output_shape]))
        return torch.Size([size.type(torch.int32)])

    @property
    def output_shape(self) -> torch.Size:
        return self.fc_stack.output_shape

    def forward(
            self,
            inputs: Dict,  # encoder outputs
    ) -> Dict:
        unembeddable_encoder_outputs = [
            inputs[k]["encoder_output"] for k in inputs
            if k in self.unembeddable_features
        ]
        embeddable_encoder_outputs = [
            inputs[k]["encoder_output"] for k in inputs
            if k in self.embeddable_features
        ]

        batch_size = (embeddable_encoder_outputs[0].shape[0]
                      if len(embeddable_encoder_outputs) > 0 else
                      unembeddable_encoder_outputs[0].shape[0])

        # ================ Project & Concat embeddables ================
        if len(embeddable_encoder_outputs) > 0:

            # ============== Flatten =================
            embeddable_encoder_outputs = [
                torch.reshape(eo, [batch_size, -1])
                for eo in embeddable_encoder_outputs
            ]

            projected = [
                self.projectors[i](eo)
                for i, eo in enumerate(embeddable_encoder_outputs)
            ]
            hidden = torch.stack(projected)  # num_eo, bs, h
            hidden = torch.permute(hidden, (1, 0, 2))  # bs, num_eo, h

            if self.embed_input_feature_name:
                i_f_names_idcs = torch.reshape(
                    torch.arange(0,
                                 len(embeddable_encoder_outputs),
                                 device=self.device), [-1, 1])
                embedded_i_f_names = self.embed_i_f_name_layer(i_f_names_idcs)
                embedded_i_f_names = torch.unsqueeze(embedded_i_f_names, dim=0)
                embedded_i_f_names = torch.tile(embedded_i_f_names,
                                                [batch_size, 1, 1])
                if self.embed_input_feature_name == "add":
                    hidden = hidden + embedded_i_f_names
                else:
                    hidden = torch.cat([hidden, embedded_i_f_names], -1)

            # ================ Transformer Layers ================
            hidden = self.transformer_stack(hidden)

            # ================ Sequence Reduction ================
            hidden = self.reduce_sequence(hidden)
        else:
            # create empty tensor because there are no category features
            hidden = torch.empty([batch_size, 0], device=self.device)

        # ================ Concat Skipped ================
        if len(unembeddable_encoder_outputs) > 0:
            unembeddable_encoder_outputs = [
                torch.reshape(eo, [batch_size, -1])
                for eo in unembeddable_encoder_outputs
            ]
            # ================ Flatten ================
            if len(unembeddable_encoder_outputs) > 1:
                unembeddable_hidden = torch.cat(
                    unembeddable_encoder_outputs,
                    -1)  # tf.keras.layers.concatenate
            else:
                unembeddable_hidden = list(unembeddable_encoder_outputs)[0]
            unembeddable_hidden = self.layer_norm(unembeddable_hidden)

        else:
            # create empty tensor because there are no numeric/binary features
            unembeddable_hidden = torch.tile(self.empty_hidden,
                                             [batch_size, 0])

        # ================ Concat Skipped and Others ================
        hidden = torch.cat([hidden, unembeddable_hidden], -1)

        # ================ FC Layers ================
        hidden = self.fc_stack(hidden)

        return_data = {"combiner_output": hidden}

        if len(inputs) == 1:
            for key, value in [d for d in inputs.values()][0].items():
                if key != "encoder_output":
                    return_data[key] = value

        return return_data

    @staticmethod
    def get_schema_cls():
        return TabTransformerCombinerConfig
Example #3
class unetr(ModelBase):
    """
    This is the UNETR architecture (https://arxiv.org/abs/2103.10504), which combines a transformer encoder over image patches with a
    convolutional decoder. The smaller convolution, deconvolution and upsampling modules used below are each defined mainly by two
    parameters, the input channels (filters) and the output channels (filters), plus a few hyperparameters that remain constant across all the modules.
    """

    def __init__(
        self,
        parameters: dict,
    ):
        super(unetr, self).__init__(parameters)

        if not ("inner_patch_size" in parameters["model"]):
            parameters["model"]["inner_patch_size"] = parameters["patch_size"][0]
            print("Default inner patch size set to %d." % parameters["patch_size"][0])

        if "inner_patch_size" in parameters["model"]:
            if np.ceil(np.log2(parameters["model"]["inner_patch_size"])) != np.floor(
                np.log2(parameters["model"]["inner_patch_size"])
            ):
                sys.exit("The inner patch size must be a power of 2.")

        self.patch_size = parameters["model"]["inner_patch_size"]
        self.depth = int(np.log2(self.patch_size))
        patch_check = checkPatchDimensions(parameters["patch_size"], self.depth)

        if patch_check != self.depth and patch_check >= 2:
            print(
                "The image size is not large enough for desired depth. It is expected that each dimension of the image is divisible by 2^i, where i is in a integer greater than or equal to 2. Only the first %d layers will run."
                % patch_check
            )
        elif patch_check < 2:
            sys.exit(
                "The image size is not large enough for desired depth. It is expected that each dimension of the image is divisible by 2^i, where i is in a integer greater than or equal to 2."
            )

        if not ("num_heads" in parameters["model"]):
            parameters["model"]["num_heads"] = 12
            print(
                "Default number of heads in multi-head self-attention (MSA) set to 12."
            )

        if not ("embed_dim" in parameters["model"]):
            parameters["model"]["embed_dim"] = 768
            print("Default size of embedded dimension set to 768.")

        if self.n_dimensions == 2:
            self.img_size = parameters["patch_size"][0:2]
        elif self.n_dimensions == 3:
            self.img_size = parameters["patch_size"]

        self.num_layers = 3 * self.depth  # number of transformer layers
        self.out_layers = np.arange(2, self.num_layers, 3)
        self.num_heads = parameters["model"]["num_heads"]
        self.embed_size = parameters["model"]["embed_dim"]
        self.patch_dim = [i // self.patch_size for i in self.img_size]

        if not all([i % self.patch_size == 0 for i in self.img_size]):
            sys.exit(
                "The image size is not divisible by the patch size in at least 1 dimension. UNETR is not defined in this case."
            )
        if not all([self.patch_size <= i for i in self.img_size]):
            sys.exit("The inner patch size must be smaller than the input image.")

        self.transformer = _Transformer(
            img_size=self.img_size,
            patch_size=self.patch_size,
            in_feats=self.n_channels,
            embed_size=self.embed_size,
            num_heads=self.num_heads,
            mlp_dim=2048,
            num_layers=self.num_layers,
            out_layers=self.out_layers,
            Conv=self.Conv,
            Norm=self.Norm,
        )

        self.upsampling = ModuleList([])
        self.convs = ModuleList([])

        for i in range(0, self.depth - 1):
            # add deconv blocks
            tempconvs = nn.Sequential()
            tempconvs.add_module(
                "conv0",
                _DeconvConvBlock(
                    self.embed_size,
                    32 * 2**self.depth,
                    self.Norm,
                    self.Conv,
                    self.ConvTranspose,
                ),
            )

            for j in range(self.depth - 2, i, -1):
                tempconvs.add_module(
                    "conv%d" % j,
                    _DeconvConvBlock(
                        128 * 2**j,
                        128 * 2 ** (j - 1),
                        self.Norm,
                        self.Conv,
                        self.ConvTranspose,
                    ),
                )

            self.convs.append(tempconvs)

            # add upsampling
            self.upsampling.append(
                _UpsampleBlock(
                    128 * 2 ** (i + 1), self.Norm, self.Conv, self.ConvTranspose
                )
            )

        # add upsampling for transformer output (no convs)
        self.upsampling.append(
            self.ConvTranspose(
                in_channels=self.embed_size,
                out_channels=32 * 2**self.depth,
                kernel_size=2,
                stride=2,
                padding=0,
                output_padding=0,
            )
        )

        self.input_conv = nn.Sequential()
        self.input_conv.add_module(
            "conv1", _ConvBlock(self.n_channels, 32, self.Norm, self.Conv)
        )
        self.input_conv.add_module("conv2", _ConvBlock(32, 64, self.Norm, self.Conv))

        self.output_conv = nn.Sequential()
        self.output_conv.add_module("conv1", _ConvBlock(128, 64, self.Norm, self.Conv))
        self.output_conv.add_module("conv2", _ConvBlock(64, 64, self.Norm, self.Conv))
        self.output_conv.add_module(
            "conv3",
            out_conv(
                64,
                self.n_classes,
                conv_kwargs={
                    "kernel_size": 1,
                    "stride": 1,
                    "padding": 0,
                    "bias": False,
                },
                norm=self.Norm,
                conv=self.Conv,
                final_convolution_layer=self.final_convolution_layer,
                sigmoid_input_multiplier=self.sigmoid_input_multiplier,
            ),
        )

    def forward(self, x):
        """
        Parameters
        ----------
        x : Tensor
            Should be a 5D Tensor as [batch_size, channels, x_dims, y_dims, z_dims].

        Returns
        -------
        x : Tensor
            Returns a 5D Output Tensor as [batch_size, n_classes, x_dims, y_dims, z_dims].

        """
        transformer_out = self.transformer(x)

        y = self.upsampling[-1](
            transformer_out[-1]
            .transpose(-1, -2)
            .view(-1, self.embed_size, *self.patch_dim)
        )  # z12

        for i in range(len(self.convs) - 1, -1, -1):
            zi = (
                transformer_out[i]
                .transpose(-1, -2)
                .view(-1, self.embed_size, *self.patch_dim)
            )
            zi = self.convs[i](zi)
            zicat = torch.cat([zi, y], dim=1)
            y = self.upsampling[i](zicat)

        x = self.input_conv(x)
        x = torch.cat([x, y], dim=1)
        x = self.output_conv(x)

        return x
Example #4
def __init__(self, in_channels, out_channels):
    super().__init__()
    self.convs = ModuleList(
        [SAGEConv(in_channels, 128),
         SAGEConv(128, out_channels)])
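A possible companion forward pass for the two-layer module above, shown as a self-contained sketch (the class name TwoLayerSAGE and the dropout rate are illustrative; SAGEConv is assumed to be torch_geometric's, whose forward takes (x, edge_index)):

import torch.nn.functional as F
from torch.nn import Module, ModuleList
from torch_geometric.nn import SAGEConv

class TwoLayerSAGE(Module):
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.convs = ModuleList(
            [SAGEConv(in_channels, 128),
             SAGEConv(128, out_channels)])

    def forward(self, x, edge_index):
        # first hop: aggregate neighbours, then non-linearity and dropout
        x = F.relu(self.convs[0](x, edge_index))
        x = F.dropout(x, p=0.5, training=self.training)
        # second hop produces the output embeddings / class scores
        return self.convs[1](x, edge_index)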
Example #5
class CNV(Module):

    def __init__(self, num_classes, weight_bit_width, act_bit_width, in_bit_width, in_ch):
        super(CNV, self).__init__()

        self.conv_features = ModuleList()
        self.linear_features = ModuleList()

        self.conv_features.append(QuantIdentity( # for Q1.7 input format
            act_quant=CommonActQuant,
            bit_width=in_bit_width,
            min_val=- 1.0,
            max_val=1.0 - 2.0 ** (-7),
            narrow_range=False,
            restrict_scaling_type=RestrictValueType.POWER_OF_TWO))

        for out_ch, is_pool_enabled in CNV_OUT_CH_POOL:
            self.conv_features.append(QuantConv2d(
                kernel_size=KERNEL_SIZE,
                in_channels=in_ch,
                out_channels=out_ch,
                bias=False,
                weight_quant=CommonWeightQuant,
                weight_bit_width=weight_bit_width))
            in_ch = out_ch
            self.conv_features.append(BatchNorm2d(in_ch, eps=1e-4))
            self.conv_features.append(QuantIdentity(
                act_quant=CommonActQuant,
                bit_width=act_bit_width))
            if is_pool_enabled:
                self.conv_features.append(MaxPool2d(kernel_size=2))

        for in_features, out_features in INTERMEDIATE_FC_FEATURES:
            self.linear_features.append(QuantLinear(
                in_features=in_features,
                out_features=out_features,
                bias=False,
                weight_quant=CommonWeightQuant,
                weight_bit_width=weight_bit_width))
            self.linear_features.append(BatchNorm1d(out_features, eps=1e-4))
            self.linear_features.append(QuantIdentity(
                act_quant=CommonActQuant,
                bit_width=act_bit_width))

        self.linear_features.append(QuantLinear(
            in_features=LAST_FC_IN_FEATURES,
            out_features=num_classes,
            bias=False,
            weight_quant=CommonWeightQuant,
            weight_bit_width=weight_bit_width))
        self.linear_features.append(TensorNorm())

        for m in self.modules():
            if isinstance(m, QuantConv2d) or isinstance(m, QuantLinear):
                torch.nn.init.uniform_(m.weight.data, -1, 1)

    def clip_weights(self, min_val, max_val):
        for mod in self.conv_features:
            if isinstance(mod, QuantConv2d):
                mod.weight.data.clamp_(min_val, max_val)
        for mod in self.linear_features:
            if isinstance(mod, QuantLinear):
                mod.weight.data.clamp_(min_val, max_val)

    def forward(self, x):
        x = 2.0 * x - torch.tensor([1.0], device=x.device)
        for mod in self.conv_features:
            x = mod(x)
        x = x.view(x.shape[0], -1)
        for mod in self.linear_features:
            x = mod(x)
        return x
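A hedged usage sketch for CNV; it assumes the module-level constants referenced in __init__ (CNV_OUT_CH_POOL, KERNEL_SIZE, INTERMEDIATE_FC_FEATURES, LAST_FC_IN_FEATURES) are defined as in the BNN-PYNQ examples, where the network targets 32x32 RGB inputs:

# binary weights/activations with an 8-bit input quantizer (assumed configuration)
model = CNV(num_classes=10, weight_bit_width=1, act_bit_width=1,
            in_bit_width=8, in_ch=3)
x = torch.rand(2, 3, 32, 32)   # inputs are expected in [0, 1]
logits = model(x)              # forward() rescales the input to roughly [-1, 1)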
Example #6
class Generator(th.nn.Module):
    """
    Generator Module (block) of the GAN network
    Args:
        depth: required depth of the Network
        num_channels: number of output channels (default = 3 for RGB)
        latent_size: size of the latent manifold
        use_eql: whether to use equalized learning rate
    """

    def __init__(
        self,
        depth: int = 10,
        num_channels: int = 3,
        latent_size: int = 512,
        use_eql: bool = True,
    ) -> None:
        super().__init__()

        # object state:
        self.depth = depth
        self.latent_size = latent_size
        self.num_channels = num_channels
        self.use_eql = use_eql

        ConvBlock = EqualizedConv2d if use_eql else Conv2d

        self.layers = ModuleList(
            [GenInitialBlock(latent_size, nf(1), use_eql=self.use_eql)]
        )
        for stage in range(1, depth - 1):
            self.layers.append(GenGeneralConvBlock(nf(stage), nf(stage + 1), use_eql))

        self.rgb_converters = ModuleList(
            [
                ConvBlock(nf(stage), num_channels, kernel_size=(1, 1))
                for stage in range(1, depth)
            ]
        )

    def forward(self, x: Tensor, depth: int, alpha: float) -> Tensor:
        """
        forward pass of the Generator
        Args:
            x: input latent noise
            depth: depth from where the network's output is required
            alpha: value of alpha for fade-in effect

        Returns: generated images at the given depth's resolution
        """

        assert depth <= self.depth, f"Requested output depth {depth} cannot be produced"

        if depth == 2:
            y = self.rgb_converters[0](self.layers[0](x))
        else:
            y = x
            for layer_block in self.layers[: depth - 2]:
                y = layer_block(y)
            residual = interpolate(self.rgb_converters[depth - 3](y), scale_factor=2)
            straight = self.rgb_converters[depth - 2](self.layers[depth - 2](y))
            y = (alpha * straight) + ((1 - alpha) * residual)
        return y

    def get_save_info(self) -> Dict[str, Any]:
        return {
            "conf": {
                "depth": self.depth,
                "num_channels": self.num_channels,
                "latent_size": self.latent_size,
                "use_eql": self.use_eql,
            },
            "state_dict": self.state_dict(),
        }
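get_save_info packages the constructor configuration together with the weights, so a checkpoint can be rebuilt without extra bookkeeping. A round-trip sketch (the file name is illustrative and the Generator class above, with its building blocks, is assumed to be importable):

gen = Generator(depth=7, num_channels=3, latent_size=512)
torch.save(gen.get_save_info(), "generator.pt")

loaded = torch.load("generator.pt")
restored = Generator(**loaded["conf"])
restored.load_state_dict(loaded["state_dict"])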
Example #7
class TFC(Module):
    def __init__(self,
                 num_classes=10,
                 weight_bit_width=None,
                 act_bit_width=None,
                 in_bit_width=None,
                 in_ch=1,
                 in_features=(28, 28)):
        super(TFC, self).__init__()

        weight_quant_type = get_quant_type(weight_bit_width)
        act_quant_type = get_quant_type(act_bit_width)
        in_quant_type = get_quant_type(in_bit_width)

        self.features = ModuleList()
        self.features.append(get_act_quant(in_bit_width, in_quant_type))
        self.features.append(Dropout(p=IN_DROPOUT))
        in_features = reduce(mul, in_features)
        for out_features in FC_OUT_FEATURES:
            self.features.append(
                get_quant_linear(
                    in_features=in_features,
                    out_features=out_features,
                    per_out_ch_scaling=INTERMEDIATE_FC_PER_OUT_CH_SCALING,
                    bit_width=weight_bit_width,
                    quant_type=weight_quant_type))
            in_features = out_features
            self.features.append(BatchNorm1d(num_features=in_features))
            self.features.append(get_act_quant(act_bit_width, act_quant_type))
            self.features.append(Dropout(p=HIDDEN_DROPOUT))
        self.features.append(
            get_quant_linear(in_features=in_features,
                             out_features=num_classes,
                             per_out_ch_scaling=LAST_FC_PER_OUT_CH_SCALING,
                             bit_width=weight_bit_width,
                             quant_type=weight_quant_type))
        self.features.append(BatchNorm1d(num_features=num_classes))

        for m in self.modules():
            if isinstance(m, QuantLinear):
                torch.nn.init.uniform_(m.weight.data, -1, 1)

    def clip_weights(self, min_val, max_val):
        for mod in self.features:
            if isinstance(mod, QuantLinear):
                mod.weight.data.clamp_(min_val, max_val)

    def forward(self, x):
        x = x.view(x.shape[0], -1)
        x = 2.0 * x - torch.tensor([1.0], device=x.device)
        for mod in self.features:
            x = mod(x)
        return x
Example #8
def create_mobilenetv1_ssd_lite(num_classes, is_test=False):
    base_net = MobileNetV1(1001).model  # disable dropout layer

    source_layer_indexes = [
        12,
        14,
    ]
    extras = ModuleList([
        Sequential(
            Conv2d(in_channels=1024, out_channels=256, kernel_size=1),
            ReLU(),
            SeperableConv2d(in_channels=256,
                            out_channels=512,
                            kernel_size=3,
                            stride=2,
                            padding=1),
        ),
        Sequential(
            Conv2d(in_channels=512, out_channels=128, kernel_size=1),
            ReLU(),
            SeperableConv2d(in_channels=128,
                            out_channels=256,
                            kernel_size=3,
                            stride=2,
                            padding=1),
        ),
        Sequential(
            Conv2d(in_channels=256, out_channels=128, kernel_size=1),
            ReLU(),
            SeperableConv2d(in_channels=128,
                            out_channels=256,
                            kernel_size=3,
                            stride=2,
                            padding=1),
        ),
        Sequential(
            Conv2d(in_channels=256, out_channels=128, kernel_size=1), ReLU(),
            SeperableConv2d(in_channels=128,
                            out_channels=256,
                            kernel_size=3,
                            stride=2,
                            padding=1))
    ])

    regression_headers = ModuleList([
        SeperableConv2d(in_channels=512,
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=1024,
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=512,
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=256,
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=256,
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1),
        Conv2d(in_channels=256, out_channels=6 * 4, kernel_size=1),
    ])

    classification_headers = ModuleList([
        SeperableConv2d(in_channels=512,
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=1024,
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=512,
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=256,
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=256,
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        Conv2d(in_channels=256, out_channels=6 * num_classes, kernel_size=1),
    ])

    return SSD(num_classes,
               base_net,
               source_layer_indexes,
               extras,
               classification_headers,
               regression_headers,
               is_test=is_test,
               config=config)
Example #9
def create_vgg_ssd(num_classes, is_test=False):
    vgg_config = [
        64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',
        512, 512, 512
    ]
    base_net = ModuleList(vgg(vgg_config))

    source_layer_indexes = [
        (23, ScaledL2Norm(512, 20)),
        len(base_net),
    ]
    extras = ModuleList([
        Sequential(
            Conv2d(in_channels=1024, out_channels=256, kernel_size=1), ReLU(),
            Conv2d(in_channels=256,
                   out_channels=512,
                   kernel_size=3,
                   stride=2,
                   padding=1), ReLU()),
        Sequential(
            Conv2d(in_channels=512, out_channels=128, kernel_size=1), ReLU(),
            Conv2d(in_channels=128,
                   out_channels=256,
                   kernel_size=3,
                   stride=2,
                   padding=1), ReLU()),
        Sequential(Conv2d(in_channels=256, out_channels=128, kernel_size=1),
                   ReLU(),
                   Conv2d(in_channels=128, out_channels=256, kernel_size=3),
                   ReLU()),
        Sequential(Conv2d(in_channels=256, out_channels=128, kernel_size=1),
                   ReLU(),
                   Conv2d(in_channels=128, out_channels=256, kernel_size=3),
                   ReLU())
    ])

    regression_headers = ModuleList([
        Conv2d(in_channels=512, out_channels=4 * 4, kernel_size=3, padding=1),
        Conv2d(in_channels=1024, out_channels=6 * 4, kernel_size=3, padding=1),
        Conv2d(in_channels=512, out_channels=6 * 4, kernel_size=3, padding=1),
        Conv2d(in_channels=256, out_channels=6 * 4, kernel_size=3, padding=1),
        Conv2d(in_channels=256, out_channels=4 * 4, kernel_size=3, padding=1),
        Conv2d(in_channels=256, out_channels=4 * 4, kernel_size=3,
               padding=1),  # TODO: change to kernel_size=1, padding=0?
    ])

    classification_headers = ModuleList([
        Conv2d(in_channels=512,
               out_channels=4 * num_classes,
               kernel_size=3,
               padding=1),
        Conv2d(in_channels=1024,
               out_channels=6 * num_classes,
               kernel_size=3,
               padding=1),
        Conv2d(in_channels=512,
               out_channels=6 * num_classes,
               kernel_size=3,
               padding=1),
        Conv2d(in_channels=256,
               out_channels=6 * num_classes,
               kernel_size=3,
               padding=1),
        Conv2d(in_channels=256,
               out_channels=4 * num_classes,
               kernel_size=3,
               padding=1),
        Conv2d(in_channels=256,
               out_channels=4 * num_classes,
               kernel_size=3,
               padding=1),  # TODO: change to kernel_size=1, padding=0?
    ])

    return SSD(num_classes,
               base_net,
               source_layer_indexes,
               extras,
               classification_headers,
               regression_headers,
               is_test=is_test,
               config=config)
Example #10
class Generator(Module):
    def __init__(self, gen_channel, texture_size, style_size, gen_nonlocal_loc,
                 gen_lr, img_size, device):
        super().__init__()
        if device == 'cpu':
            self.use_gpu = False
        elif 'cuda:' in device:
            self.use_gpu = True
        else:
            raise Exception('invalid argument in Network2.Generator')
        self.gen_channel = gen_channel
        self.basic_texture = torch.nn.Parameter(
            torch.rand(gen_channel, texture_size, texture_size))
        self.weight_scaling_1 = Weight_Scaling(gen_channel * 3 * 3,
                                               LEAKY_RELU_GAIN)
        self.conv = Conv2d(in_channels=gen_channel,
                           out_channels=gen_channel,
                           kernel_size=3,
                           stride=1,
                           padding=1)
        self.LeakyReLU = LeakyReLU(0.2)
        self.style_scaling = Weight_Scaling(style_size, 1)
        self.style_affine_1 = Linear(style_size, gen_channel * 2)
        self.noise_scaler_1 = torch.nn.Parameter(
            torch.zeros(gen_channel).view(1, gen_channel, 1, 1))
        self.style_affine_2 = Linear(style_size, gen_channel * 2)
        self.noise_scaler_2 = torch.nn.Parameter(
            torch.zeros(gen_channel).view(1, gen_channel, 1, 1))
        self.module_list = ModuleList()
        in_channels = gen_channel
        in_size = 4
        cnt = 0
        while True:
            cnt += 1
            self.module_list.append(
                Generator_Conv(in_channels, in_channels // 2, 3, style_size,
                               self.use_gpu))
            if cnt == gen_nonlocal_loc:
                print('gen: non_local block inserted, in_size: ', 2 * in_size)
                self.module_list.append(Non_Local(in_channels // 2))
            in_channels //= 2
            in_size *= 2
            if in_size >= img_size:
                break
        self.weight_scaling_2 = Weight_Scaling(in_channels * 1 * 1, 1)
        self.last_layer = Conv2d(in_channels=in_channels,
                                 out_channels=3,
                                 kernel_size=1,
                                 stride=1)
        self.to(device)
        self.opt = Adam(self.parameters(), lr=gen_lr, betas=BETAS)
        self.apply(init_weights)

    def forward(self, style_base):
        batch_size = style_base.size()[0]
        x = self.basic_texture.repeat(batch_size, 1, 1, 1)
        noise = make_noise_img((batch_size, 1, 4, 4))
        if self.use_gpu:
            with torch.cuda.device_of(style_base):
                noise = noise.cuda()
        else:
            noise = noise.cpu()
        x = x + self.noise_scaler_1 * noise
        x = AdaIN(
            x,
            self.style_affine_1(self.style_scaling(style_base)).view(
                -1, 2 * self.gen_channel, 1, 1))
        x = self.LeakyReLU(
            self.conv(self.weight_scaling_1(x)) + self.noise_scaler_2 * noise)
        x = AdaIN(
            x,
            self.style_affine_2(self.style_scaling(style_base)).view(
                -1, 2 * self.gen_channel, 1, 1))
        for m in self.module_list:
            if type(m) != Non_Local:
                x = m(x, style_base)
            else:
                x = m(x)
        x = self.weight_scaling_2(x)
        # workaround for a PyTorch 1.4.0 bug with 1x1-kernel convolutions;
        # making the tensor contiguous avoids it
        x = x.contiguous()
        x = torch.tanh(self.last_layer(x))
        return x
Example #11
def __init__(self, num_langs, encoder_args):
    super(MultiEncoder, self).__init__()
    self._num_langs = num_langs
    self._encoders = ModuleList(
        [Encoder(*encoder_args) for _ in range(num_langs)])
Example #12
class Discriminator(Module):
    def __init__(self, disc_first_channel, disc_last_size, disc_nonlocal_loc,
                 disc_lr, img_size, device):
        super().__init__()
        if not (device == 'cpu' or 'cuda:' in device):
            raise Exception('invalid argument in Network2.Discriminator')
        self.module_list = ModuleList()
        in_channels = 3
        out_channels = disc_first_channel
        self.module_list.append(
            Weight_Scaling(in_channels * 1 * 1, LEAKY_RELU_GAIN))
        self.module_list.append(
            Conv2d(in_channels=in_channels,
                   out_channels=disc_first_channel,
                   kernel_size=1,
                   stride=1))
        self.module_list.append(LeakyReLU(0.2))
        in_size = img_size
        cnt = 0
        while True:
            cnt += 1
            in_channels = out_channels
            out_channels *= 2
            if in_size == disc_last_size:
                break
            self.module_list.append(Disc_Conv(in_channels, out_channels))
            if cnt == disc_nonlocal_loc:
                print('disc: non_local block inserted, in_size: ',
                      in_size // 2)
                self.module_list.append(Non_Local(out_channels))
            in_size //= 2
        self.module_list.append(Minibatch_Stddev())
        self.module_list.append(
            Weight_Scaling((in_channels + 1) * 3 * 3, LEAKY_RELU_GAIN))
        self.module_list.append(
            Conv2d(in_channels=in_channels + 1,
                   out_channels=in_channels,
                   kernel_size=3,
                   stride=1,
                   padding=1))
        self.module_list.append(LeakyReLU(0.2))
        self.module_list.append(
            Weight_Scaling(in_channels * 4 * 4, LEAKY_RELU_GAIN))
        self.module_list.append(
            Conv2d(in_channels=in_channels,
                   out_channels=in_channels,
                   kernel_size=4,
                   stride=1,
                   padding=0))
        self.module_list.append(LeakyReLU(0.2))
        self.module_list.append(Flatten())
        self.module_list.append(Weight_Scaling(in_channels, 1))
        self.module_list.append(Linear(in_channels, 1))
        self.to(device)
        self.opt = Adam(self.parameters(), lr=disc_lr, betas=BETAS)
        self.apply(init_weights)

    def forward(self, x):
        for m in self.module_list:
            x = m(x)
        return x
Example #13
def __init__(self, base_kernels):
    super(BatchKernel, self).__init__(batch_size=len(base_kernels))
    self.base_kernels = ModuleList(base_kernels)
Example #14
    def __init__(self, base_means):
        super(BatchMean, self).__init__()

        self.base_means = ModuleList(base_means)
Example #15
def __init__(self, *kernels):
    super(AdditiveKernel, self).__init__()
    self.kernels = ModuleList(kernels)
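A minimal sketch of what such an additive kernel's forward might do with the ModuleList above (an illustration, not the actual gpytorch implementation): evaluate each component kernel and sum the results.

def forward(self, x1, x2, **params):
    # sum of the component kernels' covariance terms
    res = self.kernels[0](x1, x2, **params)
    for kernel in self.kernels[1:]:
        res = res + kernel(x1, x2, **params)
    return res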
Example #16
class PathNetWithMP(torch.nn.Module):
    path_layers : Sequence[PathConvolutionLayer]

    def __init__(self, hidden_channels, out_channels, num_layers, dropout=0.0,
                 path_lengths=None, path_depth=1, inter_message_passing=True):
        super(PathNetWithMP, self).__init__()
        self.num_layers = num_layers
        self.dropout = dropout
        self.inter_message_passing = inter_message_passing

        if path_lengths is None:
            path_lengths = [3, 4]
        self.path_lengths = path_lengths
        self.path_keys = [str(k) for k in path_lengths]

        self._build_base_mp_layers(hidden_channels, num_layers)

        self.lin = Linear(hidden_channels, out_channels)

        self.path_layers = ModuleList(
            PathConvolutionLayer(hidden_channels, path_depth, path_lengths, dropout)
            for _ in range(num_layers))

        self.path_embedding = PathEmbeddingLayer(6, hidden_channels, path_lengths)


    def _build_base_mp_layers(self, hidden_channels, num_layers):
        self.atom_encoder = blocks.AtomEncoder(hidden_channels)
        self.bond_encoders = ModuleList()
        self.atom_convs = ModuleList()
        self.atom_batch_norms = ModuleList()

        for _ in range(num_layers):
            self.bond_encoders.append(blocks.BondEncoder(hidden_channels))
            nn = Sequential(
                Linear(hidden_channels, 2 * hidden_channels),
                BatchNorm1d(2 * hidden_channels),
                ReLU(),
                Linear(2 * hidden_channels, hidden_channels),
            )
            self.atom_convs.append(GINEConv(nn, train_eps=True))
            self.atom_batch_norms.append(BatchNorm1d(hidden_channels))
        self.atom_lin = Linear(hidden_channels, hidden_channels)

    def forward(self, data):
        x = self.atom_encoder(data.x.squeeze())
        x_paths = {k: data['x_clique_%s_path' % k] for k in self.path_keys}
        atom_to_path_mapping = {k: data['atom2clique_%s_path' % k] for k in self.path_keys}

        # Apply initial embedding of path features.
        x_paths = self.path_embedding(x_paths)

        for i in range(self.num_layers):
            edge_attr = self.bond_encoders[i](data.edge_attr)
            x = self.atom_convs[i](x, data.edge_index, edge_attr)
            x = self.atom_batch_norms[i](x)
            x = F.relu(x)
            x = F.dropout(x, self.dropout, training=self.training)

            if self.inter_message_passing:
                # Blocks used in this layer
                x, x_paths = self.path_layers[i](x, x_paths, atom_to_path_mapping)

        # Aggregate output and run an MLP on top.
        x = scatter(x, data.batch, dim=0, reduce='mean')
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.atom_lin(x)
        x = F.relu(x)
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.lin(x)
        return x
Example #17
def __init__(self, *kernels):
    super(ProductKernel, self).__init__()
    self.kernels = ModuleList(kernels)
Example #18
    def __init__(self,
                 n_dim: int,
                 e_dim: int,
                 config,
                 position_encoder=None,
                 use_pos=False,
                 use_cuda=False,
                 q_only=False):
        super(AMPNN, self).__init__()
        self.h_dim = config['F_DIM']
        self.c_dims = config['C_DIMS']
        self.he_dim = config['HE_DIM']
        self.layers = len(config['C_DIMS'])
        self.m_radius = config['M_RADIUS']
        self.dropout = config['DROPOUT']
        self.position_encoders = [position_encoder]
        self.use_pos = use_pos
        self.use_cuda = use_cuda
        assert not (use_pos and position_encoder)
        if use_pos:
            self.pos_dim = config['POS_DIM']
            self.pos_trans = Linear(3, self.pos_dim)
        elif position_encoder:
            self.pos_dim = config['POS_DIM']
            if position_encoder.use_rdkit:
                self.pos_dim = 3
        else:
            self.pos_dim = 0
        self.q_only = q_only
        self.temp_mask = torch.tensor([0] * int(self.pos_dim / 2) +
                                      [1] * int(self.pos_dim / 2),
                                      dtype=torch.float32)
        if self.use_cuda:
            self.temp_mask = self.temp_mask.cuda()

        in_dims = [self.h_dim] * (self.layers + 1)
        self.e_dim = e_dim
        self.FC_N = Linear(n_dim, self.h_dim, bias=True)
        self.FC_E = Linear(e_dim, self.he_dim, bias=True)
        # if self.position_encoders[0] or self.use_pos:
        #     self.Ms = ModuleList([PosConcatMesPassing(in_dims[i], self.he_dim, self.pos_dim, self.c_dims[i],
        #                                               dropout=self.dropout)
        #                           for i in range(self.layers)])
        # else:
        #     #     self.Ms = ModuleList([ConcatMesPassing(in_dims[i], self.he_dim, self.c_dims[i], dropout=self.dropout)
        #     #                           for i in range(self.layers)])
        #     self.Ms = ModuleList([MolGATMesPassing(in_dims[i], self.he_dim, self.pos_dim, self.c_dims[i],
        #                                            dropout=self.dropout, use_cuda=use_cuda)
        #                           for i in range(self.layers)])
        self.Ms = ModuleList([
            MolGATMesPassing(in_dims[i],
                             self.he_dim,
                             self.pos_dim,
                             self.c_dims[i],
                             dropout=self.dropout,
                             use_cuda=use_cuda) for i in range(self.layers)
        ])
        self.Us = ModuleList([
            GRUAggregation(self.c_dims[i], in_dims[i])
            for i in range(self.layers)
        ])
        self.R = AlignAttendPooling(in_dims[-1],
                                    in_dims[-1],
                                    self.pos_dim,
                                    radius=self.m_radius,
                                    use_cuda=use_cuda,
                                    dropout=self.dropout)
Example #19
class ProGrowGenerator(nn.Module):
    """ Generator of the progressive growing GAN network """
    def __init__(self, depth=7, latent_size=512, use_eql=True):
        """
        constructor for the Generator class
        :param depth: required depth of the Network
        :param latent_size: size of the latent manifold
        :param use_eql: whether to use equalized learning rate
        """
        from torch.nn import ModuleList, Upsample

        super(ProGrowGenerator, self).__init__()

        assert latent_size != 0 and ((latent_size & (latent_size - 1)) == 0), \
            "latent size not a power of 2"
        if depth >= 4:
            assert latent_size >= np.power(
                2, depth - 4), "latent size will diminish to zero"

        # state of the generator:
        self.use_eql = use_eql
        self.depth = depth
        self.latent_size = latent_size

        # register the modules required for the GAN
        self.initial_block = ProGrowInitialBlock(self.latent_size,
                                                 use_eql=self.use_eql)

        # create a module list of the other required general convolution blocks
        self.layers = ModuleList([])  # initialize to empty list

        # create the ToRGB layers for various outputs:
        if self.use_eql:
            from .layers import equalized_conv2d
            self.toRGB = lambda in_channels: \
                equalized_conv2d(in_channels, 3, (1, 1), bias=True)
        else:
            from torch.nn import Conv2d
            self.toRGB = lambda in_channels: Conv2d(
                in_channels, 3, (1, 1), bias=True)

        self.rgb_converters = ModuleList([self.toRGB(self.latent_size)])

        # create the remaining layers
        for i in range(self.depth - 1):
            if i <= 2:
                layer = ProGrowConvBlock(self.latent_size,
                                         self.latent_size,
                                         use_eql=self.use_eql)
                rgb = self.toRGB(self.latent_size)
            else:
                layer = ProGrowConvBlock(
                    int(self.latent_size // np.power(2, i - 3)),
                    int(self.latent_size // np.power(2, i - 2)),
                    use_eql=self.use_eql)
                rgb = self.toRGB(int(self.latent_size // np.power(2, i - 2)))
            self.layers.append(layer)
            self.rgb_converters.append(rgb)

        # register the temporary upsampler
        self.temporaryUpsampler = Upsample(scale_factor=2)

    def forward(self, x, depth, alpha):
        """
        forward pass of the Generator
        :param x: input noise
        :param depth: current depth from where output is required
        :param alpha: value of alpha for fade-in effect
        :return: y => output
        """

        # assert depth < self.depth, "Requested output depth cannot be produced"

        y = self.initial_block(x)

        if depth > 0:
            for block in self.layers[:depth - 1]:
                y = block(y)

            residual = self.rgb_converters[depth - 1](
                self.temporaryUpsampler(y))
            straight = self.rgb_converters[depth](self.layers[depth - 1](y))

            out = (alpha * straight) + ((1 - alpha) * residual)

        else:
            out = self.rgb_converters[0](y)

        return out
Example #20
def __init__(self, flows, dbgprint=False):
    super().__init__()
    self.flows = ModuleList(flows)
    self.dbgprint = dbgprint
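A sketch of how such a flow container is commonly applied (an assumption about the surrounding class: each flow is taken to return the transformed tensor together with its log-det-Jacobian contribution, and torch is assumed to be imported):

def forward(self, x):
    # compose the flows and accumulate the log-det-Jacobian terms
    log_det = torch.zeros(x.shape[0], device=x.device)
    for flow in self.flows:
        if self.dbgprint:
            print(type(flow).__name__, tuple(x.shape))
        x, ld = flow(x)
        log_det = log_det + ld
    return x, log_det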
Example #21
    def __init__(self, train_x, train_y, X_max_v, likelihood, MAP=True):
        '''
        Inputs:
            - train_x:
            - train_y:
            - likelihood:
        '''
        super(MultitaskGPModel, self).__init__(train_x, train_y, likelihood)

        # define priors
        outputscale_prior = gpytorch.priors.GammaPrior(concentration=1,
                                                       rate=10)
        lengthscale_prior = gpytorch.priors.GammaPrior(concentration=4,
                                                       rate=1 / 5)
        rho_prior = gpytorch.priors.UniformPrior(-1, 1)
        unit_outputscale_prior = gpytorch.priors.GammaPrior(concentration=1,
                                                            rate=10)
        unit_lengthscale_prior = gpytorch.priors.GammaPrior(concentration=4,
                                                            rate=1 / 5)
        drift_outputscale_prior = gpytorch.priors.GammaPrior(concentration=1,
                                                             rate=20)
        drift_lengthscale_prior = gpytorch.priors.GammaPrior(concentration=5,
                                                             rate=1 / 5)
        weekday_prior = gpytorch.priors.GammaPrior(concentration=1, rate=10)
        day_prior = gpytorch.priors.GammaPrior(concentration=1, rate=10)

        # treatment/control groups
        self.num_groups = 2
        self.num_units = len(train_x[:, -3].unique())

        # categorical features: group/weekday/day/unit id
        self.X_max_v = X_max_v
        # dim of covariates
        self.d = list(train_x.shape)[1] - 1

        # same mean of unit bias for all units, could extend this to be unit-dependent
        # self.unit_mean_module = gpytorch.means.ConstantMean()
        self.unit_mean_module = ConstantVectorMean(d=self.num_units)
        self.group_mean_module = ConstantVectorMean(d=self.num_groups)

        # marginalize weekday/day/unit id effects
        self.x_covar_module = ModuleList(
            [constantKernel(num_tasks=v + 1) for v in self.X_max_v])

        # self.x_covar_module = ModuleList([constantKernel(num_tasks=X_max_v[0]+1, prior=weekday_prior),
        #         constantKernel(num_tasks=X_max_v[1]+1, prior=day_prior),
        #         constantKernel(num_tasks=X_max_v[2]+1)])

        # group-level time trend
        self.group_t_covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel(\
                active_dims=torch.tensor([self.d]),\
                     lengthscale_prior=lengthscale_prior if MAP else None),\
                    outputscale_prior=outputscale_prior if MAP else None)

        # indicator covariances
        self.x_indicator_module = ModuleList(
            [myIndicatorKernel(num_tasks=v + 1) for v in X_max_v])
        self.group_index_module = myIndexKernel(num_tasks=self.num_groups,\
             rho_prior=rho_prior if MAP else None)

        # unit-level zero-meaned time trend
        self.unit_t_covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel(\
            active_dims=torch.tensor([self.d]),\
            lengthscale_prior=unit_lengthscale_prior if MAP else None),\
            outputscale_prior=unit_outputscale_prior if MAP else None)

        self.unit_indicator_module = myIndicatorKernel(
            num_tasks=len(train_x[:, -3].unique()))

        # drift process for treatment effect
        self.drift_t_module = DriftScaleKernel(gpytorch.kernels.RBFKernel(\
                active_dims=torch.tensor([self.d]),\
                lengthscale_prior=drift_lengthscale_prior if MAP else None),\
                outputscale_prior=drift_outputscale_prior if MAP else None)
        self.drift_indicator_module = DriftIndicatorKernel(
            num_tasks=self.num_groups)
Example #22
def _get_clones(module, N):
    return ModuleList([copy.deepcopy(module) for i in range(N)])
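_get_clones mirrors the helper used inside torch.nn.Transformer: copy.deepcopy gives every clone its own parameters. A small usage sketch (the Linear layer is just an illustrative module):

from torch.nn import Linear

encoder_layers = _get_clones(Linear(16, 16), 4)
assert len(encoder_layers) == 4
# deepcopy means the clones do not share weight storage
assert encoder_layers[0].weight.data_ptr() != encoder_layers[1].weight.data_ptr()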
Example #23
    def __init__(self,
                 use_model,
                 node_features,
                 n_layers,
                 k_nn,
                 hidden_channels=300,
                 latent_channels=100,
                 loop=False):
        super(ModelGNN, self).__init__()

        # Graph layers
        layers = []
        in_channels = node_features
        for i in range(n_layers):

            # Choose the model
            if use_model == "DeepSet":
                lay = Sequential(Linear(in_channels, hidden_channels), ReLU(),
                                 Linear(hidden_channels, hidden_channels),
                                 ReLU(),
                                 Linear(hidden_channels, latent_channels))

            elif use_model == "GCN":
                lay = GCNConv(in_channels, latent_channels)

            elif use_model == "PointNet":
                lay = PointNetLayer(in_channels, hidden_channels,
                                    latent_channels)

            elif use_model == "EdgeNet":
                lay = EdgeLayer(in_channels, hidden_channels, latent_channels)
                #lay = EdgeConv(Sequential(Linear(2*in_channels, hidden_channels),ReLU(),Linear(hidden_channels, hidden_channels),ReLU(),Linear(hidden_channels, latent_channels)))  # Using the pytorch-geometric implementation, same result

            elif use_model == "EdgePoint":
                lay = EdgePointLayer(in_channels, hidden_channels,
                                     latent_channels)

            elif use_model == "MetaNet":
                if use_model == "MetaNet" and i == 2: in_channels = 610
                #lay = MetaLayer(node_model=NodeModel(in_channels, hidden_channels, latent_channels), global_model=GlobalModel(in_channels, hidden_channels, latent_channels))
                lay = MetaLayer(node_model=NodeModel(
                    in_channels, hidden_channels, latent_channels))

            else:
                raise ValueError(f"Model not known: {use_model}")

            layers.append(lay)
            in_channels = latent_channels
            if use_model == "MetaNet":
                in_channels = (node_features + latent_channels * 3 + 2)

        self.layers = ModuleList(layers)

        lin_in = latent_channels * 3 + 2
        if use_model == "MetaNet":
            lin_in = (in_channels + latent_channels * 3 + 2) * 3 + 2
        if use_model == "MetaNet" and n_layers == 3: lin_in = 2738
        self.lin = Sequential(Linear(lin_in, latent_channels), ReLU(),
                              Linear(latent_channels, latent_channels), ReLU(),
                              Linear(latent_channels, 2))

        self.k_nn = k_nn
        self.pooled = 0.
        self.h = 0.
        self.loop = loop
        if use_model == "PointNet" or use_model == "GCN": self.loop = True
        self.namemodel = use_model
Example #24
def create_mobilenetv3_ssd_lite(num_classes, width_mult=1.0, is_test=False):
    base_net = MobileNetV3().features

    source_layer_indexes = [
        GraphPath(11, 'conv'),
        20,
    ]
    extras = ModuleList([
        InvertedResidual(1280, 512, stride=2, expand_ratio=0.2),
        InvertedResidual(512, 256, stride=2, expand_ratio=0.25),
        InvertedResidual(256, 256, stride=2, expand_ratio=0.5),
        InvertedResidual(256, 64, stride=2, expand_ratio=0.25)
    ])

    regression_headers = ModuleList([
        SeperableConv2d(in_channels=round(288 * width_mult),
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1,
                        onnx_compatible=False),
        SeperableConv2d(in_channels=1280,
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1,
                        onnx_compatible=False),
        SeperableConv2d(in_channels=512,
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1,
                        onnx_compatible=False),
        SeperableConv2d(in_channels=256,
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1,
                        onnx_compatible=False),
        SeperableConv2d(in_channels=256,
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1,
                        onnx_compatible=False),
        Conv2d(in_channels=64, out_channels=6 * 4, kernel_size=1),
    ])

    classification_headers = ModuleList([
        SeperableConv2d(in_channels=round(288 * width_mult),
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=1280,
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=512,
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=256,
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=256,
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        Conv2d(in_channels=64, out_channels=6 * num_classes, kernel_size=1),
    ])

    return SSD(num_classes,
               base_net,
               source_layer_indexes,
               extras,
               classification_headers,
               regression_headers,
               is_test=is_test,
               config=config)
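A hedged usage sketch for the factory above, assuming the factory and its SSD/MobileNetV3 dependencies are importable; the (confidences, locations) return convention follows the pytorch-ssd style wrapper and is an assumption here, not something shown in this snippet.
import torch

num_classes = 21  # e.g. 20 VOC classes plus background
net = create_mobilenetv3_ssd_lite(num_classes)
net.eval()

with torch.no_grad():
    dummy = torch.randn(1, 3, 300, 300)  # SSD-Lite variants are commonly run at 300x300
    confidences, locations = net(dummy)  # assumed return signature of the SSD wrapper
print(confidences.shape, locations.shape)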
Example #25
def create_mobilenetv2_ssd_lite(num_classes,
                                width_mult=1.0,
                                use_batch_norm=True,
                                onnx_compatible=False,
                                is_test=False):
    base_net = MobileNetV2(width_mult=width_mult,
                           use_batch_norm=use_batch_norm,
                           onnx_compatible=onnx_compatible).features

    source_layer_indexes = [
        GraphPath(14, 'conv', 3),
        19,
    ]
    extras = ModuleList([
        InvertedResidual(1280, 512, stride=2, expand_ratio=0.2),
        InvertedResidual(512, 256, stride=2, expand_ratio=0.25),
        InvertedResidual(256, 256, stride=2, expand_ratio=0.5),
        InvertedResidual(256, 64, stride=2, expand_ratio=0.25)
    ])

    regression_headers = ModuleList([
        SeperableConv2d(in_channels=round(576 * width_mult),
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1,
                        onnx_compatible=False),
        SeperableConv2d(in_channels=1280,
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1,
                        onnx_compatible=False),
        SeperableConv2d(in_channels=512,
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1,
                        onnx_compatible=False),
        SeperableConv2d(in_channels=256,
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1,
                        onnx_compatible=False),
        SeperableConv2d(in_channels=256,
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1,
                        onnx_compatible=False),
        Conv2d(in_channels=64, out_channels=6 * 4, kernel_size=1),
    ])

    classification_headers = ModuleList([
        SeperableConv2d(in_channels=round(576 * width_mult),
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=1280,
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=512,
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=256,
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=256,
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        Conv2d(in_channels=64, out_channels=6 * num_classes, kernel_size=1),
    ])

    return SSD(num_classes,
               base_net,
               source_layer_indexes,
               extras,
               classification_headers,
               regression_headers,
               is_test=is_test,
               config=config,
               device=torch.device('cpu'))
Example #26
def _clones(module: Module, num_layers: int):
    """Produce N identical layers."""
    return ModuleList([deepcopy(module) for _ in range(num_layers)])
Example #27
    def __init__(self,
                 input_features: Dict[str, "InputFeature"] = None,
                 config: TabTransformerCombinerConfig = None,
                 **kwargs):
        super().__init__(input_features)
        self.name = "TabTransformerCombiner"
        logger.debug(f"Initializing {self.name}")

        if config.reduce_output is None:
            raise ValueError("TabTransformer requires the `reduce_output` "
                             "parameter")
        self.reduce_output = config.reduce_output
        self.reduce_sequence = SequenceReducer(
            reduce_mode=config.reduce_output,
            max_sequence_length=len(input_features),
            encoding_size=config.hidden_size)
        self.supports_masking = True

        self.embed_input_feature_name = config.embed_input_feature_name
        if self.embed_input_feature_name:
            vocab = [
                i_f for i_f in input_features
                # only embeddable features enter the vocab; NUMBER and BINARY are excluded
                if input_features[i_f].type() not in {NUMBER, BINARY}
            ]
            if self.embed_input_feature_name == "add":
                self.embed_i_f_name_layer = Embed(vocab,
                                                  config.hidden_size,
                                                  force_embedding_size=True)
                projector_size = config.hidden_size
            elif isinstance(self.embed_input_feature_name, int):
                if self.embed_input_feature_name > config.hidden_size:
                    raise ValueError("TabTransformer parameter "
                                     "`embed_input_feature_name` "
                                     "specified integer value ({}) "
                                     "needs to be smaller than "
                                     "`hidden_size` ({}).".format(
                                         self.embed_input_feature_name,
                                         config.hidden_size))
                self.embed_i_f_name_layer = Embed(
                    vocab,
                    self.embed_input_feature_name,
                    force_embedding_size=True,
                )
                projector_size = config.hidden_size - self.embed_input_feature_name
            else:
                raise ValueError("TabTransformer parameter "
                                 "`embed_input_feature_name` "
                                 "should be either None, an integer or `add`, "
                                 "the current value is "
                                 "{}".format(self.embed_input_feature_name))
        else:
            projector_size = config.hidden_size

        logger.debug("  Projectors")
        self.unembeddable_features = []
        self.embeddable_features = []
        for i_f in input_features:
            if input_features[i_f].type() in {NUMBER, BINARY}:
                self.unembeddable_features.append(i_f)
            else:
                self.embeddable_features.append(i_f)

        self.projectors = ModuleList()
        for i_f in self.embeddable_features:
            flatten_size = self.get_flatten_size(
                input_features[i_f].output_shape)
            self.projectors.append(Linear(flatten_size[0], projector_size))

        # input to layer_norm are the encoder outputs for unembeddable features,
        # which are number or binary features.  These should be 2-dim
        # tensors.  Size should be concatenation of these tensors.
        concatenated_unembeddable_encoders_size = 0
        for i_f in self.unembeddable_features:
            concatenated_unembeddable_encoders_size += input_features[
                i_f].output_shape[0]

        self.layer_norm = torch.nn.LayerNorm(
            concatenated_unembeddable_encoders_size)

        logger.debug("  TransformerStack")
        self.transformer_stack = TransformerStack(
            input_size=config.hidden_size,
            max_sequence_length=len(self.embeddable_features),
            hidden_size=config.hidden_size,
            # todo: can we just use projector_size? # hidden_size,
            num_heads=config.num_heads,
            output_size=config.transformer_output_size,
            num_layers=config.num_layers,
            dropout=config.dropout,
        )

        logger.debug("  FCStack")

        # determine input size to fully connected layer based on reducer
        if config.reduce_output == "concat":
            fc_input_size = len(self.embeddable_features) * config.hidden_size
        else:
            fc_input_size = (self.reduce_sequence.output_shape[-1]
                             if len(self.embeddable_features) > 0 else 0)
        self.fc_stack = FCStack(
            fc_input_size + concatenated_unembeddable_encoders_size,
            layers=config.fc_layers,
            num_layers=config.num_fc_layers,
            default_output_size=config.output_size,
            default_use_bias=config.use_bias,
            default_weights_initializer=config.weights_initializer,
            default_bias_initializer=config.bias_initializer,
            default_norm=config.norm,
            default_norm_params=config.norm_params,
            default_activation=config.fc_activation,
            default_dropout=config.fc_dropout,
            fc_residual=config.fc_residual,
        )

        self._empty_hidden = torch.empty([1, 0])
        self._embeddable_features_indices = torch.arange(
            0, len(self.embeddable_features))

        # Create empty tensor of shape [1, 0] to use as hidden in case there are no category or numeric/binary features.
        self.register_buffer("empty_hidden", self._empty_hidden)
        self.register_buffer("embeddable_features_indices",
                             self._embeddable_features_indices)
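To make the size bookkeeping above concrete, here is a tiny illustration with hypothetical numbers (not Ludwig code): the FC stack input size is the reduced transformer output over the embeddable features plus the concatenated, layer-normed unembeddable (number/binary) encoder outputs, which is what the construction above implies.
hidden_size = 64
num_embeddable_features = 4
unembeddable_encoder_sizes = [1, 1]          # e.g. one number and one binary feature

concatenated_unembeddable = sum(unembeddable_encoder_sizes)   # 2
fc_input_size = num_embeddable_features * hidden_size         # reduce_output == "concat" -> 256
print(fc_input_size + concatenated_unembeddable)              # 258 inputs to the FCStack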
Example #28
    def __init__(
        self,
        train_X: Tensor,
        train_Y: Tensor,
        likelihood: Optional[Likelihood] = None,
        covar_modules: Optional[List[Kernel]] = None,
        num_latent_dims: Optional[List[int]] = None,
        learn_latent_pars: bool = True,
        first_dim_is_batch: bool = False,
        latent_init: str = "default",
        outcome_transform: Optional[OutcomeTransform] = None,
        input_transform: Optional[InputTransform] = None,
    ):
        r"""A HigherOrderGP model for high-dim output regression.

        Args:
            train_X: Training inputs
            train_Y: Training targets
            likelihood: Gaussian likelihood for the model.
            covar_modules: List of kernels for each output structure.
            num_latent_dims: Sizes for the latent dimensions.
            learn_latent_pars: If true, learn the latent parameters.
            first_dim_is_batch: If true, the first dimension of train_Y should be
                regarded as a batch dimension (e.g. predicting batches of tensors).
            latent_init: [default or gp] How to initialize the latent parameters.
            outcome_transform: An optional outcome transform applied to train_Y.
            input_transform: An optional input transform applied to train_X.
        """

        if input_transform is not None:
            input_transform.to(train_X)

        if outcome_transform is not None:
            train_Y, _ = outcome_transform(train_Y)

        if first_dim_is_batch:
            self._aug_batch_shape = train_Y.shape[:1]
            self._num_dimensions = len(train_Y.shape) - 1
            self._num_outputs = train_Y.shape[0]
        else:
            self._aug_batch_shape = Size()
            self._num_dimensions = len(train_Y.shape)
            self._num_outputs = 1

        self._input_batch_shape = train_X.shape[:-2]

        if likelihood is None:

            noise_prior = GammaPrior(1.1, 0.05)
            noise_prior_mode = (noise_prior.concentration - 1) / noise_prior.rate
            likelihood = GaussianLikelihood(
                noise_prior=noise_prior,
                batch_shape=self._aug_batch_shape,
                noise_constraint=GreaterThan(
                    MIN_INFERRED_NOISE_LEVEL,
                    transform=None,
                    initial_value=noise_prior_mode,
                ),
            )
        else:
            self._is_custom_likelihood = True

        super().__init__(
            train_X,
            train_Y.view(*self._aug_batch_shape, -1),
            likelihood=likelihood,
        )

        if covar_modules is not None:
            self.covar_modules = ModuleList(covar_modules)
        else:
            self.covar_modules = ModuleList(
                [
                    MaternKernel(
                        nu=2.5,
                        lengthscale_prior=GammaPrior(3.0, 6.0),
                        batch_shape=self._aug_batch_shape,
                        ard_num_dims=1 if dim > 0 else train_X.shape[-1],
                    )
                    for dim in range(self._num_dimensions)
                ]
            )

        if num_latent_dims is None:
            num_latent_dims = [1] * (self._num_dimensions - 1)

        if first_dim_is_batch:
            self.target_shape = train_Y.shape[2:]
        else:
            self.target_shape = train_Y.shape[1:]

        self.to(train_X.device)

        self._initialize_latents(
            latent_init=latent_init,
            num_latent_dims=num_latent_dims,
            learn_latent_pars=learn_latent_pars,
            device=train_Y.device,
            dtype=train_Y.dtype,
        )

        if outcome_transform is not None:
            self.outcome_transform = outcome_transform
        if input_transform is not None:
            self.input_transform = input_transform
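A short instantiation sketch, assuming the __init__ above belongs to BoTorch's HigherOrderGP; the import path and the example shapes are assumptions for illustration.
import torch
from botorch.models import HigherOrderGP

train_X = torch.rand(10, 3, dtype=torch.float64)     # 10 training points, 3 input dims
train_Y = torch.rand(10, 4, 5, dtype=torch.float64)  # tensor-valued targets of shape 4 x 5
model = HigherOrderGP(train_X, train_Y)              # default Matern kernels, one per output dim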
Example #29
    def __init__(
        self,
        parameters: dict,
    ):
        super(unetr, self).__init__(parameters)

        if not ("inner_patch_size" in parameters["model"]):
            parameters["model"]["inner_patch_size"] = parameters["patch_size"][0]
            print("Default inner patch size set to %d." % parameters["patch_size"][0])

        if "inner_patch_size" in parameters["model"]:
            if np.ceil(np.log2(parameters["model"]["inner_patch_size"])) != np.floor(
                np.log2(parameters["model"]["inner_patch_size"])
            ):
                sys.exit("The inner patch size must be a power of 2.")

        self.patch_size = parameters["model"]["inner_patch_size"]
        self.depth = int(np.log2(self.patch_size))
        patch_check = checkPatchDimensions(parameters["patch_size"], self.depth)

        if patch_check != self.depth and patch_check >= 2:
            print(
                "The image size is not large enough for the desired depth. It is expected that each dimension of the image is divisible by 2^i, where i is an integer greater than or equal to 2. Only the first %d layers will run."
                % patch_check
            )
        elif patch_check < 2:
            sys.exit(
                "The image size is not large enough for the desired depth. It is expected that each dimension of the image is divisible by 2^i, where i is an integer greater than or equal to 2."
            )

        if not ("num_heads" in parameters["model"]):
            parameters["model"]["num_heads"] = 12
            print(
                "Default number of heads in multi-head self-attention (MSA) set to 12."
            )

        if not ("embed_dim" in parameters["model"]):
            parameters["model"]["embed_dim"] = 768
            print("Default size of embedded dimension set to 768.")

        if self.n_dimensions == 2:
            self.img_size = parameters["patch_size"][0:2]
        elif self.n_dimensions == 3:
            self.img_size = parameters["patch_size"]

        self.num_layers = 3 * self.depth  # number of transformer layers
        self.out_layers = np.arange(2, self.num_layers, 3)
        self.num_heads = parameters["model"]["num_heads"]
        self.embed_size = parameters["model"]["embed_dim"]
        self.patch_dim = [i // self.patch_size for i in self.img_size]

        if not all([i % self.patch_size == 0 for i in self.img_size]):
            sys.exit(
                "The image size is not divisible by the patch size in at least 1 dimension. UNETR is not defined in this case."
            )
        if not all([self.patch_size <= i for i in self.img_size]):
            sys.exit("The inner patch size must be smaller than the input image.")

        self.transformer = _Transformer(
            img_size=self.img_size,
            patch_size=self.patch_size,
            in_feats=self.n_channels,
            embed_size=self.embed_size,
            num_heads=self.num_heads,
            mlp_dim=2048,
            num_layers=self.num_layers,
            out_layers=self.out_layers,
            Conv=self.Conv,
            Norm=self.Norm,
        )

        self.upsampling = ModuleList([])
        self.convs = ModuleList([])

        for i in range(0, self.depth - 1):
            # add deconv blocks
            tempconvs = nn.Sequential()
            tempconvs.add_module(
                "conv0",
                _DeconvConvBlock(
                    self.embed_size,
                    32 * 2**self.depth,
                    self.Norm,
                    self.Conv,
                    self.ConvTranspose,
                ),
            )

            for j in range(self.depth - 2, i, -1):
                tempconvs.add_module(
                    "conv%d" % j,
                    _DeconvConvBlock(
                        128 * 2**j,
                        128 * 2 ** (j - 1),
                        self.Norm,
                        self.Conv,
                        self.ConvTranspose,
                    ),
                )

            self.convs.append(tempconvs)

            # add upsampling
            self.upsampling.append(
                _UpsampleBlock(
                    128 * 2 ** (i + 1), self.Norm, self.Conv, self.ConvTranspose
                )
            )

        # add upsampling for transformer output (no convs)
        self.upsampling.append(
            self.ConvTranspose(
                in_channels=self.embed_size,
                out_channels=32 * 2**self.depth,
                kernel_size=2,
                stride=2,
                padding=0,
                output_padding=0,
            )
        )

        self.input_conv = nn.Sequential()
        self.input_conv.add_module(
            "conv1", _ConvBlock(self.n_channels, 32, self.Norm, self.Conv)
        )
        self.input_conv.add_module("conv2", _ConvBlock(32, 64, self.Norm, self.Conv))

        self.output_conv = nn.Sequential()
        self.output_conv.add_module("conv1", _ConvBlock(128, 64, self.Norm, self.Conv))
        self.output_conv.add_module("conv2", _ConvBlock(64, 64, self.Norm, self.Conv))
        self.output_conv.add_module(
            "conv3",
            out_conv(
                64,
                self.n_classes,
                conv_kwargs={
                    "kernel_size": 1,
                    "stride": 1,
                    "padding": 0,
                    "bias": False,
                },
                norm=self.Norm,
                conv=self.Conv,
                final_convolution_layer=self.final_convolution_layer,
                sigmoid_input_multiplier=self.sigmoid_input_multiplier,
            ),
        )
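A worked example of the depth and patch arithmetic used above, with illustrative numbers only:
import numpy as np

patch_size = 16                            # inner_patch_size, a power of 2
depth = int(np.log2(patch_size))           # 4
num_layers = 3 * depth                     # 12 transformer layers
out_layers = np.arange(2, num_layers, 3)   # intermediate outputs taken after layers 2, 5, 8, 11
img_size = [128, 128, 128]
patch_dim = [i // patch_size for i in img_size]  # an 8 x 8 x 8 grid of patches
print(num_layers, list(out_layers), patch_dim)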
    def __init__(self, dataset: MusicDataset,
                 note_embedding_dim=20,
                 metadata_embedding_dim=30,
                 num_lstm_constraints_units=256,
                 num_lstm_generation_units=256,
                 linear_hidden_size=128,
                 num_layers=1,
                 dropout_input_prob=0.2,
                 dropout_prob=0.5,
                 unary_constraint=False,
                 teacher_forcing=True
                 ):
        super(ConstraintModelGaussianReg, self).__init__()
        self.dataset = dataset
        self.use_teacher_forcing = teacher_forcing
        self.teacher_forcing_prob = 0.5

        # === parameters ===
        # --- common parameters
        self.num_layers = num_layers
        self.num_units_linear = linear_hidden_size
        self.unary_constraint = unary_constraint
        unary_constraint_size = 1 if self.unary_constraint else 0

        # --- notes
        self.note_embedding_dim = note_embedding_dim
        self.num_lstm_generation_units = num_lstm_generation_units
        self.num_notes_per_voice = [len(d) for d in self.dataset.note2index_dicts]
        # use also note_embeddings to embed unary constraints
        self.note_embeddings = ModuleList(
            [
                Embedding(num_embeddings + unary_constraint_size, self.note_embedding_dim)
                for num_embeddings in self.num_notes_per_voice
            ]
        )
        # todo different ways of merging constraints

        # --- metadatas
        self.metadata_embedding_dim = metadata_embedding_dim
        self.num_elements_per_metadata = [metadata.num_values
                                          for metadata in self.dataset.metadatas]
        # must add the number of voices
        self.num_elements_per_metadata.append(self.dataset.num_voices)
        # embeddings for all metadata except unary constraints
        self.metadata_embeddings = ModuleList(
            [
                Embedding(num_embeddings, self.metadata_embedding_dim)
                for num_embeddings in self.num_elements_per_metadata
            ]
        )
        # nn hyper parameters
        self.num_lstm_constraints_units = num_lstm_constraints_units
        self.dropout_input_prob = dropout_input_prob
        self.dropout_prob = dropout_prob

        # (input_size, hidden_size) pairs for the stack of constraint LSTMs: the first
        # layer's input size is the concatenated metadata embeddings plus the
        # unary-constraint note embedding (when enabled); later layers are hidden-to-hidden
        lstm_constraint_num_hidden = [
            (self.metadata_embedding_dim * len(self.num_elements_per_metadata)
             + self.note_embedding_dim * unary_constraint_size,
             self.num_lstm_constraints_units)
        ] + [
            (self.num_lstm_constraints_units, self.num_lstm_constraints_units)
        ] * (self.num_layers - 1)

        # trainable parameters
        # note: nn.LSTM ignores `dropout` when num_layers == 1 (PyTorch emits a warning)
        self.lstm_constraint = nn.ModuleList([
            nn.LSTM(input_size=input_size,
                    hidden_size=hidden_size,
                    num_layers=1,
                    dropout=dropout_prob,
                    batch_first=True)
            for input_size, hidden_size in lstm_constraint_num_hidden
        ])
        lstm_generation_num_hidden = [
            (self.note_embedding_dim + self.num_lstm_constraints_units,
             self.num_lstm_constraints_units)
        ] + [
            (self.num_lstm_constraints_units, self.num_lstm_constraints_units)
        ] * (self.num_layers - 1)

        self.lstm_generation = nn.ModuleList([
            nn.LSTM(input_size=input_size,
                    hidden_size=hidden_size,
                    num_layers=1,
                    dropout=dropout_prob,
                    batch_first=True)
            for input_size, hidden_size in lstm_generation_num_hidden
        ])
        self.linear_1 = nn.Linear(self.num_lstm_generation_units, linear_hidden_size)
        self.linear_ouput_notes = ModuleList(
            [
                nn.Linear(self.num_units_linear, num_notes)
                for num_notes in self.num_notes_per_voice
            ]
        )
        # todo test real dropout input
        self.dropout_layer = nn.Dropout2d(p=dropout_input_prob)
        self.dropout_lstm_layer = nn.Dropout(p=dropout_prob)

        self.optimizer = torch.optim.Adam(self.parameters())

        cur_dir = os.path.dirname(os.path.realpath(__file__))
        self.filepath = os.path.join(cur_dir, 'models/',
                                     self.__repr__())
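A minimal, self-contained illustration of the per-voice embedding pattern used above; the vocabulary sizes are hypothetical.
import torch
from torch.nn import Embedding, ModuleList

num_notes_per_voice = [60, 55, 58, 62]     # hypothetical vocabulary size per voice
note_embedding_dim = 20
note_embeddings = ModuleList(
    [Embedding(n, note_embedding_dim) for n in num_notes_per_voice]
)

# embed a batch of two 8-step sequences for voice 0
tokens = torch.randint(0, num_notes_per_voice[0], (2, 8))
print(note_embeddings[0](tokens).shape)    # torch.Size([2, 8, 20])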