def __init__(self,
             in_size,
             out_size,
             nl=nn.ReLU(),
             drop=0.,
             bias=True,
             excitability=False,
             excit_buffer=False,
             batch_norm=False,
             gated=False):
    super().__init__()
    if drop > 0:
        self.dropout = nn.Dropout(drop)
    # when batch-norm follows, the linear layer's own bias is redundant
    # (BatchNorm1d has a learnable shift of its own)
    self.linear = em.LinearExcitability(in_size,
                                        out_size,
                                        bias=False if batch_norm else bias,
                                        excitability=excitability,
                                        excit_buffer=excit_buffer)
    if batch_norm:
        self.bn = nn.BatchNorm1d(out_size)
    if gated:
        # sigmoid-gate computed from this layer's input
        self.gate = nn.Linear(in_size, out_size)
        self.sigmoid = nn.Sigmoid()
    if isinstance(nl, nn.Module):
        self.nl = nl
    elif nl != "none":
        self.nl = nn.ReLU() if nl == "relu" else (
            nn.LeakyReLU() if nl == "leakyrelu" else utils.Identity())
    # note: with nl == "none", no `self.nl` attribute is created at all
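This constructor only registers sub-modules; the forward pass is not part of the snippet. Below is a minimal sketch of how the pieces plausibly compose, assuming each optional sub-module is applied only when it was created and that the order is dropout, linear, batch-norm, gate, non-linearity (the method body is an assumption, not copied from the source):

def forward(self, x):
    # hypothetical sketch, not taken from the snippet above
    inp = self.dropout(x) if hasattr(self, 'dropout') else x
    pre_activ = self.linear(inp)
    if hasattr(self, 'bn'):
        pre_activ = self.bn(pre_activ)
    if hasattr(self, 'gate'):
        # multiplicative sigmoid-gate computed from the layer's input
        pre_activ = pre_activ * self.sigmoid(self.gate(x))
    return self.nl(pre_activ) if hasattr(self, 'nl') else pre_activ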
Example 2
    def __init__(self, image_size, image_channels, classes,
                 fc_layers=3, fc_units=1000, fc_drop=0, fc_bn=True, fc_nl="relu", z_dim=20):

        # Set configurations
        super().__init__()
        self.label = "VAE"
        self.image_size = image_size
        self.image_channels = image_channels
        self.classes = classes
        self.fc_layers = fc_layers
        self.z_dim = z_dim
        self.fc_units = fc_units

        # Training-related components that should be set before training
        # -criterion for reconstruction
        self.recon_criterion = None
        # -weights of different components of the loss function
        self.lamda_rcl = 1.
        self.lamda_vl = 1.
        self.lamda_pl = 0. # --> when used as "classifier with feedback-connections", this should be set to 1.

        # Check whether there is at least 1 fc-layer
        if fc_layers < 1:
            raise ValueError("VAE cannot have 0 fully-connected layers!")


        ######------SPECIFY MODEL------######

        # encoder: flatten image to 2D-tensor
        self.flatten = utils.Flatten()
        # encoder: fully connected hidden layers
        self.fcE = linear_nets.MLP(
            input_size=image_channels*image_size**2, output_size=fc_units, layers=fc_layers-1, hid_size=fc_units,
            drop=fc_drop, batch_norm=fc_bn, nl=fc_nl, final_nl=True,
        )
        enc_mlp_output_size = fc_units if fc_layers > 1 else image_channels * image_size**2

        # classifier (from final hidden layer of encoder)
        self.classifier = nn.Sequential(nn.Dropout(fc_drop),
                                        eM.LinearExcitability(enc_mlp_output_size, classes))

        # reparametrization ("to Z and back")
        out_nl = fc_layers > 1
        dec_mlp_input_size = fc_units if fc_layers > 1 else image_channels * image_size**2
        self.toZ = nn.Linear(enc_mlp_output_size, z_dim)       # estimating mean
        self.toZlogvar = nn.Linear(enc_mlp_output_size, z_dim) # estimating log(SD**2)
        self.fromZ = linear_nets.fc_layer(z_dim, dec_mlp_input_size, batch_norm=(out_nl and fc_bn),
                                          nl=fc_nl if out_nl else "none")

        # decoder: fully connected hidden layers (with no non-linearity or batchnorm in final layer!)
        self.fcD = linear_nets.MLP(
            input_size=fc_units, output_size=image_channels*image_size**2, layers=fc_layers-1, hid_size=fc_units,
            drop=fc_drop, batch_norm=fc_bn, nl=fc_nl, final_nl=False,
        )
        # decoder: reshape to image
        self.reshapeD = utils.ToImage(image_channels=image_channels)
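The snippet defines `toZ` and `toZlogvar` for the mean and log-variance of the approximate posterior, but the sampling step itself is not shown. A minimal sketch of the standard reparameterization trick these two layers imply (assumes `import torch`; not copied from the source):

def reparameterize(self, mu, logvar):
    # hypothetical sketch: differentiable sample z ~ N(mu, SD**2)
    std = torch.exp(0.5 * logvar)  # logvar = log(SD**2), so SD = exp(0.5 * logvar)
    eps = torch.randn_like(std)    # noise drawn from N(0, I)
    return mu + eps * std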
Example 3
    def __init__(self,
                 image_size,
                 image_channels,
                 classes,
                 fc_layers=3,
                 fc_units=1000,
                 fc_drop=0,
                 fc_bn=True,
                 fc_nl="relu",
                 bias=True,
                 excitability=False,
                 excit_buffer=False):

        # configurations
        super().__init__()
        self.classes = classes
        self.label = "Classifier"

        # check whether there is at least 1 fc-layer
        if fc_layers < 1:
            raise ValueError(
                "The classifier needs to have at least 1 fully-connected layer."
            )

        ######------SPECIFY MODEL------######

        # flatten image to 2D-tensor
        self.flatten = utils.Flatten()

        # fully connected hidden layers
        self.fcE = MLP(input_size=image_channels * image_size**2,
                       output_size=fc_units,
                       layers=fc_layers - 1,
                       hid_size=fc_units,
                       drop=fc_drop,
                       batch_norm=fc_bn,
                       nl=fc_nl,
                       final_nl=True,
                       bias=bias,
                       excitability=excitability,
                       excit_buffer=excit_buffer)
        mlp_output_size = fc_units if fc_layers > 1 else image_channels * image_size**2

        # classifier
        self.classifier = nn.Sequential(
            nn.Dropout(fc_drop),
            eM.LinearExcitability(mlp_output_size, classes, excit_buffer=True))
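Only the constructor is shown. A plausible forward pass, under the assumption that the three registered modules (`flatten`, `fcE`, `classifier`) are simply applied in sequence:

def forward(self, x):
    # hypothetical sketch, not part of the snippet above
    flat = self.flatten(x)          # (batch, C, H, W) -> (batch, C*H*W)
    hidden = self.fcE(flat)         # fully-connected feature extractor
    return self.classifier(hidden)  # logits over the `classes` categories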
Example 4

def __init__(self,
             in_size,
             out_size,
             drop=0.,
             bias=True,
             excitability=False,
             excit_buffer=False,
             batch_norm=False,
             nl="relu"):
    super().__init__()
    # dropout is always created here (nn.Dropout(0.) is a no-op)
    self.dropout = nn.Dropout(drop)
    self.linear = em.LinearExcitability(in_size,
                                        out_size,
                                        bias=False if batch_norm else bias,
                                        excitability=excitability,
                                        excit_buffer=excit_buffer)
    self.bn = nn.BatchNorm1d(out_size) if batch_norm else utils.Identity()
    self.nl = nn.ReLU() if nl == "relu" else (
        nn.LeakyReLU() if nl == "leakyrelu" else utils.Identity())
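A quick usage sketch, assuming this constructor belongs to the same `fc_layer`-style module as in the first snippet and that its forward applies dropout, linear, batch-norm, and the non-linearity in that order (the class name `fc_layer` and the forward behavior are assumptions; assumes `import torch`):

# hypothetical usage; `fc_layer` and the forward order are assumptions
layer = fc_layer(in_size=784, out_size=256, batch_norm=True, nl="leakyrelu")
x = torch.randn(32, 784)  # batch of 32 flattened 28x28 inputs
out = layer(x)            # expected shape: (32, 256)
# with batch_norm=True the linear sub-layer is built without a bias, since
# BatchNorm1d's learnable shift would make it redundant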