Example #1
    def __init__(self, hidden_size):
        super(AE_1, self).__init__()
        self.hidden_size = hidden_size
        self.relu = nn.ReLU(inplace=True)
        self.sigmoid = nn.Sigmoid()

        self.conv1 = nn.Conv1d(1, 16, kernel_size=3, stride=2, padding=2)
        self.pool1 = nn.MaxPool1d(2, stride=2, return_indices=True)
        self.conv2 = nn.Conv1d(16, 8, kernel_size=3, stride=2, padding=2)
        self.pool2 = nn.MaxPool1d(2, stride=1, return_indices=True)
        self.fc1 = nn.Linear(8 * 256, self.hidden_size)

        self.fc2 = nn.Linear(self.hidden_size, 8 * 256)
        self.unpool2 = nn.MaxUnpool1d(2, stride=1)
        self.deconv2 = nn.ConvTranspose1d(8,
                                          16,
                                          kernel_size=3,
                                          stride=2,
                                          padding=2)
        self.unpool1 = nn.MaxUnpool1d(2, stride=2)
        self.deconv1 = nn.ConvTranspose1d(16,
                                          1,
                                          kernel_size=3,
                                          stride=2,
                                          padding=2)
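The snippet above only defines the layers. A minimal forward() sketch, assuming an input length for which the flattened encoder output matches the 8 * 256 expected by fc1 (for example, an input of length 2041 flows through to exactly 8 x 256 features); this method is not part of the original snippet:

    def forward(self, x):
        x = self.relu(self.conv1(x))
        x, idx1 = self.pool1(x)               # keep indices for unpooling
        x = self.relu(self.conv2(x))
        x, idx2 = self.pool2(x)
        shape = x.shape
        h = self.fc1(x.flatten(start_dim=1))  # bottleneck of size hidden_size
        x = self.fc2(h).view(shape)
        x = self.unpool2(x, idx2)             # indices route values back
        x = self.relu(self.deconv2(x))
        x = self.unpool1(x, idx1)
        return self.sigmoid(self.deconv1(x))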
Example #2
    def __init__(self, shape=(1, 300)):
        super(torchAutoEnc, self).__init__()

        # Model layers. The padding values below had to be worked out manually.
        self.e1 = nn.Conv1d(1, 10, kernel_size=9, padding=4)
        self.r1 = nn.ReLU()
        self.m1 = nn.MaxPool1d(2, return_indices=True)
        self.e2 = nn.Conv1d(10, 10, kernel_size=19, padding=10)
        self.r2 = nn.ReLU()
        self.m2 = nn.MaxPool1d(2, return_indices=True)
        self.h_l, self.u_l = self.get_dimension(shape)
        self.e3 = nn.Linear(self.h_l, 100)
        self.r3 = nn.ReLU()
        self.e4 = nn.Linear(100, 10)
        self.r4 = nn.ReLU()
        self.d4 = nn.Linear(10, 100)
        self.d4.weight = torch.nn.Parameter(self.e4.weight.t())
        self.dr4 = nn.ReLU()
        self.d3 = nn.Linear(100, self.h_l)
        self.d3.weight = torch.nn.Parameter(self.e3.weight.t())
        self.dr3 = nn.ReLU()
        self.u2 = nn.MaxUnpool1d(2)
        # The decoder convs mirror the encoder kernels so the transposed
        # encoder weights have matching shapes. Tensor.t() only works on
        # 2D tensors, so conv weights swap their channel dims with
        # transpose(0, 1) instead.
        self.d2 = nn.Conv1d(10, 10, kernel_size=19, padding=8)
        self.d2.weight = torch.nn.Parameter(self.e2.weight.transpose(0, 1))
        self.dr2 = nn.ReLU()
        self.u1 = nn.MaxUnpool1d(2)
        self.d1 = nn.Conv1d(10, 1, kernel_size=9, padding=4)
        self.d1.weight = torch.nn.Parameter(self.e1.weight.transpose(0, 1))
        self.dr1 = nn.ReLU()

        # Classification layer
        self.eClassify = nn.Linear(10, 2)
        self.erClassify = nn.Softmax(dim=1)
Example #3
File: models.py Project: lixww/sgp
 def __init__(self, input_dim):
     super(cae, self).__init__()
     self.inp_dim = input_dim
     # Note: with return_indices=True, MaxPool1d returns (output, indices),
     # so these Sequential blocks cannot be called as-is; the forward pass
     # must unpack the pooling indices and feed them to each MaxUnpool1d.
     self.encoder = nn.Sequential(
         nn.Conv1d(1, 128, kernel_size=2, stride=1),
         nn.MaxPool1d(2, return_indices=True),
         nn.ReLU(),
         nn.Conv1d(128, 64, kernel_size=2, stride=1),
         nn.MaxPool1d(2, return_indices=True),
         nn.ReLU(),
         nn.Conv1d(64, 32, kernel_size=2, stride=1),
         nn.MaxPool1d(2, return_indices=True),
         nn.ReLU(),
         nn.Conv1d(32, 3, kernel_size=2, stride=1),
         nn.ReLU(),
     )
     self.decoder = nn.Sequential(
         nn.ConvTranspose1d(3, 32, kernel_size=2, stride=1),
         nn.ReLU(),
         nn.MaxUnpool1d(2),
         nn.ConvTranspose1d(32, 64, kernel_size=2, stride=1),
         nn.ReLU(),
         nn.MaxUnpool1d(2),
         nn.ConvTranspose1d(64, 128, kernel_size=2, stride=1),
         nn.ReLU(),
         nn.MaxUnpool1d(2),
         nn.ConvTranspose1d(128, 1, kernel_size=2, stride=1),
         nn.ReLU(),
     )
Example #4
    def __init__(self, first_size):
        super(Autoencoder, self).__init__()
        print("First size: ", first_size)

        self.first_size = first_size
        print("First size ", self.first_size)
        self.dimensions = 20
        self.kernel_size = 20
        self.pooling_value = 4
        self.elements_in_dimension = int(
            (first_size - (self.kernel_size - 1)) / self.pooling_value)
        print("Elements ", self.elements_in_dimension)

        # computed at runtime
        self.this_size = 0
        self.indices = 0

        self.encoder_conv = nn.Conv1d(1, self.dimensions, self.kernel_size)
        self.encoder_pool = nn.MaxPool1d(self.pooling_value,
                                         stride=self.pooling_value,
                                         return_indices=True)
        self.encoder_linear = nn.Linear(self.elements_in_dimension, 1)

        self.decoder_linear = nn.Linear(1, self.elements_in_dimension)
        self.decoder_pool = nn.MaxUnpool1d(self.pooling_value,
                                           stride=self.pooling_value)
        self.decoder_conv = nn.ConvTranspose1d(self.dimensions, 1,
                                               self.kernel_size)
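As a quick numeric check of elements_in_dimension (illustrative numbers, not from the original project; assumes the class above plus import torch): with first_size = 1000 the Conv1d output has length 1000 - (20 - 1) = 981, and pooling by 4 leaves floor(981 / 4) = 245 elements, matching int((1000 - 19) / 4) = 245:

    model = Autoencoder(first_size=1000)
    x = torch.randn(1, 1, 1000)
    out = model.encoder_conv(x)         # -> (1, 20, 981)
    out, idx = model.encoder_pool(out)  # -> (1, 20, 245)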
Example #5
    def __init__(self, input_size, out_size, kernel_size):
        super(LadderNetwork, self).__init__()
        self.enccov1d1 = nn.Conv1d(input_size, 150, kernel_size)
        self.enccov1d2 = nn.Conv1d(150, 100, kernel_size)
        self.enccov1d3 = nn.Conv1d(100, 30, kernel_size)
        self.maxpool1d = nn.MaxPool1d(2, return_indices=True)
        self.erelu1 = nn.ReLU()
        self.erelu2 = nn.ReLU()
        self.erelu3 = nn.ReLU()
        self.drelu1 = nn.ReLU()
        self.drelu2 = nn.ReLU()
        self.drelu3 = nn.ReLU()
        self.softmax = nn.Softmax(dim=1)
        self.encodefc1 = nn.Linear(420, out_size)
        self.ebatch1 = nn.BatchNorm1d(150)
        self.ebatch2 = nn.BatchNorm1d(100)
        self.ebatch3 = nn.BatchNorm1d(30)

        self.dbatch1 = nn.BatchNorm1d(input_size)
        self.dbatch2 = nn.BatchNorm1d(150)
        self.dbatch3 = nn.BatchNorm1d(100)
        self.decodefc1 = nn.Linear(out_size, 420)

        self.decodeuppool = nn.MaxUnpool1d(2)
        self.deccov1d3 = nn.ConvTranspose1d(30, 100, kernel_size)
        self.deccov1d2 = nn.ConvTranspose1d(100, 150, kernel_size)
        self.deccov1d1 = nn.ConvTranspose1d(150, input_size, kernel_size)
Example #6
 def forward(self, x):
     # encoder part
     # Note: creating BatchNorm1d/Dropout modules inside forward() builds new,
     # untrained modules on every call; they would normally live in __init__.
     xnor = nn.BatchNorm1d(1, momentum=0.5)
     xnor2 = nn.BatchNorm1d(8, momentum=0.5)
     xnorde = nn.BatchNorm1d(8, momentum=0.5)
     dropout = nn.Dropout(0.2)

     #x = xnor(x)
     out, indices1 = self.encod1(x)
     out = dropout(out)

     #out = xnor2(out)
     out, indices2 = self.encod2(out)

     # decoder
     unmax = nn.MaxUnpool1d(14, stride=14)
     out = unmax(out, indices2)
     out = self.decod1(out)
     out = dropout(out)

     # out = xnorde(out)
     out = unmax(out, indices1)
     out = self.decod2(out)

     #out, ind1, ind2 = self.encoder(x)
     #out = self.decoder(out, ind1, ind2)
     return out
Example #7
    def __init__(self):
        super(ComplexConvDecoder, self).__init__()
        self.decode1 = nn.Conv1d(16, 8, 125, padding=62)
        self.decode2 = nn.Conv1d(8, 4, 251, padding=125)
        self.decode3 = nn.Conv1d(4, 1, 501, padding=250)

        self.unpool = nn.MaxUnpool1d(10, stride=10)
        self.activation_func = nn.ReLU()
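A possible forward pass for this decoder (a sketch; the index arguments are assumed to come from the matching encoder's pooling layers and are not part of the original snippet):

    def forward(self, x, indices3, indices2, indices1):
        x = self.activation_func(self.decode1(self.unpool(x, indices3)))
        x = self.activation_func(self.decode2(self.unpool(x, indices2)))
        return self.decode3(self.unpool(x, indices1))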
Example #8
 def forward(self, x, indices=None):
     if self.pool_size < 2:
         return x
     if indices is None:
         x = F.interpolate(x, scale_factor=self.pool_size)
     else:
         x = nn.MaxUnpool1d(kernel_size=self.pool_size)(x, indices=indices)
     return x
 def __init__(self, input_size, feature_sizes, kernel_sizes):
     '''
     Args:
         input_size: an int list or tuple of length 2, the size of the input data: (sensor_number, data_length).
         feature_sizes: an int list or tuple of length 2; feature_sizes[1] is the number of encoded features.
         kernel_sizes: an int list or tuple of length 2 storing the kernel sizes of the two Conv1d layers.
             Note: kernel sizes must be odd numbers.
     '''
     super(cnn_encoder_decoder, self).__init__()
     self.input_size = input_size
     self.feature_sizes = feature_sizes
     self.kernel_sizes = kernel_sizes
     self.padding = [
         (i - 1) // 2 for i in kernel_sizes
     ]  # with odd kernel sizes, this padding keeps the data length unchanged
     # encoder
     self.encoder_c1 = nn.Conv1d(input_size[0],
                                 feature_sizes[0],
                                 kernel_sizes[0],
                                 padding=self.padding[0])  # length
     self.encoder_a1 = nn.ReLU()
     self.encoder_p1 = nn.MaxPool1d(
         2, return_indices=True)  # length / 2
     self.encoder_c2 = nn.Conv1d(feature_sizes[0],
                                 feature_sizes[1],
                                 kernel_sizes[1],
                                 padding=self.padding[1])  # length/2
     self.encoder_a2 = nn.ReLU()
     self.encoder_p2 = nn.MaxPool1d(
         2, return_indices=True)  # length / 4
     # decoder
     self.decoder_up1 = nn.MaxUnpool1d(2)  # length/2
     self.decoder_c1 = nn.Conv1d(feature_sizes[1],
                                 feature_sizes[0],
                                 kernel_sizes[1],
                                 padding=self.padding[1])  # length/2
     self.decoder_a1 = nn.PReLU()
     self.decoder_up2 = nn.MaxUnpool1d(2)  # length
     self.decoder_c2 = nn.Conv1d(feature_sizes[0],
                                 input_size[0],
                                 kernel_sizes[0],
                                 padding=self.padding[0])  # length
     self.decoder_a2 = nn.PReLU()
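The "odd kernel size" requirement in the docstring can be checked directly: with an odd kernel k, a stride-1 Conv1d with padding = (k - 1) // 2 preserves the data length (a standalone sketch, not from the original project):

    import torch
    import torch.nn as nn

    k = 9
    conv = nn.Conv1d(3, 8, k, padding=(k - 1) // 2)
    x = torch.randn(2, 3, 100)
    print(conv(x).shape)  # torch.Size([2, 8, 100]) -- length unchanged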
Example #10
def addTransitionBack(model, nChannels, nOutChannels, dropout):
    model.unpoolTB = nn.MaxUnpool1d(2)
    model.batchnormTB = nn.BatchNorm1d(nChannels)
    model.reluTB = nn.ReLU()
    model.convTB = nn.Conv1d(nChannels,
                             nOutChannels,
                             kernel_size=12,
                             padding=6)
    if dropout > 0:
        model.dropoutTB = nn.Dropout(dropout)
Example #11
    def __init__(self, cutoff=0, lstm_layers=5, activation=nn.Tanh):
        super(ConvLSTMAE_v1, self).__init__()
        self.cutoff = cutoff

        #encode
        self.conv0 = nn.Conv1d(100, 200, 4, stride=2, padding=1)
        self.tanconv0 = activation()
        self.conv1 = nn.Conv1d(200, 400, 4, stride=2, padding=0)
        self.tanconv1 = activation()
        self.maxpool0 = nn.MaxPool1d(3, return_indices=True)
        self.tanmaxpool0 = activation()
        self.conv2 = nn.Conv1d(400, 800, 4, stride=2, padding=2)
        self.tanconv2 = activation()
        self.conv3 = nn.Conv1d(800, 1600, 4, stride=2, padding=0)
        self.tanconv3 = activation()
        self.maxpool1 = nn.MaxPool1d(2, return_indices=True)
        self.relumaxpool1 = activation()

        #lstm
        self.lstm0 = nn.LSTM(20, 20, lstm_layers, batch_first=True)
        self.tanlstm0 = activation()

        nn.init.xavier_uniform_(self.lstm0.weight_ih_l0, gain=np.sqrt(2))
        nn.init.xavier_uniform_(self.lstm0.weight_hh_l0, gain=np.sqrt(2))

        #decode
        self.maxunpool0 = nn.MaxUnpool1d(2)
        self.tanmaxunpool0 = activation()
        self.convt0 = nn.ConvTranspose1d(1600, 800, 4, stride=2, padding=0)
        self.tanconvt0 = activation()
        self.convt1 = nn.ConvTranspose1d(800, 400, 4, stride=2, padding=2)
        self.tanconvt1 = activation()
        self.maxunpool1 = nn.MaxUnpool1d(3)
        self.tanmaxunpool1 = activation()
        self.convt2 = nn.ConvTranspose1d(400, 200, 4, stride=2, padding=0)
        self.tanconvt2 = activation()
        self.convt3 = nn.ConvTranspose1d(200, 100, 4, stride=2, padding=1)
        self.tanconvt3 = activation()

        #output
        self.lin0 = nn.Linear(2004, 2004)
        self.relulin0 = nn.Sigmoid()
Example #12
 def forward(self, x, seq_len=None, indices=None):
     if self.pool_size < 2:
         return x, seq_len
     if indices is None:
         x = F.interpolate(x, scale_factor=self.pool_size)
     else:
         x = nn.MaxUnpool1d(kernel_size=self.pool_size)(x, indices=indices)
     if seq_len is not None:
         seq_len = seq_len * self.pool_size
         seq_len = np.maximum(seq_len, x.shape[-1])
     return x, seq_len
 def __init__(self,
              in_channels,
              out_channels,
              kernel_sizes=[9, 19, 39],
              bottleneck_channels=32,
              activation=nn.ReLU()):
     """
     : param in_channels				Number of input channels (input features)
     : param n_filters				Number of filters per convolution layer => out_channels = 4*n_filters
     : param kernel_sizes			List of kernel sizes for each convolution.
                                     Each kernel size must be odd number that meets -> "kernel_size % 2 !=0".
                                     This is nessesery because of padding size.
                                     For correction of kernel_sizes use function "correct_sizes".
     : param bottleneck_channels		Number of output channels in bottleneck.
                                     Bottleneck wont be used if nuber of in_channels is equal to 1.
     : param activation				Activation function for output tensor (nn.ReLU()).
     """
     super(InceptionTimeTransposeModule, self).__init__()
     self.activation = activation
     self.conv_to_bottleneck_1 = nn.ConvTranspose1d(
         in_channels=in_channels,
         out_channels=bottleneck_channels,
         kernel_size=kernel_sizes[0],
         stride=1,
         padding=kernel_sizes[0] // 2,
         bias=False)
     self.conv_to_bottleneck_2 = nn.ConvTranspose1d(
         in_channels=in_channels,
         out_channels=bottleneck_channels,
         kernel_size=kernel_sizes[1],
         stride=1,
         padding=kernel_sizes[1] // 2,
         bias=False)
     self.conv_to_bottleneck_3 = nn.ConvTranspose1d(
         in_channels=in_channels,
         out_channels=bottleneck_channels,
         kernel_size=kernel_sizes[2],
         stride=1,
         padding=kernel_sizes[2] // 2,
         bias=False)
     self.conv_to_maxpool = nn.Conv1d(in_channels=in_channels,
                                      out_channels=out_channels,
                                      kernel_size=1,
                                      stride=1,
                                      padding=0,
                                      bias=False)
     self.max_unpool = nn.MaxUnpool1d(kernel_size=3, stride=1, padding=1)
     self.bottleneck = nn.Conv1d(in_channels=3 * bottleneck_channels,
                                 out_channels=out_channels,
                                 kernel_size=1,
                                 stride=1,
                                 bias=False)
     self.batch_norm = nn.BatchNorm1d(num_features=out_channels)
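The docstring refers to a helper called correct_sizes. A minimal sketch of such a helper, assuming it simply rounds even kernel sizes down to the nearest odd number (the actual function in the source project may differ):

    def correct_sizes(sizes):
        return [s if s % 2 != 0 else s - 1 for s in sizes]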
Example #14
 def __init__(self):
     super(big_navigation_model, self).__init__()
     self.conv1 = nn.Conv2d(1, 16, kernel_size=(2, 3), padding=(0, 1))
     self.conv2 = nn.Conv1d(16, 16, kernel_size=3, padding=1)
     self.pool = nn.MaxPool1d(2, 2, return_indices=True)
     self.conv3 = nn.Conv1d(16, 16, kernel_size=3, padding=1)
     self.conv4 = nn.Conv1d(16, 16, kernel_size=3, padding=1)
     self.conv5 = nn.Conv1d(16, 16, kernel_size=3, padding=1)
     self.unpool = nn.MaxUnpool1d(kernel_size=2, stride=2)
     self.conv6 = nn.Conv1d(16, 16, kernel_size=3, padding=1)
     self.conv7 = nn.Conv1d(16, 16, kernel_size=3, padding=1)
     self.conv9 = nn.Conv1d(16, 16, kernel_size=3, padding=1)
     self.conv8 = nn.Conv1d(16, 1, kernel_size=3, padding=1)
    def forward(self, x):
        xnor2 = nn.BatchNorm1d(16, momentum=0.5)
        xnor3 = nn.BatchNorm1d(32, momentum=0.5)
        xnor4 = nn.BatchNorm1d(64, momentum=0.5)
        dropout = nn.Dropout(0.3)

        #encoder part
        out, indices1 = self.encod1(x)
        out = xnor2(dropout(out))
        out, indices2 = self.encod2(out)
        out = xnor3(dropout(out))
        out, indices3 = self.encod3(out)
        out = xnor4(dropout(out))
        out, indices4 = self.encod4(out)

        #decoder
        unmax = nn.MaxUnpool1d(13, stride=3)
        unmax1 = nn.MaxUnpool1d(12, stride=3)
        unmax2 = nn.MaxUnpool1d(13, stride=3)
        unmax3 = nn.MaxUnpool1d(12, stride=3)

        out = unmax(out, indices4)
        out = self.decod1(out)
        out = xnor4(dropout(out))

        out = unmax1(out, indices3)
        out = self.decod2(out)
        out = xnor3(dropout(out))

        out = unmax2(out, indices2)
        out = self.decod3(out)
        out = xnor2(dropout(out))

        out = unmax3(out, indices1)
        out = self.decod4(out)

        #out,ind1,ind2=self.encoder(x)
        #out=self.decoder(out,ind1,ind2)
        return out
Example #16
    def forward(self, x):
        maxpool = nn.MaxPool1d(12, stride=7, return_indices=True)
        activate = nn.Tanh()
        xnor3 = nn.BatchNorm1d(32, momentum=0.5)
        xnor4 = nn.BatchNorm1d(64, momentum=0.5)
        xnor5 = nn.BatchNorm1d(128, momentum=0.5)
        xnor6 = nn.BatchNorm1d(256, momentum=0.5)

        # encoder part
        out = self.encod1(x)
        out = activate(xnor3(out))
        out, indices1 = maxpool(out)

        out = self.encod2(out)
        out = activate(xnor4(out))
        out, indices2 = maxpool(out)

        out = self.encod3(out)
        out = activate(xnor5(out))
        out, indices3 = maxpool(out)

        out = self.encod4(out)
        out = activate(xnor6(out))
        out, indices4 = maxpool(out)

        # decoder
        unmax = nn.MaxUnpool1d(12, stride=7)

        out = unmax(out, indices4)
        out = activate(xnor6(out))
        out = self.decod1(out)

        out = unmax(out, indices3)
        out = activate(xnor5(out))
        out = self.decod2(out)

        out = unmax(out, indices2)
        out = activate(xnor4(out))
        out = self.decod3(out)

        out = unmax(out, indices1)
        out = activate(xnor3(out))
        out = self.decod4(out)

        return out
Example #17
    def __init__(self):
        super().__init__()
        self.encoder = nn.ModuleList(
                    [
                        nn.Conv1d(in_channels=1, out_channels=16,
                            kernel_size=3, stride=1, padding=1),
                        nn.BatchNorm1d(16),
                        nn.ReLU(inplace=True),
                        nn.MaxPool1d(kernel_size=3, padding=0, return_indices=True),
                        nn.Conv1d(in_channels=16, out_channels=32,
                            kernel_size=3, stride=1, padding=1),
                        nn.BatchNorm1d(32),
                        nn.ReLU(inplace=True),
                        nn.MaxPool1d(kernel_size=3, padding=0, return_indices=True),
                        nn.Conv1d(in_channels=32, out_channels=64,
                            kernel_size=3, stride=1, padding=1),
                        nn.ReLU(inplace=True)
                        ]
                        )
        self.decoder = nn.ModuleList([
                        nn.Conv1d(in_channels=64, out_channels=32,
                            kernel_size=3, stride=1, padding=1),
                        nn.BatchNorm1d(32),
                        nn.ReLU(inplace=True),
                        nn.MaxUnpool1d(kernel_size=3, padding=0),
                        nn.Conv1d(in_channels=32, out_channels=16,
                            kernel_size=3, stride=1,  padding=1),
                        nn.BatchNorm1d(16),
                        nn.ReLU(inplace=True),
                        nn.MaxUnpool1d(kernel_size=3, padding=0),
                        nn.Conv1d(in_channels=16, out_channels=1,
                            kernel_size=3, stride=1, padding=1)
                            ]
                        )

        self.indices = []
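Since nn.ModuleList has no built-in forward, the pooling indices collected in self.indices have to be threaded through by hand. A sketch of how the forward pass might look (assumed, not part of the original snippet):

    def forward(self, x):
        self.indices = []
        for layer in self.encoder:
            if isinstance(layer, nn.MaxPool1d):
                x, idx = layer(x)          # pools were built with return_indices=True
                self.indices.append(idx)
            else:
                x = layer(x)
        for layer in self.decoder:
            if isinstance(layer, nn.MaxUnpool1d):
                x = layer(x, self.indices.pop())  # consume indices in reverse
            else:
                x = layer(x)
        return x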
Example #18
    def __init__(self, delta=1, window=(3, 3), device=torch.device('cpu')):
        super().__init__()
        self.device = device
        self.window = window
        self.influence_window = (window[0] + 4 * delta, window[1] + 4 * delta)
        self.delta = delta
        self.num_bins = window[0] * window[1]
        self.num_possible_window_inputs = 2**self.num_bins
        self.possible_inputs = np.zeros(
            (self.num_possible_window_inputs, self.num_bins))

        # compute all possible window inputs
        # (np.float was removed in NumPy 1.24; use np.float64 instead)
        for i in range(self.num_possible_window_inputs):
            self.possible_inputs[i] = np.array(
                list(np.binary_repr(i, self.num_bins)), dtype=np.float64)
        self.possible_inputs = self.possible_inputs.reshape(
            (1, self.num_possible_window_inputs, window[0], window[1]))

        self.possible_inputs_mask = np.zeros(
            (1, self.num_possible_window_inputs, self.influence_window[0],
             self.influence_window[1]),
            dtype=np.float64)
        self.possible_inputs_mask[:, :, 2 * delta:-2 * delta,
                                  2 * delta:-2 * delta] = 1
        self.possible_inputs_window = np.zeros(
            (1, self.num_possible_window_inputs, self.influence_window[0],
             self.influence_window[1]),
            dtype=np.float64)
        self.possible_inputs_window[:, :, 2 * delta:-2 * delta,
                                    2 * delta:-2 * delta] = self.possible_inputs

        self.pi = torch.from_numpy(self.possible_inputs).float().to(device)
        self.pi_window = torch.from_numpy(
            self.possible_inputs_window).float().to(device)
        self.pi_window_mask = torch.from_numpy(
            self.possible_inputs_mask).float().to(device)
        self.pi_window_inv_mask = -self.pi_window_mask + 1

        self.replication_input_layer = TilePad2d(delta * 2,
                                                 delta * 2 + window[1] - 1,
                                                 delta * 2,
                                                 delta * 2 + window[0] - 1)

        self.replication_target_layer = TilePad2d(delta, delta + window[1] - 1,
                                                  delta, delta + window[0] - 1)

        self.unpool = nn.MaxUnpool1d(self.num_possible_window_inputs)
Example #19
 def __init__(self, training_parameters):
     super(Backend, self).__init__()
     self.training_parameters = training_parameters
     unpoolparameters = unpoolParameters(
         64, self.training_parameters.frame_length)
     self.unpooling = nn.MaxUnpool1d(
         kernel_size=unpoolparameters['kernel_size'],
         stride=unpoolparameters['stride'],
         padding=unpoolparameters['padding'])
     self.dense1 = nn.Linear(in_features=128, out_features=64)
     self.dense2 = nn.Linear(in_features=64, out_features=64)
     self.dense3 = nn.Linear(in_features=64, out_features=64)
     self.dense4 = nn.Linear(in_features=64, out_features=128)
     self.softplus = nn.Softplus()
     self.ReLU = nn.ReLU()
     self.inverseConv = nn.ConvTranspose1d(
         self.training_parameters.frame_length,
         self.training_parameters.frame_length,
         kernel_size=2,
         padding=64)
    def forward(self, x):
        maxpool = nn.MaxPool1d(4, stride=2, return_indices=True)
        xnor3 = nn.BatchNorm1d(32, momentum=0.5)
        xnor4 = nn.BatchNorm1d(64, momentum=0.5)
        xnor5 = nn.BatchNorm1d(128, momentum=0.5)

        out = self.encod(x)
        #encode
        out = self.encod1(out)
        out = self.tanh(xnor3(out))
        out, indices1 = maxpool(out)
        out = self.layer1(out)

        out = self.encod2(out)
        out = self.tanh(xnor4(out))
        out, indices2 = maxpool(out)
        out = self.layer2(out)

        out = self.encod3(out)
        out = self.tanh(xnor5(out))
        out, indices3 = maxpool(out)

        #decoder partition
        unmax = nn.MaxUnpool1d(4, stride=2)

        out = unmax(out, indices3)
        out = self.tanh(xnor5(out))
        out = self.decod2(out)
        out = self.layer2(out)

        out = unmax(out, indices2)
        out = self.tanh(xnor4(out))
        out = self.decod3(out)
        out = self.layer1(out)

        out = unmax(out, indices1)
        out = self.tanh(xnor3(out))
        out = self.decod4(out)

        return out
Example #21
    def _get_raw_layer(self, direction, in_channels, out_channels, kernel_size, pool_size, stride):
        layers = []
        if direction == "conv":
            layers.append(nn.Conv1d(in_channels, out_channels,
                                    kernel_size, stride=stride))
            layers.append(nn.ReLU(inplace=True))

            layers.append(nn.Conv1d(out_channels, out_channels,
                                    kernel_size, stride=stride))
            layers.append(nn.ReLU(inplace=True))
            layers.append(nn.MaxPool1d(pool_size, return_indices=True))
        else:
            layers.append(nn.MaxUnpool1d(pool_size))
            layers.append(nn.ConvTranspose1d(
                out_channels, out_channels, kernel_size, stride=stride))
            layers.append(nn.ReLU(inplace=True))

            layers.append(nn.ConvTranspose1d(
                out_channels, in_channels, kernel_size, stride=stride))
            layers.append(nn.ReLU(inplace=True))

        return layers
Example #22
 def forward(self, x, sequence_lengths=None, indices=None):
     if self.pool_size < 2:
         return x, sequence_lengths
     if indices is None:
         x = F.interpolate(x, scale_factor=self.stride)
     else:
         x = nn.MaxUnpool1d(kernel_size=self.pool_size,
                            stride=self.stride)(x, indices=indices)
         front_pad, end_pad = compute_pad_size(self.pool_size, 1,
                                               self.stride, self.pad_type)
         end_pad = np.maximum(
             np.array(end_pad) - np.array(self.stride) + 1, 0)
         if front_pad > 0:
             x = Trim(side='front')(x, size=front_pad)
         if end_pad > 0:
             x = Trim(side='end')(x, size=end_pad)
     if sequence_lengths is not None:
         sequence_lengths = _compute_transpose_out_size(
             sequence_lengths, self.pool_size, 1, self.stride,
             self.pad_type)
         # sequence_lengths = np.maximum(sequence_lengths, x.shape[-1])
     return x, sequence_lengths
Example #23
def add_vgg_conv_block(index, layers, in_channels, out_channels,
                       kernel_size, pool_size, stride=1,
                       batch_norm=False, direction="conv",
                       return_indices=False):
    if direction == "conv":
        layers["conv_{}".format(index)] = nn.Conv1d(
            in_channels, out_channels, kernel_size, stride=stride)
        if batch_norm:
            layers["batchnorm_{}".format(index)] = nn.BatchNorm1d(out_channels)
        layers["relu_{}".format(index)] = nn.ReLU(inplace=True)
        index += 1

        layers["conv_{}".format(index)] = nn.Conv1d(
            out_channels, out_channels, kernel_size, stride=stride)
        if batch_norm:
            layers["batchnorm_{}".format(index)] = nn.BatchNorm1d(out_channels)
        layers["relu_{}".format(index)] = nn.ReLU(inplace=True)
        layers["pool_{}".format(index)] = nn.MaxPool1d(
            pool_size, return_indices=return_indices)
        index += 1
        return index
    elif direction == "deconv":
        layers["pool_{}".format(index)] = nn.MaxUnpool1d(pool_size)

        layers["conv_{}".format(index)] = nn.ConvTranspose1d(
            out_channels, out_channels, kernel_size, stride=stride)
        if batch_norm:
            layers["batchnorm_{}".format(index)] = nn.BatchNorm1d(out_channels)
        layers["relu_{}".format(index)] = nn.ReLU(inplace=True)
        index -= 1

        layers["conv_{}".format(index)] = nn.ConvTranspose1d(
            in_channels, out_channels, kernel_size, stride=stride)
        if batch_norm:
            layers["batchnorm_{}".format(index)] = nn.BatchNorm1d(out_channels)
        layers["relu_{}".format(index)] = nn.ReLU(inplace=True)
        index -= 1
        return index
Example #24
    def forward(self, x, kernel_size, unmax_str):
        maxpool = nn.MaxPool1d(kernel_size=kernel_size,
                               stride=unmax_str,
                               return_indices=True)
        activate = nn.Tanh()
        xnor3 = nn.BatchNorm1d(32, momentum=0.5)
        xnor4 = nn.BatchNorm1d(64, momentum=0.5)
        xnor5 = nn.BatchNorm1d(128, momentum=0.5)

        #encoder part
        out = self.encod1(x)
        out = activate(xnor3(out))
        out, indices1 = maxpool(out)

        out = self.encod2(out)
        out = activate(xnor4(out))
        out, indices2 = maxpool(out)

        out = self.encod3(out)
        out = activate(xnor5(out))
        out, indices3 = maxpool(out)

        #decoder
        unmax = nn.MaxUnpool1d(kernel_size=kernel_size, stride=unmax_str)

        out = unmax(out, indices3)
        out = activate(xnor5(out))
        out = self.decod1(out)

        out = unmax(out, indices2)
        out = activate(xnor4(out))
        out = self.decod2(out)

        out = unmax(out, indices1)
        out = activate(xnor3(out))
        out = self.decod3(out)

        return out
Example #25
def make_layers_dec(cfg):
    layers = []
    conv_layers = []
    in_channels = cfg[0]
    cfg = cfg[1:]
    for i, v in enumerate(cfg):
        if v == 'M':
            layers += conv_layers  # [nn.Sequential(*conv_layers)]
            conv_layers = []
            layers += [nn.MaxUnpool1d(kernel_size=2, stride=2)]
        else:
            conv1d = nn.ConvTranspose1d(in_channels,
                                        v,
                                        kernel_size=3,
                                        padding=1)
            if i != len(cfg) - 1:
                conv_layers += [conv1d, nn.ReLU(inplace=True)]
            else:
                conv_layers += [conv1d]
            in_channels = v
    if len(conv_layers) > 0:
        layers += conv_layers  # [nn.Sequential(*conv_layers)]
    return layers
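An illustrative call (the cfg format is inferred from the code: cfg[0] is the input channel count, 'M' inserts a MaxUnpool1d, and the other entries are ConvTranspose1d output channel counts):

    layers = make_layers_dec([256, 'M', 128, 'M', 64, 1])
    decoder = nn.ModuleList(layers)  # MaxUnpool1d needs indices, so a plain Sequential won't work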
Example #26
    def __init__(self, layers_sizes):
        super(Autoencoder, self).__init__()
        self.ker1, self.ker2, self.max_pool = layers_sizes

        self.p1 = nn.ConstantPad1d((self.ker1 // 2, (self.ker1 + 1) // 2 - 1),
                                   0)
        self.c1 = nn.Conv1d(in_channels=1,
                            out_channels=8,
                            kernel_size=self.ker1)
        self.m1 = nn.MaxPool1d(self.max_pool, return_indices=True)
        self.i1 = None
        self.c2 = nn.Conv1d(in_channels=8,
                            out_channels=64,
                            kernel_size=self.ker2,
                            padding=self.ker2 // 2)

        self.d1 = nn.ConvTranspose1d(in_channels=64,
                                     out_channels=8,
                                     kernel_size=self.ker2,
                                     padding=self.ker2 // 2)
        self.u1 = nn.MaxUnpool1d(self.max_pool)
        self.d2 = nn.ConvTranspose1d(in_channels=8,
                                     out_channels=1,
                                     kernel_size=self.ker1)
#%%
##> 1.3 3D max pooling
m = nn.MaxPool3d((3, 2, 2), stride=(2, 1, 2))
input = torch.randn(20, 16, 50, 44, 31)
out = m(input)
out.shape

#%% [markdown]
# - Unpooling restores a downsampled feature map to its original size
# - Max unpooling restores only the maximum elements; all other positions are filled with zeros

#%%
#> 2. Max unpooling
##> 2.1 1D max unpooling
pool = nn.MaxPool1d(2, stride=2, return_indices=True)
unpool = nn.MaxUnpool1d(2, stride=2)
input = torch.arange(1, 9, dtype=torch.float).reshape(-1, 8).unsqueeze(1)
input

#%%
out, indices = pool(input)
indices

#%%
unpool(out, indices)

#%%
input = torch.arange(1, 10, dtype=torch.float).reshape(-1, 9).unsqueeze(
    1)  ## note: the input length is 9
out, indices = pool(input)
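#%%
# With 9 elements the pooled output has length 4, so a plain
# unpool(out, indices) would return length 8; passing output_size recovers
# the original length (a sketch completing the truncated example):
unpool(out, indices, output_size=input.size())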
Example #28
    def __init__(
        self,
        z_dim,
        maxpool,
        in_channels,
        out_channels,
        kernel_sizes,
        kernel_sizes_deconv,
        strides,
        strides_deconv,
        dilatations,
        dilatations_deconv,
        padding,
        padding_deconv,
        batchnorm,
        activation=torch.nn.GELU,
        flow_type="nf",
        n_flows=2,
        n_res=3,
        gated=True,
        has_dense=True,
        resblocks=False,
    ):
        super(Autoencoder1DCNN, self).__init__()

        if torch.cuda.is_available():
            device = 'cuda'
        else:
            device = 'cpu'

        self.device = device
        self.conv_layers = []
        self.deconv_layers = []
        self.bns = []
        self.resconv = []
        self.resdeconv = []
        self.bns_deconv = []
        self.indices = [torch.Tensor() for _ in range(len(in_channels))]
        self.GaussianSample = GaussianSample(z_dim, z_dim)
        self.activation = activation()

        self.n_res = n_res

        self.resblocks = resblocks
        self.has_dense = has_dense
        self.batchnorm = batchnorm
        self.a_dim = None
        for i, (ins, outs, ksize, stride, dilats, pad) in enumerate(
                zip(in_channels, out_channels, kernel_sizes, strides,
                    dilatations, padding)):
            if not gated:
                self.conv_layers += [
                    torch.nn.Conv1d(
                        in_channels=ins,
                        out_channels=outs,
                        kernel_size=ksize,
                        stride=stride,
                        padding=pad,
                        dilation=dilats,
                    )
                ]
            else:
                self.conv_layers += [
                    GatedConv1d(input_channels=ins,
                                output_channels=outs,
                                kernel_size=ksize,
                                stride=stride,
                                padding=pad,
                                dilation=dilats,
                                activation=nn.Tanh())
                ]
            if resblocks and i != 0:
                for _ in range(n_res):
                    self.resconv += [ResBlock(ins, outs, activation, device)]
            self.bns += [nn.BatchNorm1d(num_features=outs)]

        for i, (ins, outs, ksize, stride, dilats, pad) in enumerate(
                zip(reversed(out_channels), reversed(in_channels),
                    kernel_sizes_deconv, strides_deconv, dilatations_deconv,
                    padding_deconv)):
            if not gated:
                self.deconv_layers += [
                    torch.nn.ConvTranspose1d(in_channels=ins,
                                             out_channels=outs,
                                             kernel_size=ksize,
                                             padding=pad,
                                             stride=stride,
                                             dilation=dilats)
                ]
            else:
                self.deconv_layers += [
                    GatedConvTranspose1d(input_channels=ins,
                                         output_channels=outs,
                                         kernel_size=ksize,
                                         stride=stride,
                                         padding=pad,
                                         dilation=dilats,
                                         activation=nn.Tanh())
                ]
            if resblocks and i != 0:
                for _ in range(n_res):
                    self.resdeconv += [
                        ResBlockDeconv(ins, outs, activation, device)
                    ]

            self.bns_deconv += [nn.BatchNorm1d(num_features=outs)]

        self.dense1 = torch.nn.Linear(in_features=out_channels[-1],
                                      out_features=z_dim)
        self.dense2 = torch.nn.Linear(in_features=z_dim,
                                      out_features=out_channels[-1])
        self.dense1_bn = nn.BatchNorm1d(num_features=z_dim)
        self.dense2_bn = nn.BatchNorm1d(num_features=out_channels[-1])
        self.dropout = nn.Dropout(0.5)
        self.maxpool = nn.MaxPool1d(maxpool, return_indices=True)
        self.maxunpool = nn.MaxUnpool1d(maxpool)
        self.conv_layers = nn.ModuleList(self.conv_layers)
        self.deconv_layers = nn.ModuleList(self.deconv_layers)
        self.bns = nn.ModuleList(self.bns)
        self.bns_deconv = nn.ModuleList(self.bns_deconv)
        self.resconv = nn.ModuleList(self.resconv)
        self.resdeconv = nn.ModuleList(self.resdeconv)
        self.flow_type = flow_type
        self.n_flows = n_flows
        if self.flow_type == "nf":
            self.flow = NormalizingFlows(in_features=[z_dim], n_flows=n_flows)
        if self.flow_type == "hf":
            self.flow = HouseholderFlow(in_features=[z_dim],
                                        auxiliary=False,
                                        n_flows=n_flows,
                                        h_last_dim=z_dim)
        if self.flow_type == "iaf":
            self.flow = IAF(z_dim,
                            n_flows=n_flows,
                            num_hidden=n_flows,
                            h_size=z_dim,
                            forget_bias=1.,
                            conv1d=False)
        if self.flow_type == "ccliniaf":
            self.flow = ccLinIAF(in_features=[z_dim],
                                 auxiliary=False,
                                 n_flows=n_flows,
                                 h_last_dim=z_dim)
        if self.flow_type == "o-sylvester":
            self.flow = SylvesterFlows(in_features=[z_dim],
                                       flow_flavour='o-sylvester',
                                       n_flows=1,
                                       h_last_dim=None)
Example #29
 def __init__(self, in_ch, out_ch):
     super(Up, self).__init__()
     self.unpool = nn.MaxUnpool1d(2)
     self.block = DoubleBlock(in_ch, out_ch)

#%% [markdown]
# Pooling layers

#%%
pool = nn.MaxPool1d(kernel_size=2, stride=2)
inputs = torch.randn(20, 16, 100)
outputs = pool(inputs)
outputs.size()

#%%
inputs = torch.tensor([[[1, 2, 3, 4, 5]]], dtype=torch.float32)
pool = nn.MaxPool1d(kernel_size=2, stride=2, return_indices=True)
outputs, indices = pool(inputs)
unpool = nn.MaxUnpool1d(kernel_size=2, stride=2)
recovers = unpool(outputs, indices)
recovers

#%%
inputs = torch.tensor([[[1, 2, 3, 4, 5]]], dtype=torch.float32)
pool = nn.MaxPool1d(kernel_size=2, stride=2, return_indices=True)
outputs, indices = pool(inputs)
unpool = nn.MaxUnpool1d(kernel_size=2, stride=2)
recovers = unpool(outputs, indices, output_size=inputs.size())
recovers

#%%
pool = nn.MaxPool1d(2, stride=2, return_indices=True)
unpool = nn.MaxUnpool1d(2, stride=2)
inputs = torch.tensor([[[1, 2, 3, 4, 5, 6, 7, 8]]], dtype=torch.float32)
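#%%
# Completing the truncated cell (a sketch): with 8 elements every pooling
# window is full, so pooling followed by unpooling restores the maxima
# in place and fills the remaining positions with zeros.
outputs, indices = pool(inputs)
unpool(outputs, indices)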