def __init__(self):
    super(SimplerCNN, self).__init__()
    # Convolutional feature extractor with spatial dropout between blocks.
    self.dropout2d_input = nn.Dropout2d(p=0.3)
    self.conv1 = nn.Conv2d(in_channels=3, out_channels=15, kernel_size=3, stride=3, padding=2)
    self.relu1 = nn.LeakyReLU()
    self.conv2 = nn.Conv2d(in_channels=15, out_channels=30, kernel_size=3, stride=3, padding=3)
    self.relu2 = nn.LeakyReLU()
    self.dropout2d_conv1 = nn.Dropout2d(p=0.5)
    self.conv3 = nn.Conv2d(in_channels=30, out_channels=40, kernel_size=4)
    self.relu3 = nn.LeakyReLU()
    self.flatten = nn.Flatten()
    self.dropout2d_conv2 = nn.Dropout2d(p=0.2)
    # Classifier head: 360 flattened conv features -> 180 -> 10 classes.
    self.linear = nn.Linear(in_features=360, out_features=180)
    self.relu4 = nn.LeakyReLU()
    self.bn1 = nn.BatchNorm1d(180)
    self.dropout3 = nn.Dropout(p=0.3)
    self.linear2 = nn.Linear(in_features=180, out_features=10)
    self.bn2 = nn.BatchNorm1d(10)
    self.softmax = nn.Softmax(dim=1)
    # set_forward() is assumed to be defined elsewhere on the class and to install the forward pass.
    self.set_forward()
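# Hypothetical usage sketch (not from the source): with 3x32x32 inputs (e.g. CIFAR-10)
# the conv stack yields 40 feature maps of size 3x3, i.e. 40 * 3 * 3 = 360 values,
# which matches the first Linear layer's in_features. The call below assumes that
# set_forward() wires the layers in the order they are defined above.
import torch

model = SimplerCNN()
x = torch.randn(8, 3, 32, 32)   # batch of 8 RGB 32x32 images
out = model(x)                  # expected shape: (8, 10) class probabilities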
def define_model(vocab_size, max_length):
    # `m` is assumed to be a namespace exposing the Keras API
    # (Input, Dropout, Dense, Embedding, LSTM, add, Model, plot_model).
    # Photo feature branch: 4096-d image features (e.g. from a pre-trained CNN encoder).
    inputs1 = m.Input(shape=(4096,))
    fe1 = m.Dropout(0.5)(inputs1)
    fe2 = m.Dense(256, activation='relu')(fe1)
    # Caption sequence branch: embed the padded token sequence and encode it with an LSTM.
    inputs2 = m.Input(shape=(max_length,))
    se1 = m.Embedding(vocab_size, 256, mask_zero=True)(inputs2)
    se2 = m.Dropout(0.5)(se1)
    se3 = m.LSTM(256)(se2)
    # Decoder: merge both branches and predict the next word over the vocabulary.
    decoder1 = m.add([fe2, se3])
    decoder2 = m.Dense(256, activation='relu')(decoder1)
    outputs = m.Dense(vocab_size, activation='softmax')(decoder2)
    model = m.Model(inputs=[inputs1, inputs2], outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    model.summary()
    m.plot_model(model, to_file='model.png', show_shapes=True)  # requires pydot/graphviz
    return model
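# Hypothetical usage (placeholder values, not from the source): vocab_size is the number
# of distinct caption tokens and max_length the padded caption length; the model is then
# trained on (photo feature, partial caption) -> next-word pairs.
caption_model = define_model(vocab_size=5000, max_length=30)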
def __init__(self):
    super(NN, self).__init__()
    # Two-layer MLP classifier over flattened image vectors.
    self.linear1 = nn.Linear(in_features=3072, out_features=256)
    self.relu1 = nn.LeakyReLU()
    self.dropout1 = nn.Dropout(p=0.3)
    self.linear2 = nn.Linear(in_features=256, out_features=10)
    self.softmax = nn.Softmax(dim=1)
    # set_forward() is assumed to be defined elsewhere on the class.
    self.set_forward()
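# Hypothetical usage sketch (not from the source): the 3072 input features correspond to
# a flattened 32x32 RGB image (32 * 32 * 3 = 3072), e.g. CIFAR-10, mapped to 10 class
# probabilities. As above, the forward pass is assumed to be wired by set_forward().
import torch

mlp = NN()
x = torch.randn(8, 3, 32, 32).reshape(8, -1)   # flatten to (8, 3072)
out = mlp(x)                                   # expected shape: (8, 10)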
def __init__(self, layer_dims, index, position, noise_std, arglist):
    super(GRUED, self).__init__()
    # This module holds the variables it needs (in a dictionary) and a set of
    # functions that operate on them.
    self.index = index
    self.layer_dims = layer_dims
    self.position = position
    self.noise_std = noise_std
    self.use_bn = True

    # Encoding modules.
    if self.position == 'first':
        en_indim = self.layer_dims[self.index]
        en_outdim = self.layer_dims[self.index]
    else:
        en_indim = self.layer_dims[self.index - 1]
        en_outdim = self.layer_dims[self.index]
    # self.en_conv = nn.Conv2d(en_indim, en_outdim, bias=False, **arglist[self.index - 1])
    self.en_gru = ConvGRU(en_indim, en_outdim)
    self.en_bn_clean = nn.BatchNorm2d(en_outdim, affine=False)
    self.en_bn_noisy = nn.BatchNorm2d(en_outdim, affine=False)
    self.en_gamma = nn.Parameter(torch.rand(en_outdim, 1, 1))
    self.en_beta = nn.Parameter(torch.rand(en_outdim, 1, 1))
    self.en_nonlin = nn.Tanh()

    # Decoding modules.
    if self.position == 'last':
        de_indim = self.layer_dims[self.index]
        de_outdim = self.layer_dims[self.index]
    else:
        de_indim = self.layer_dims[self.index + 1]
        de_outdim = self.layer_dims[self.index]
    self.de_conv = nn.ConvTranspose2d(de_indim, de_outdim, bias=False, **arglist[self.index])
    self.de_bn = nn.BatchNorm2d(de_outdim, affine=False)
    self.de_gamma = nn.Parameter(torch.rand(de_outdim, 1, 1))
    self.de_beta = nn.Parameter(torch.rand(de_outdim, 1, 1))

    self.ver_dropout = modules.Dropout(0.5)
    self.lat_dropout = modules.Dropout(0.5)
    self.parsig1 = modules.ParamSigmoid()
    self.parsig2 = modules.ParamSigmoid()
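# Hypothetical instantiation sketch (all values are assumptions): layer_dims gives the
# channel width at each level of the encoder/decoder ladder, and arglist supplies the
# conv kwargs for each transition, consumed via **arglist[index] by the ConvTranspose2d
# (and by the commented-out Conv2d). ConvGRU and modules.* are the custom classes this
# module depends on.
layer_dims = [3, 32, 64]
arglist = [dict(kernel_size=3, stride=2, padding=1),
           dict(kernel_size=3, stride=2, padding=1)]
block = GRUED(layer_dims, index=1, position='middle', noise_std=0.1, arglist=arglist)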