Example #1
    def __init__(self, input_nc, output_nc, ngf):
        super(G, self).__init__()
        self.conv1 = nn2.SpatialDilatedConvolution(input_nc, ngf, 6, 6, 1, 1,
                                                   0, 0, 2, 2)
        self.conv2 = nn.Conv2d(ngf, ngf * 2, 5, 2, 0)
        self.conv3 = nn.Conv2d(ngf * 2, ngf * 4, 6, 2, 1)
        self.conv4 = nn.Conv2d(ngf * 4, ngf * 8, 6, 2, 1)
        self.conv8 = nn.Conv2d(ngf * 8, ngf * 8, 6, 2, 1)
        self.dconv1 = nn.ConvTranspose2d(ngf * 8, ngf * 8, 6, 2, 1)
        self.dconv2 = nn.ConvTranspose2d(ngf * 8 * 2, ngf * 8, 6, 2, 1)
        self.dconv3 = nn.ConvTranspose2d(ngf * 8 * 2, ngf * 8, 6, 2, 1)
        self.dconv4 = nn.ConvTranspose2d(ngf * 8 * 2, ngf * 8, 6, 2, 1)
        self.dconv5 = nn.ConvTranspose2d(ngf * 8 * 2, ngf * 4, 7, 2, 1)
        self.dconv6 = nn.ConvTranspose2d(ngf * 4 * 2, ngf * 2, 7, 2, 1)
        self.dconv7 = nn.ConvTranspose2d(ngf * 2 * 2, ngf, 8, 2, 1)
        self.dconv8 = nn.ConvTranspose2d(ngf * 2, output_nc, 11, 1, 0)

        self.batch_norm = nn.BatchNorm2d(ngf)
        self.batch_norm2 = nn.BatchNorm2d(ngf * 2)
        self.batch_norm4 = nn.BatchNorm2d(ngf * 4)
        self.batch_norm8 = nn.BatchNorm2d(ngf * 8)

        self.leaky_relu = nn.LeakyReLU(0.2, True)
        self.relu = nn.ReLU(True)

        self.dropout = nn.Dropout(0.5)

        self.tanh = nn.Tanh()
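These excerpts do not show their import preamble; they appear to assume something like `import torch.nn as nn` together with a legacy alias such as `import torch.legacy.nn as nn2`, and the Lua Torch argument order `(nInputPlane, nOutputPlane, kW, kH, dW, dH, padW, padH, dilationW, dilationH)` for `SpatialDilatedConvolution`. Under those assumptions (the alias and the example values below are guesses, not from the excerpt), `conv1` above maps roughly onto a modern `nn.Conv2d`:

# Minimal sketch under the assumptions stated above.
import torch.nn as nn
# import torch.legacy.nn as nn2   # presumed source of SpatialDilatedConvolution

input_nc, ngf = 3, 64  # illustrative values, not taken from the excerpt
# Legacy: nn2.SpatialDilatedConvolution(input_nc, ngf, 6, 6, 1, 1, 0, 0, 2, 2)
# Modern: 6x6 kernel, stride 1, no padding, dilation 2
conv1_modern = nn.Conv2d(input_nc, ngf, kernel_size=6, stride=1, padding=0, dilation=2)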
Example #2
    def __init__(self, input_nc, output_nc, ngf):
        super(G, self).__init__()
        #self.batch_norm = nn.BatchNorm2d(ngf)

        self.conv1 = nn2.SpatialDilatedConvolution(input_nc, 3, 3, 3, 1, 1, 1,
                                                   1, 1, 1)
        self.conv1 = self.conv1.cuda()
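The snippet above is truncated after the first layer. A plausible reason for the explicit per-layer `.cuda()` calls here and in Example #3 (assuming `nn2` is the legacy port) is that legacy modules are not `torch.nn.Module` subclasses, so calling `.cuda()` on the enclosing `G` would not move them; a regular `nn.Conv2d` attribute is registered as a submodule and moved automatically:

# Sketch of the automatic-registration behaviour with modern torch.nn
# (assumption: only standard nn.Module attributes take part in .cuda()/.to() recursion).
import torch
import torch.nn as nn

class G(nn.Module):
    def __init__(self, input_nc, ngf):
        super(G, self).__init__()
        # Registered in self._modules, so G(...).cuda() moves its parameters too.
        self.conv1 = nn.Conv2d(input_nc, ngf, kernel_size=3, padding=1, dilation=1)

if torch.cuda.is_available():
    net = G(3, 64).cuda()   # no per-layer .cuda() needed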
Example #3
    def __init__(self, input_nc, output_nc, ngf):
        super(G, self).__init__()
        self.conv1 = nn2.SpatialDilatedConvolution(input_nc, ngf, 3, 3, 1, 1,
                                                   1, 1, 1, 1)
        self.conv1 = self.conv1.cuda()
        self.conv2 = nn2.SpatialDilatedConvolution(ngf, ngf * 2, 3, 3, 1, 1, 2,
                                                   2, 2, 2)
        self.conv2 = self.conv2.cuda()

        self.conv3 = nn2.SpatialDilatedConvolution(ngf * 2, ngf * 4, 3, 3, 1,
                                                   1, 4, 4, 4, 4)
        self.conv3 = self.conv3.cuda()

        self.conv5 = nn2.SpatialDilatedConvolution(ngf * 4, ngf * 4, 3, 3, 1,
                                                   1, 8, 8, 8, 8)
        self.conv5 = self.conv5.cuda()

        self.conv6 = nn2.SpatialDilatedConvolution(ngf * 4, ngf * 4, 3, 3, 1,
                                                   1, 16, 16, 16, 16)
        self.conv6 = self.conv6.cuda()

        self.conv7 = nn2.SpatialDilatedConvolution(ngf * 4, ngf * 4, 3, 3, 1,
                                                   1, 32, 32, 32, 32)
        self.conv7 = self.conv7.cuda()

        self.conv8 = nn2.SpatialDilatedConvolution(ngf * 4, ngf * 4, 3, 3, 1,
                                                   1, 64, 64, 64, 64)
        self.conv8 = self.conv8.cuda()

        self.conv4 = nn2.SpatialDilatedConvolution(ngf * 4, 3, 1, 1, 1, 1, 0,
                                                   0, 1, 1)
        self.conv4 = self.conv4.cuda()

        self.batch_norm = nn.BatchNorm2d(ngf)
        self.batch_norm = self.batch_norm.cuda()

        self.batch_norm2 = nn.BatchNorm2d(ngf * 2)
        self.batch_norm2 = self.batch_norm2.cuda()
        self.batch_norm4 = nn.BatchNorm2d(ngf * 4)
        self.batch_norm4 = self.batch_norm4.cuda()
        self.batch_norm8 = nn.BatchNorm2d(ngf * 8)
        self.batch_norm8 = self.batch_norm8.cuda()

        self.leaky_relu = nn.LeakyReLU(0.2, True)
        self.leaky_relu = self.leaky_relu.cuda()
        self.relu = nn.ReLU(True)
        self.relu = self.relu.cuda()

        self.dropout = nn.Dropout(0.5)

        self.tanh = nn.Tanh()
        self.tanh = self.tanh.cuda()
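Example #3 stacks 3x3 dilated convolutions whose dilation doubles at each layer (1, 2, 4, ..., 64) while the padding matches the dilation, so the spatial size is preserved and the receptive field grows exponentially; a 1x1 convolution (`conv4`) then projects back to 3 channels. The forward pass is not shown, but assuming the layers are applied in that order with the LeakyReLU and Tanh modules defined above, a modern torch.nn sketch looks like this:

# Hedged sketch: an nn.Conv2d rendering of the dilated stack in Example #3.
# Channel widths and the dilation schedule mirror the excerpt; the ordering of
# activations is an assumption, since the excerpt does not include forward().
import torch.nn as nn

def dilated_generator(input_nc=3, ngf=64):
    dilations = [1, 2, 4, 8, 16, 32, 64]
    channels = [input_nc, ngf, ngf * 2, ngf * 4, ngf * 4, ngf * 4, ngf * 4, ngf * 4]
    layers = []
    for i, d in enumerate(dilations):
        # padding == dilation keeps H x W unchanged for a 3x3 kernel at stride 1
        layers.append(nn.Conv2d(channels[i], channels[i + 1], kernel_size=3,
                                stride=1, padding=d, dilation=d))
        layers.append(nn.LeakyReLU(0.2, inplace=True))
    layers.append(nn.Conv2d(channels[-1], 3, kernel_size=1))  # conv4: 1x1 projection
    layers.append(nn.Tanh())
    return nn.Sequential(*layers)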
Example #4
    def __init__(self, input_nc, output_nc, ngf):
        super(G, self).__init__()
        self.conv1 = nn2.SpatialDilatedConvolution(input_nc, ngf, 5, 5, 1, 1,
                                                   2, 2, 1, 1)
        self.conv2 = nn2.SpatialDilatedConvolution(ngf, ngf * 2, 5, 5, 1, 1, 4,
                                                   4, 2, 2)
        self.conv3 = nn2.SpatialDilatedConvolution(ngf * 2, ngf * 4, 5, 5, 1,
                                                   1, 6, 6, 3, 3)
        self.conv4 = nn2.SpatialDilatedConvolution(ngf * 4, 3, 5, 5, 1, 1, 6,
                                                   6, 3, 3)

        self.batch_norm = nn.BatchNorm2d(ngf)
        self.batch_norm2 = nn.BatchNorm2d(ngf * 2)
        self.batch_norm4 = nn.BatchNorm2d(ngf * 4)
        self.batch_norm8 = nn.BatchNorm2d(ngf * 8)

        self.leaky_relu = nn.LeakyReLU(0.2, True)
        self.relu = nn.ReLU(True)

        self.dropout = nn.Dropout(0.5)

        self.tanh = nn.Tanh()
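Example #4 keeps the spatial resolution fixed by choosing the padding as dilation * (k - 1) / 2 at stride 1; the standard formula out = floor((in + 2*pad - dilation*(k - 1) - 1) / stride) + 1 confirms this for each of the four layers (again assuming the legacy argument order kW, kH, dW, dH, padW, padH, dilationW, dilationH):

# Quick check of the spatial arithmetic in Example #4.
def conv_out(size, k, stride, pad, dilation):
    return (size + 2 * pad - dilation * (k - 1) - 1) // stride + 1

# (pad, dilation) pairs for conv1..conv4; each layer preserves the input size
# because pad == dilation * (k - 1) / 2 with k = 5 and stride = 1.
for pad, dilation in [(2, 1), (4, 2), (6, 3), (6, 3)]:
    print(conv_out(256, 5, 1, pad, dilation))   # prints 256 four times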
Example #5
def FCNN():
	num_classes = 2
	n_layers_enc = 32
	n_layers_ctx = 128
	n_input = 5
	prob_drop = 0.25
	layers = []
	# Encoder
	pool = nn2.SpatialMaxPooling(2,2,2,2)
	layers.append(nn2.SpatialConvolution(n_input, n_layers_enc, 3, 3, 1, 1, 1, 1))
	layers.append(nn.ELU())
	layers.append(nn2.SpatialConvolution(n_layers_enc, n_layers_enc, 3, 3, 1, 1, 1, 1))
	layers.append(nn.ELU())
	layers.append(pool)
	# Context Module
	layers.append(nn2.SpatialDilatedConvolution(n_layers_enc, n_layers_ctx, 3, 3, 1, 1, 1, 1, 1, 1))
	layers.append(nn.ELU())
	layers.append(nn2.SpatialDropout(prob_drop))
	layers.append(nn2.SpatialDilatedConvolution(n_layers_ctx, n_layers_ctx, 3, 3, 1, 1, 2, 2, 2, 2))
	layers.append(nn.ELU())
	layers.append(nn2.SpatialDropout(prob_drop))
	layers.append(nn2.SpatialDilatedConvolution(n_layers_ctx, n_layers_ctx, 3, 3, 1, 1, 4, 4, 4, 4))
	layers.append(nn.ELU())
	layers.append(nn2.SpatialDropout(prob_drop))
	layers.append(nn2.SpatialDilatedConvolution(n_layers_ctx, n_layers_ctx, 3, 3, 1, 1, 8, 8, 8, 8))
	layers.append(nn.ELU())
	layers.append(nn2.SpatialDropout(prob_drop))
	layers.append(nn2.SpatialDilatedConvolution(n_layers_ctx, n_layers_ctx, 3, 3, 1, 1, 16, 16, 16, 16))
	layers.append(nn.ELU())
	layers.append(nn2.SpatialDropout(prob_drop))
	layers.append(nn2.SpatialDilatedConvolution(n_layers_ctx, n_layers_ctx, 3, 3, 1, 1, 32, 32, 32, 32))
	layers.append(nn.ELU())
	layers.append(nn2.SpatialDropout(prob_drop))
	layers.append(nn2.SpatialDilatedConvolution(n_layers_ctx, n_layers_ctx, 3, 3, 1, 1, 64, 64, 64, 64))
	layers.append(nn.ELU())
	layers.append(nn2.SpatialDropout(prob_drop))
	layers.append(nn2.SpatialDilatedConvolution(n_layers_ctx, n_layers_enc, 1, 1))
	layers.append(nn.ELU())	# Not in the paper
	# Decoder
	layers.append(nn2.SpatialMaxUnpooling(pool))
	layers.append(nn2.SpatialConvolution(n_layers_enc, n_layers_enc, 3, 3, 1, 1, 1, 1))
	layers.append(nn.ELU())
	layers.append(nn2.SpatialConvolution(n_layers_enc, num_classes, 3, 3, 1, 1, 1, 1))
	layers.append(nn.ELU())
	layers.append(nn.Softmax(dim=1)) # Not in the paper
	return nn.Sequential(*layers)
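As written, the function mixes legacy `nn2` modules with `nn.Sequential`, and `SpatialMaxUnpooling(pool)` reuses the pooling indices recorded by `pool`. A hedged modern torch.nn rendering of the same encoder / context-module / decoder layout (names and defaults below are illustrative, not from the original) passes the pooling indices explicitly via `MaxPool2d(return_indices=True)` and `MaxUnpool2d`:

# Sketch only: modern torch.nn version of the FCNN above, under the assumptions
# stated in the lead-in. Dropout2d stands in for the legacy SpatialDropout.
import torch
import torch.nn as nn

class FCNN(nn.Module):
    def __init__(self, n_input=5, num_classes=2, n_layers_enc=32,
                 n_layers_ctx=128, prob_drop=0.25):
        super(FCNN, self).__init__()
        self.encoder = nn.Sequential(
            nn.Conv2d(n_input, n_layers_enc, 3, padding=1), nn.ELU(),
            nn.Conv2d(n_layers_enc, n_layers_enc, 3, padding=1), nn.ELU(),
        )
        self.pool = nn.MaxPool2d(2, stride=2, return_indices=True)
        # Context module: 3x3 convolutions with exponentially growing dilation.
        ctx, in_ch = [], n_layers_enc
        for d in [1, 2, 4, 8, 16, 32, 64]:
            ctx += [nn.Conv2d(in_ch, n_layers_ctx, 3, padding=d, dilation=d),
                    nn.ELU(), nn.Dropout2d(prob_drop)]
            in_ch = n_layers_ctx
        ctx += [nn.Conv2d(n_layers_ctx, n_layers_enc, 1), nn.ELU()]
        self.context = nn.Sequential(*ctx)
        self.unpool = nn.MaxUnpool2d(2, stride=2)
        self.decoder = nn.Sequential(
            nn.Conv2d(n_layers_enc, n_layers_enc, 3, padding=1), nn.ELU(),
            nn.Conv2d(n_layers_enc, num_classes, 3, padding=1), nn.ELU(),
            nn.Softmax(dim=1),
        )

    def forward(self, x):
        x = self.encoder(x)
        x, indices = self.pool(x)   # keep indices, like SpatialMaxUnpooling(pool)
        x = self.context(x)
        x = self.unpool(x, indices)
        return self.decoder(x)

# Example use: FCNN()(torch.randn(1, 5, 64, 64)) -> shape (1, 2, 64, 64)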