Example #1
    def __init__(self, nef=64, n_layers = 3, in_channels=3, oneconv = True, grayedge = True, 
                 embeding_size=128, n_mlp=4):
        super(Generator256, self).__init__()

        self.oneconv = oneconv
        self.grayedge = grayedge
        self.rgbchannels = 3
        self.edgechannels = 3
        if self.grayedge:
            self.edgechannels = 1
        if self.oneconv:
            self.edgechannels = self.edgechannels + self.rgbchannels  
        self.embeding_size = embeding_size
        self.embedding = StyleGenerator(embeding_size, n_mlp)
        
        modelList = []
        # 3*256*256 x --> 64*256*256 x1
        self.pad1 = ReflectionPad2d(padding=1)
        self.conv1 = Conv2d(out_channels=nef, kernel_size=3, padding=0, in_channels=in_channels)
        # 64*256*256 x1 --> 128*128*128 x2
        self.rcb1 = RCBBlock(nef, nef*2, 3, 1, embeding_size) 
        # 128*128*128 x2+y --> 128*128*128 x3
        for n in range(n_layers):
            modelList.append(ResnetBlock(nef*2, nef*2, weight=1.0, embeding_size=embeding_size))  
        # 128*128*128 x3 --> 64*256*256 
        self.rdcb1 = RDCBBlock(nef*2, nef, 3, 1, embeding_size, True)
        self.resblocks = nn.Sequential(*modelList)
        # 64*256*256 x4 --> 6*256*256
        self.pad2 = ReflectionPad2d(padding=1)
        self.relu = ReLU()
        self.conv2 = Conv2d(out_channels=self.edgechannels, kernel_size=3, padding=0, in_channels=nef*2)
        self.tanh = Tanh()     
        self.conv3 = Conv2d(out_channels=self.rgbchannels, kernel_size=3, padding=0, in_channels=nef*2)
Example #2
def build_layers(input_dim, hidden_dims, output_dim):
    '''
    Returns a list of Linear and Tanh layers with the specified layer sizes

    Parameters
    ----------
    input_dim : int
        the input dimension of the first linear layer

    hidden_dims : list
        a list of type int specifying the sizes of the hidden layers

    output_dim : int
        the output dimension of the final layer in the list

    Returns
    -------
    layers : list
        a list of Linear layers, each followed by a Tanh activation, except for the
        final Linear layer, which is not followed by a Tanh
    '''

    layer_sizes = [input_dim] + hidden_dims + [output_dim]
    layers = []

    for i in range(len(layer_sizes) - 1):
        layers.append(Linear(layer_sizes[i], layer_sizes[i + 1], bias=True))

        if i != len(layer_sizes) - 2:
            layers.append(Tanh())

    return layers
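A brief usage sketch (not part of the example above; the layer sizes and the Sequential wrapper are illustrative assumptions): the returned list is typically unpacked into a torch.nn.Sequential container.

from torch.nn import Linear, Tanh, Sequential

layers = build_layers(input_dim=10, hidden_dims=[32, 32], output_dim=1)
# Produces: Linear(10, 32), Tanh(), Linear(32, 32), Tanh(), Linear(32, 1)
model = Sequential(*layers)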
Example #3
    def create(self, architecture: Architecture, metadata: Metadata,
               arguments: Configuration) -> Any:
        # create input layer
        input_layer = self.create_other(
            "SingleInputLayer", architecture, metadata,
            Configuration({"input_size": architecture.arguments.code_size}))

        # conditional
        if "conditional" in architecture.arguments:
            # wrap the input layer with a conditional layer
            input_layer = ConditionalLayer(
                input_layer, metadata, **architecture.arguments.conditional)

        # create the hidden layers factory
        hidden_layers_factory = self.create_other(
            "HiddenLayers", architecture, metadata,
            arguments.get("hidden_layers", {}))

        # create the output layer factory
        output_layer_factory = self.create_output_layer_factory(
            architecture, metadata, arguments)

        # create the decoder
        return FeedForward(input_layer,
                           hidden_layers_factory,
                           output_layer_factory,
                           default_hidden_activation=Tanh())
Example #4
def get_activation(name):
    act_name = name.lower()
    m = re.match(r"(\w+)\((\d+\.\d+)\)", act_name)
    if m is not None:
        act_name, alpha = m.groups()
        alpha = float(alpha)
        print(act_name, alpha)
    else:
        alpha = 1.0
    if act_name == 'softplus':
        return Softplus()
    elif act_name == 'ssp':
        return SSP()
    elif act_name == 'elu':
        return ELU(alpha)
    elif act_name == 'relu':
        return ReLU()
    elif act_name == 'selu':
        return SELU()
    elif act_name == 'celu':
        return CELU(alpha)
    elif act_name == 'sigmoid':
        return Sigmoid()
    elif act_name == 'tanh':
        return Tanh()
    else:
        raise NameError("Not supported activation: {}".format(name))
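A hedged usage sketch (the calls below are illustrative, not from the source): a name may carry a float argument, e.g. 'elu(0.5)', which the regex splits into the activation name and its alpha value.

act_plain = get_activation('tanh')      # returns Tanh()
act_param = get_activation('elu(0.5)')  # regex extracts alpha=0.5, returns ELU(0.5)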
Example #5
File: model.py Project: EpiSlim/bonito
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.stride = config['encoder']['stride']
        self.alphabet = config['labels']['labels']
        self.seqdist = CTC_CRF(config['global_norm']['state_len'],
                               self.alphabet)

        insize = config['input']['features']
        winlen = config['encoder']['winlen']
        activation = activations[config['encoder']['activation']]()

        rnn = rnns[config['encoder']['rnn_type']]
        size = config['encoder']['features']

        self.encoder = Sequential(
            conv(insize, 4, ks=5, bias=True),
            activation,
            conv(4, 16, ks=5, bias=True),
            activation,
            conv(16, size, ks=winlen, stride=self.stride, bias=True),
            activation,
            Permute(2, 0, 1),
            rnn(size, size, reverse=True),
            rnn(size, size),
            rnn(size, size, reverse=True),
            rnn(size, size),
            rnn(size, size, reverse=True),
            Linear(size, self.seqdist.n_score(), bias=True),
            Tanh(),
            Scale(config['encoder']['scale']),
        )
        self.global_norm = GlobalNorm(self.seqdist)
Example #6
 def __init__(self,
              image_size: int = 256,
              image_channels: int = 4,
              pose_size: int = 3,
              intermediate_channels: int = 64,
              bottleneck_image_size: int = 32,
              bottleneck_block_count: int = 6,
              initialization_method: str = 'he'):
     super().__init__()
     self.main_body = UNetModule(
         image_size=image_size,
         image_channels=2 * image_channels + pose_size,
         output_channels=intermediate_channels,
         bottleneck_image_size=bottleneck_image_size,
         bottleneck_block_count=bottleneck_block_count,
         initialization_method=initialization_method)
     self.combine_alpha_mask = Sequential(
         Conv7(intermediate_channels, image_channels,
               initialization_method), Sigmoid())
     self.retouch_alpha_mask = Sequential(
         Conv7(intermediate_channels, image_channels,
               initialization_method), Sigmoid())
     self.retouch_color_change = Sequential(
         Conv7(intermediate_channels, image_channels,
               initialization_method), Tanh())
Example #7
    def __init__(self, noise_dim, output_channels=3):
        super(Generator, self).__init__()
        self.noise_dim = noise_dim

        ####################################
        #          YOUR CODE HERE          #
        ####################################

        self.hidden0 = Sequential(
            # input size noise dimension, which for PA3 is 100
            ConvTranspose2d(noise_dim,
                            1024,
                            kernel_size=4,
                            stride=1,
                            padding=0,
                            bias=False),
            BatchNorm2d(1024),
            ReLU(True))
        self.hidden1 = Sequential(
            # state size (1024) x 4 x 4
            # Inverse of the conv output formula (W - K + 2P) / S + 1, used for up-sampling
            # W is input size, K is kernel size, P is padding, S is stride
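            # Worked instance (added note, not in the original comments): a transposed
            # conv's output size is (W - 1)*S - 2P + K, so this layer maps
            # W=4 to (4 - 1)*2 - 2*1 + 4 = 8, matching the 8 x 8 state below.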
            ConvTranspose2d(1024,
                            512,
                            kernel_size=4,
                            stride=2,
                            padding=1,
                            bias=False),
            BatchNorm2d(512),
            ReLU(True))
        self.hidden2 = Sequential(
            # state size (512) x 8 x 8
            ConvTranspose2d(512,
                            256,
                            kernel_size=4,
                            stride=2,
                            padding=1,
                            bias=False),
            BatchNorm2d(256),
            ReLU(True))
        self.hidden3 = Sequential(
            # state size (256) x 16 x 16
            ConvTranspose2d(256,
                            128,
                            kernel_size=4,
                            stride=2,
                            padding=1,
                            bias=False),
            BatchNorm2d(128),
            ReLU(True))
        self.out = Sequential(
            # state size (128) x 32 x 32
            ConvTranspose2d(128,
                            output_channels,
                            kernel_size=4,
                            stride=2,
                            padding=1,
                            bias=False),
            # output size (3) x 64 x 64
            Tanh())
Example #8
    def __init__(self, **kwargs):
        super(SimpleStochasticModelDynamics, self).__init__()
        self.__acoustic_state_dim = kwargs['goal_dim']
        self.__action_dim = kwargs['action_dim']
        self.__state_dim = kwargs['state_dim']
        self.__linears_size = kwargs['linear_layers_size']

        input_size = self.__acoustic_state_dim + self.__state_dim + self.__action_dim
        self.__bn1 = torch.nn.BatchNorm1d(input_size)

        self.linears = ModuleList([Linear(input_size, self.__linears_size[0])])
        self.linears.extend([
            Linear(self.__linears_size[i - 1], self.__linears_size[i])
            for i in range(1, len(self.__linears_size))
        ])

        self.goal_mu = Linear(self.__linears_size[-1], kwargs['goal_dim'])
        self.goal_log_var = Linear(self.__linears_size[-1], kwargs['goal_dim'])

        self.state_mu = Linear(self.__linears_size[-1], kwargs['state_dim'])
        self.state_log_var = Linear(self.__linears_size[-1],
                                    kwargs['state_dim'])
        self.state_log_var_const = torch.zeros(kwargs['state_dim'])

        self.relu = ReLU()
        self.tanh = Tanh()

        self.apply(init_weights)  # xavier uniform init
Example #9
    def __init__(self, **kwargs):
        super(DeterministicLstmModelDynamics, self).__init__()
        self.__acoustic_state_dim = kwargs['goal_dim']
        self.__action_dim = kwargs['action_dim']
        self.__state_dim = kwargs['state_dim']
        self.__lstm_sizes = kwargs['lstm_layers_size']
        self.__linears_size = kwargs['linear_layers_size']

        input_size = self.__acoustic_state_dim + self.__state_dim + self.__action_dim
        self.__bn1 = torch.nn.BatchNorm1d(input_size)

        self.lstms = ModuleList(
            [LSTM(input_size, self.__lstm_sizes[0], batch_first=True)])
        self.lstms.extend([
            LSTM(self.__lstm_sizes[i - 1],
                 self.__lstm_sizes[i],
                 batch_first=True) for i in range(1, len(self.__lstm_sizes))
        ])
        self.hiddens = [None] * len(self.__lstm_sizes)

        self.linears = ModuleList(
            [Linear(self.__lstm_sizes[-1], self.__linears_size[0])])
        self.linears.extend([
            Linear(self.__linears_size[i - 1], self.__linears_size[i])
            for i in range(1, len(self.__linears_size))
        ])

        self.goal = Linear(self.__linears_size[-1], kwargs['goal_dim'])

        self.state = Linear(self.__linears_size[-1], kwargs['state_dim'])

        self.relu = ReLU()
        self.tanh = Tanh()

        self.apply(init_weights)  # xavier uniform init
Example #10
    def __init__(self, args, word_embeddings: TextFieldEmbedder,
                 vocab: Vocabulary) -> None:
        super().__init__(vocab)

        # parameters
        self.args = args
        self.word_embeddings = word_embeddings

        # gate
        self.W_z = nn.Linear(self.args.embedding_size, 1, bias=False)
        self.U_z = nn.Linear(self.args.embedding_size, 1, bias=False)
        self.W_r = nn.Linear(self.args.embedding_size, 1, bias=False)
        self.U_r = nn.Linear(self.args.embedding_size, 1, bias=False)
        self.W = nn.Linear(self.args.embedding_size, 1, bias=False)
        self.U = nn.Linear(self.args.embedding_size, 1, bias=False)

        # layers
        self.event_embedding = EventEmbedding(args, self.word_embeddings)
        self.attention = Attention(self.args.embedding_size,
                                   score_function='mlp')
        self.sigmoid = Sigmoid()
        self.tanh = Tanh()
        self.score = Score(self.args.embedding_size,
                           self.args.embedding_size,
                           threshold=self.args.threshold)

        # metrics
        self.accuracy = BooleanAccuracy()
        self.f1_score = F1Measure(positive_label=1)
        self.loss_function = BCELoss()
Example #11
 def __init__(self, query_dim, hidden_dim, non_linearity=Tanh()):
     super(ScoreNetwork, self).__init__()
     self.query_dim = query_dim
     self.hidden_dim = hidden_dim
     self.query_proj = Linear(query_dim, hidden_dim, bias=True)
     self.non_lin = non_linearity
     self.hidden_to_out_proj = Linear(hidden_dim, 1)
Example #12
    def __init__(self, **kwargs):
        super().__init__(kwargs)
        nb_channels_noise = 8
        nb_channels_input = 256

        self.shared_layers = kwargs.get(
            "decoder_shared_layers",
            ResBlock(nb_channels_input, norm=InstanceNorm2d))

        nb_input_channels = nb_channels_input + nb_channels_noise + \
            kwargs.get("nb_domains", 3)
        self.decoder_steps = [
            Sequential(*[
                ResBlock(nb_input_channels, norm=InstanceNorm2d)
                for _ in range(3)
            ])
        ]

        for _ in range(2):
            nb_output_channels = nb_input_channels // 2
            self.decoder_steps.append(
                ConvBlock(nb_input_channels,
                          nb_output_channels,
                          conv_layer=ConvTranspose2d,
                          transposed=True,
                          stride=2,
                          norm=LayerNorm))
            nb_input_channels = nb_output_channels + nb_channels_noise

        nb_output_channels = nb_input_channels // 2
        self.decoder_steps.append(
            Sequential(
                ConvTranspose2d(nb_input_channels, 3, kernel_size=1, stride=1),
                Tanh()))
Example #13
    def __init__(self, im_size, latent_size, num_blocks):
        """
        Generator class.
        """
        super(Generator, self).__init__()

        def block(in_channels, out_channels, kernel_size, stride, padding):
            return torch.nn.Sequential(
                ConvTranspose2d(in_channels,
                                out_channels,
                                kernel_size,
                                stride,
                                padding,
                                bias=False), ReLU())

        # Layers
        # ------
        layers = []
        # First block
        num_filters = min(im_size * 2**(num_blocks - 1), im_size * 8)
        layers.append(block(latent_size, num_filters, 4, 1, 0))

        # Middle blocks
        for i in range(num_blocks - 1):
            num_filters_ = num_filters
            num_filters = min(im_size * 2**(num_blocks - 2 - i),
                              latent_size * 8)
            layer = block(num_filters_, num_filters, 4, 2, 1)
            layers.append(layer)

        # End block
        layers.extend([ConvTranspose2d(num_filters, 1, 4, 2, 1), Tanh()])

        self.layers = torch.nn.Sequential(*layers)
Example #14
 def create_color_change_block(self):
     return Sequential(
         create_conv3_from_block_args(in_channels=self.args.start_channels,
                                      out_channels=self.args.image_channels,
                                      bias=True,
                                      block_args=self.args.block_args),
         Tanh())
Example #15
File: gin.py Project: zhuyawen/AutoGL
    def __init__(self, args):
        super(GIN, self).__init__()
        self.args = args
        self.num_layer = int(self.args["num_layers"])
        assert self.num_layer > 2, "Number of layers in GIN should not be less than 3"

        missing_keys = list(
            set([
                "features_num", "num_class", "num_graph_features",
                "num_layers", "hidden", "dropout", "act", "mlp_layers", "eps"
            ]) - set(self.args.keys()))
        if len(missing_keys) > 0:
            raise Exception("Missing keys: %s." % ','.join(missing_keys))
        if not self.num_layer == len(self.args['hidden']) + 1:
            LOGGER.warn(
                'Warning: layer size does not match the length of hidden units'
            )
        self.num_graph_features = self.args['num_graph_features']

        if self.args["act"] == "leaky_relu":
            act = LeakyReLU()
        elif self.args["act"] == "relu":
            act = ReLU()
        elif self.args["act"] == "elu":
            act = ELU()
        elif self.args["act"] == "tanh":
            act = Tanh()
        else:
            act = ReLU()

        train_eps = True if self.args["eps"] == "True" else False

        self.convs = torch.nn.ModuleList()
        self.bns = torch.nn.ModuleList()

        nn = [Linear(self.args["features_num"], self.args["hidden"][0])]
        for _ in range(self.args["mlp_layers"] - 1):
            nn.append(act)
            nn.append(Linear(self.args["hidden"][0], self.args["hidden"][0]))
        # nn.append(BatchNorm1d(self.args['hidden'][0]))
        self.convs.append(GINConv(Sequential(*nn), train_eps=train_eps))
        self.bns.append(BatchNorm1d(self.args["hidden"][0]))

        for i in range(self.num_layer - 3):
            nn = [Linear(self.args["hidden"][i], self.args["hidden"][i + 1])]
            for _ in range(self.args["mlp_layers"] - 1):
                nn.append(act)
                nn.append(
                    Linear(self.args["hidden"][i + 1],
                           self.args["hidden"][i + 1]))
            # nn.append(BatchNorm1d(self.args['hidden'][i+1]))
            self.convs.append(GINConv(Sequential(*nn), train_eps=train_eps))
            self.bns.append(BatchNorm1d(self.args["hidden"][i + 1]))

        self.fc1 = Linear(
            self.args["hidden"][self.num_layer - 3] + self.num_graph_features,
            self.args["hidden"][self.num_layer - 2],
        )
        self.fc2 = Linear(self.args["hidden"][self.num_layer - 2],
                          self.args["num_class"])
Example #16
    def __init__(self, **kwargs):
        super(SimpleStochasticActorCritic, self).__init__()
        hidden_size = kwargs['hidden_size']
        # actor
        self.actor_bn = BatchNorm1d(kwargs['state_dim'])
        self.actor_linears = ModuleList(
            [Linear(kwargs['state_dim'], hidden_size[0])])
        self.actor_linears.extend([
            Linear(hidden_size[i - 1], hidden_size[i])
            for i in range(1, len(hidden_size))
        ])
        self.mu = Linear(hidden_size[-1], kwargs['action_dim'])
        self.log_var = Linear(hidden_size[-1], kwargs['action_dim'])
        self.log_var_const = torch.zeros(kwargs['action_dim'])

        # critic
        self.critic_bn = BatchNorm1d(kwargs['state_dim'])
        self.critic_linears = ModuleList(
            [Linear(kwargs['state_dim'], hidden_size[0])])
        self.critic_linears.extend([
            Linear(hidden_size[i - 1], hidden_size[i])
            for i in range(1, len(hidden_size))
        ])
        self.v = Linear(hidden_size[-1], 1)

        self.relu = ReLU()
        self.tanh = Tanh()

        self.apply(init_weights)  # xavier uniform init
Example #17
    def __init__(self, nX, nZ, nH, nXi, nLayer, cluster_interval, activation):
        '''
        Constructor
        '''

        super(WaeAgent, self).__init__()

        Activation = None
        if activation == "relu":
            Activation = ReLU()
        if activation == "tanh":
            Activation = Tanh()
        assert Activation is not None

        stacks = [Linear(nX, nH), Activation]
        for _ in range(nLayer - 1):
            stacks.append(Linear(nH, nH))
            stacks.append(Activation)
        stacks.append(Linear(nH, nXi))

        enc = Sequential(*stacks)

        stacks = [Linear(nXi, nH), Activation]
        for _ in range(nLayer - 1):
            stacks.append(Linear(nH, nH))
            stacks.append(Activation)
        stacks.append(Linear(nH, nX))

        dec = Sequential(*stacks)

        self.enc = enc
        self.dec = dec
        self.nZ = nZ
        self.nXi = nXi
        self.cluster_interval = cluster_interval
Example #18
    def __init__(self, **kwargs):
        super(SimpleDDPGAgent, self).__init__()

        hidden_size = kwargs['hidden_size']
        # actor
        self.actor_linears = ModuleList(
            [Linear(kwargs['state_dim'], hidden_size[0])])
        self.actor_linears.extend([
            Linear(hidden_size[i - 1], hidden_size[i])
            for i in range(1, len(hidden_size))
        ])
        self.action = Linear(hidden_size[-1], kwargs['action_dim'])

        # critic
        self.critic_linears = ModuleList([
            Linear(kwargs['state_dim'] + kwargs['action_dim'], hidden_size[0])
        ])
        self.critic_linears.extend([
            Linear(hidden_size[i - 1], hidden_size[i])
            for i in range(1, len(hidden_size))
        ])
        self.q = Linear(hidden_size[-1], 1)

        self.relu = ReLU()
        self.sigmoid = Sigmoid()
        self.tanh = Tanh()

        self.apply(init_weights)  # xavier uniform init
Example #19
 def __init__(self, ll_scaling=1.0, dim_z=latent_dim):
     super(ConditionalDecoder, self).__init__()
     self.dim_z = dim_z
     ngf = 32
     self.init = genUpsample(self.dim_z, ngf * 16, 1, 0)
     self.embedding = Sequential(
         Linear(labels_dim, self.dim_z),
         BatchNorm1d(self.dim_z, momentum=momentum),
         LeakyReLU(negative_slope=negative_slope),
     )
     self.dense_init = Sequential(
         Linear(self.dim_z * 2, self.dim_z),
         BatchNorm1d(self.dim_z, momentum=momentum),
         LeakyReLU(negative_slope=negative_slope),
     )
     self.m_modules = ModuleList()  # to 4x4
     self.c_modules = ModuleList()
     for i in range(4):
         self.m_modules.append(
             genUpsample2(ngf * 2**(4 - i), ngf * 2**(3 - i), 3))
         self.c_modules.append(
             Sequential(
                 Conv2d(ngf * 2**(3 - i), colors_dim, 3, 1, 1, bias=False),
                 Tanh()))
     self.set_optimizer(optimizer,
                        lr=learning_rate * ll_scaling,
                        betas=betas)
Example #20
    def __init__(self, ngf=32, n_layers = 5):
        super(GlyphGenerator, self).__init__()
        
        encoder = []
        encoder.append(ReplicationPad2d(padding=4))
        encoder.append(Conv2d(out_channels=ngf, kernel_size=9, padding=0, in_channels=3))
        encoder.append(LeakyReLU(0.2))
        encoder.append(myGConv(ngf*2, 2, ngf))
        encoder.append(myGConv(ngf*4, 2, ngf*2))

        transformer = []
        for n in range(int(n_layers/2)-1):
            transformer.append(myGCombineBlock(ngf*4,p=0.0))
        # dropout to make model more robust    
        transformer.append(myGCombineBlock(ngf*4,p=0.5))
        transformer.append(myGCombineBlock(ngf*4,p=0.5))
        for n in range(int(n_layers/2)+1,n_layers):
            transformer.append(myGCombineBlock(ngf*4,p=0.0))  
        
        decoder = []
        decoder.append(ConvTranspose2d(out_channels=ngf*2, kernel_size=4, stride=2, padding=0, in_channels=ngf*4))
        decoder.append(BatchNorm2d(num_features=ngf*2, track_running_stats=True))
        decoder.append(LeakyReLU(0.2))
        decoder.append(ConvTranspose2d(out_channels=ngf, kernel_size=4, stride=2, padding=0, in_channels=ngf*2))
        decoder.append(BatchNorm2d(num_features=ngf, track_running_stats=True))
        decoder.append(LeakyReLU(0.2))
        decoder.append(ReplicationPad2d(padding=1))
        decoder.append(Conv2d(out_channels=3, kernel_size=9, padding=0, in_channels=ngf))
        decoder.append(Tanh())
        
        self.encoder = nn.Sequential(*encoder)
        self.transformer = nn.Sequential(*transformer)
        self.decoder = nn.Sequential(*decoder)
Example #21
    def __init__(self, labels_dim=0, D_lr=2e-4):
        super(Discriminator, self).__init__()

        self.conv = Sequential(
            Conv2d(3, 16, kernel_size=3, stride=2, padding=1),
            LeakyReLU(0.2, inplace=True),
            Conv2d(16, 32, kernel_size=3, stride=2, padding=1),
            BatchNorm2d(32),
            LeakyReLU(0.2, inplace=True),
            Conv2d(32, 64, kernel_size=3, stride=2, padding=1),
            BatchNorm2d(64),
            LeakyReLU(0.2, inplace=True),
            Conv2d(64, 128, kernel_size=3, stride=2, padding=1),
            BatchNorm2d(128),
            LeakyReLU(0.2, inplace=True),
            Conv2d(128, 256, kernel_size=4, stride=2, padding=0),
            BatchNorm2d(256),
            LeakyReLU(0.2, inplace=True),
        )
        self.flatten = Flatten()
        self.linear = Sequential(
            Linear(256, 1),
            Tanh(),
        )

        self.set_optimizer(optimizer, lr=D_lr)
Example #22
 def __init__(self, in_channels = 4, ngf = 32, n_layers = 5):
     super(SketchGenerator, self).__init__()
     
     encoder = []
     encoder.append(Conv2d(out_channels=ngf, kernel_size=9, padding=4, in_channels=in_channels))
     encoder.append(ReLU())
     encoder.append(mySConv(ngf*2, 2, ngf))
     encoder.append(mySConv(ngf*4, 2, ngf*2))
     
     transformer = []
     for n in range(n_layers):
         transformer.append(mySBlock(ngf*4+1))
     
     decoder1 = []
     decoder2 = []
     decoder3 = []
     decoder1.append(ConvTranspose2d(out_channels=ngf*2, kernel_size=4, stride=2, padding=0, in_channels=ngf*4+2))
     decoder1.append(InstanceNorm2d(num_features=ngf*2))
     decoder1.append(ReLU())
     decoder2.append(ConvTranspose2d(out_channels=ngf, kernel_size=4, stride=2, padding=0, in_channels=ngf*2+1))
     decoder2.append(InstanceNorm2d(num_features=ngf))
     decoder2.append(ReLU())
     decoder3.append(Conv2d(out_channels=3, kernel_size=9, padding=1, in_channels=ngf+1))
     decoder3.append(Tanh())
     
     self.encoder = nn.Sequential(*encoder)
     self.transformer = nn.Sequential(*transformer)
     self.decoder1 = nn.Sequential(*decoder1)
     self.decoder2 = nn.Sequential(*decoder2)
     self.decoder3 = nn.Sequential(*decoder3)
Example #23
    def __init__(self, input_shape, input_channels, \
                 n_fc_filters, h_shape, conv3d_filter_shape):
        super(recurrent_layer, self).__init__()

        #nonlinearities of the network
        self.leaky_relu = LeakyReLU(negative_slope=0.01)
        self.sigmoid = Sigmoid()
        self.tanh = Tanh()

        #find the input feature map size of the fully connected layer
        #fc7_feat_w, fc7_feat_h = self.fc_in_featmap_size(input_shape, num_pooling=6)
        fc7_feat_w, fc7_feat_h = 5, 5

        #define the fully connected layer
        self.fc7 = Linear(int(input_channels * fc7_feat_w * fc7_feat_h),
                          n_fc_filters[0])

        #define the FCConv3DLayers in 3d convolutional gru unit
        #conv3d_filter_shape = (self.n_deconvfilter[0], self.n_deconvfilter[0], 3, 3, 3)
        self.t_x_s_update = BN_FCConv3DLayer_torch(n_fc_filters[0],
                                                   conv3d_filter_shape,
                                                   h_shape)
        self.t_x_s_reset = BN_FCConv3DLayer_torch(n_fc_filters[0],
                                                  conv3d_filter_shape, h_shape)
        self.t_x_rs = BN_FCConv3DLayer_torch(n_fc_filters[0],
                                             conv3d_filter_shape, h_shape)
Example #24
 def __init__(self, ngf = 32, n_layers = 5):
     super(TextureGenerator, self).__init__()
     
     modelList = []
     modelList.append(ReplicationPad2d(padding=4))
     modelList.append(Conv2d(out_channels=ngf, kernel_size=9, padding=0, in_channels=3))
     modelList.append(ReLU())
     modelList.append(myTConv(ngf*2, 2, ngf))
     modelList.append(myTConv(ngf*4, 2, ngf*2))
     
     for n in range(int(n_layers/2)): 
         modelList.append(myTBlock(ngf*4, p=0.0))
     # dropout to make model more robust
     modelList.append(myTBlock(ngf*4, p=0.5))
     for n in range(int(n_layers/2)+1,n_layers):
         modelList.append(myTBlock(ngf*4, p=0.0))  
     
     modelList.append(ConvTranspose2d(out_channels=ngf*2, kernel_size=4, stride=2, padding=0, in_channels=ngf*4))
     modelList.append(BatchNorm2d(num_features=ngf*2, track_running_stats=True))
     modelList.append(ReLU())
     modelList.append(ConvTranspose2d(out_channels=ngf, kernel_size=4, stride=2, padding=0, in_channels=ngf*2))
     modelList.append(BatchNorm2d(num_features=ngf, track_running_stats=True))
     modelList.append(ReLU())
     modelList.append(ReplicationPad2d(padding=1))
     modelList.append(Conv2d(out_channels=3, kernel_size=9, padding=0, in_channels=ngf))
     modelList.append(Tanh())
     self.model = nn.Sequential(*modelList)
Example #25
def get_activation_from_name(a):
    act = ReLU(inplace=True)
    if a == configuration.sigmoid:
        act = Sigmoid()
    if a == configuration.tanh:
        act = Tanh()
    return act
Example #26
    def __init__(self, input_shape, n_convfilter, \
                 n_fc_filters, h_shape, conv3d_filter_shape):
        print("\ninitializing \"encoder\"")
        #input_shape = (self.batch_size, 3, img_w, img_h)
        super(encoder, self).__init__()

        self.input_shape = input_shape
        self.n_convfilter = n_convfilter
        self.n_fc_filters = n_fc_filters
        self.h_shape = h_shape
        self.conv3d_filter_shape = conv3d_filter_shape

        # #conv1
        # self.conv1a = Conv2d(input_shape[1], n_convfilter[0], 7, padding=3)
        # self.conv1b = Conv2d(n_convfilter[0], n_convfilter[0], 3, padding=1)

        # #conv2
        # self.conv2a = Conv2d(n_convfilter[0], n_convfilter[1], 3, padding=1)
        # self.conv2b = Conv2d(n_convfilter[1], n_convfilter[1], 3, padding=1)
        # self.conv2c = Conv2d(n_convfilter[0], n_convfilter[1], 1)

        # #conv3
        # self.conv3a = Conv2d(n_convfilter[1], n_convfilter[2], 3, padding=1)
        # self.conv3b = Conv2d(n_convfilter[2], n_convfilter[2], 3, padding=1)
        # self.conv3c = Conv2d(n_convfilter[1], n_convfilter[2], 1)

        # #conv4
        # self.conv4a = Conv2d(n_convfilter[2], n_convfilter[3], 3, padding=1)
        # self.conv4b = Conv2d(n_convfilter[3], n_convfilter[3], 3, padding=1)

        # #conv5
        # self.conv5a = Conv2d(n_convfilter[3], n_convfilter[4], 3, padding=1)
        # self.conv5b = Conv2d(n_convfilter[4], n_convfilter[4], 3, padding=1)
        # self.conv5c = Conv2d(n_convfilter[3], n_convfilter[4], 1)

        # #conv6
        # self.conv6a = Conv2d(n_convfilter[4], n_convfilter[5], 3, padding=1)
        # self.conv6b = Conv2d(n_convfilter[5], n_convfilter[5], 3, padding=1)

        #pooling layer
        self.pool = MaxPool2d(kernel_size=2, padding=1)

        #nonlinearities of the network
        self.leaky_relu = LeakyReLU(negative_slope=0.01)
        self.sigmoid = Sigmoid()
        self.tanh = Tanh()

        #find the input feature map size of the fully connected layer
        fc7_feat_w, fc7_feat_h = self.fc_in_featmap_size(input_shape,
                                                         num_pooling=6)
        #define the fully connected layer
        # self.fc7 = Linear(int(n_convfilter[5] * fc7_feat_w * fc7_feat_h), n_fc_filters[0])

        #define the FCConv3DLayers in 3d convolutional gru unit
        self.t_x_s_update = FCConv3DLayer_torch(n_fc_filters[0],
                                                conv3d_filter_shape, h_shape)
        self.t_x_s_reset = FCConv3DLayer_torch(n_fc_filters[0],
                                               conv3d_filter_shape, h_shape)
        self.t_x_rs = FCConv3DLayer_torch(n_fc_filters[0], conv3d_filter_shape,
                                          h_shape)
Example #27
    def __init__(self, hidden, num_aggr, config, **kwargs):
        super(ExpandingBConv, self).__init__(aggr='add', **kwargs)
        self.hidden = hidden
        self.num_aggr = num_aggr

        if config.fea_activation == 'ELU':
            self.fea_activation = ELU()
        elif config.fea_activation == 'ReLU':
            self.fea_activation = ReLU()

        self.fea_mlp = Sequential(
            Linear(hidden * self.num_aggr, hidden),
            ReLU(),
            Linear(hidden, hidden),
            self.fea_activation)

        self.aggr_mlp = Sequential(
            Linear(hidden * 2, self.num_aggr),
            Tanh())

        if config.BN == 'Y':
            self.BN = BN(hidden)
        else:
            self.BN = None

        self.edge_encoder = torch.nn.Linear(7, hidden)

        self.reset_parameters()
Example #28
    def create(self, architecture: Architecture, metadata: Metadata,
               arguments: Configuration) -> Any:
        # create the input layer
        input_layer = self.create_input_layer(architecture, metadata,
                                              arguments)
        # wrap the input layer with the special gain input layer (to receive the mask)
        input_layer = GAINInputLayer(input_layer, metadata.get_num_features())

        # create the hidden layers factory
        hidden_layers_factory = self.create_other(
            "HiddenLayers", architecture, metadata,
            arguments.get("hidden_layers", {}))

        # create the output layer factory
        # this is different from a normal discriminator
        # because the output has the size of the input
        # it predicts if each feature is real or fake
        output_layer_factory = SingleOutputLayerFactory(
            metadata.get_num_features(), activation=Sigmoid())

        # create the encoder
        return FeedForward(input_layer,
                           hidden_layers_factory,
                           output_layer_factory,
                           default_hidden_activation=Tanh())
Example #29
 def __init__(self):
     super(autoencoder, self).__init__()
     self.encoder = Sequential(Linear(28 * 28, 128), ReLU(True),
                               Linear(128, 64), ReLU(True), Linear(64, 12),
                               ReLU(True), Linear(12, 3))
     self.decoder = Sequential(Linear(3, 12), ReLU(True), Linear(12, 64),
                               ReLU(True), Linear(64, 128), ReLU(True),
                               Linear(128, 28 * 28), Tanh())
Example #30
File: layers.py Project: Kyubyong/pororo-1
def get_activation(name):
    """Get activation function by name."""
    return {
        "relu": ReLU(),
        "sigmoid": Sigmoid(),
        "tanh": Tanh(),
        "identity": Identity(),
    }[name]
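A minimal usage sketch (illustrative only): the lookup is a plain dict access, so an unknown name raises KeyError rather than falling back to a default.

activation = get_activation("tanh")  # Tanh() instance from the mapping above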