Code Example #1
File: Pytorch_Classifier.py Project: sebltm/ANC-ML
    def __init__(self):
        super(Net, self).__init__()

        self.device = torch.device("cuda")

        self.iterator = AudioDataset.NoisyMusicDataset(
            noisy_music_folder="Processed")

        # Three conv blocks: Conv1d -> Tanhshrink -> Conv1d -> MaxPool1d -> Dropout
        self.layer1 = nn.Sequential(
            nn.Conv1d(2, 32, kernel_size=3, stride=1, padding=0),
            nn.Tanhshrink(),
            nn.Conv1d(32, 64, kernel_size=3, stride=1, padding=0),
            nn.MaxPool1d(kernel_size=2, stride=1), nn.Dropout(0.25))

        self.layer2 = nn.Sequential(
            nn.Conv1d(64, 64, kernel_size=3, stride=1, padding=0),
            nn.Tanhshrink(),
            nn.Conv1d(64, 128, kernel_size=3, stride=1, padding=0),
            nn.MaxPool1d(kernel_size=2, stride=1), nn.Dropout(0.5))

        self.layer3 = nn.Sequential(
            nn.Conv1d(128, 128, kernel_size=3, stride=1, padding=0),
            nn.Tanhshrink(),
            nn.Conv1d(128, 512, kernel_size=3, stride=1, padding=0),
            nn.MaxPool1d(kernel_size=2, stride=1), nn.Dropout(0.5))

        # Classifier head; dim=1 avoids the deprecated implicit-dim Softmax.
        self.fc1 = nn.Sequential(nn.Linear(512, 10), nn.Tanhshrink(),
                                 nn.Dropout(0.5), nn.Linear(10, 2),
                                 nn.Softmax(dim=1))
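Every example on this page revolves around nn.Tanhshrink, which computes x - tanh(x): near-zero response for small inputs, approximately linear for large ones. A minimal standalone sketch:

import torch
import torch.nn as nn

act = nn.Tanhshrink()
x = torch.tensor([-2.0, -0.5, 0.0, 0.5, 2.0])
print(act(x))             # tensor([-1.0360, -0.0379,  0.0000,  0.0379,  1.0360])
print(x - torch.tanh(x))  # identical by definition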
Code Example #2
    def __init__(self, matrix, n_modules, f_in=50, f_out=1):
        super(FLUX, self).__init__()
        # gene to flux
        self.inSize = f_in

        # One small encoder per module; Tanhshrink after each linear layer.
        self.m_encoder = nn.ModuleList([
            nn.Sequential(
                nn.Linear(self.inSize, 8, bias=False), nn.Tanhshrink(),
                nn.Linear(8, f_out),
                nn.Tanhshrink()  # alternative: nn.SELU()
            ) for _ in range(n_modules)
        ])
Code Example #3
    def __init__(self):
        super(GCLoss, self).__init__()

        # Fixed 3x3 Sobel kernels (horizontal and vertical gradients),
        # replicated across the 3 input channels.
        sobel_x = torch.Tensor([[1, 0, -1], [2, 0, -2], [1, 0, -1]]).view(
            (1, 1, 3, 3)).repeat(1, 3, 1, 1)
        sobel_y = torch.Tensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]).view(
            (1, 1, 3, 3)).repeat(1, 3, 1, 1)

        def sobel_conv(kernel):
            # 3-channel input, 1-channel output, frozen (non-trainable) weights
            conv = nn.Conv2d(3, 1, kernel_size=3, stride=1, padding=0,
                             bias=False)
            conv.weight = nn.Parameter(kernel, requires_grad=False)
            return conv

        self.G_x_B = sobel_conv(sobel_x)
        self.G_y_B = sobel_conv(sobel_y)
        self.G_x_R = sobel_conv(sobel_x)
        self.G_y_R = sobel_conv(sobel_y)

        self.af_B = nn.Tanhshrink()
        self.af_R = nn.Tanhshrink()
Code Example #4
    def create_str_to_activations_converter(self):
        """Creates a dictionary which converts strings to activations"""
        # dim=1 on the softmax-family entries avoids the deprecated
        # implicit-dim behavior.
        str_to_activations_converter = {
            "elu": nn.ELU(),
            "hardshrink": nn.Hardshrink(),
            "hardtanh": nn.Hardtanh(),
            "leakyrelu": nn.LeakyReLU(),
            "logsigmoid": nn.LogSigmoid(),
            "prelu": nn.PReLU(),
            "relu": nn.ReLU(),
            "relu6": nn.ReLU6(),
            "rrelu": nn.RReLU(),
            "selu": nn.SELU(),
            "sigmoid": nn.Sigmoid(),
            "softplus": nn.Softplus(),
            "logsoftmax": nn.LogSoftmax(dim=1),
            "softshrink": nn.Softshrink(),
            "softsign": nn.Softsign(),
            "tanh": nn.Tanh(),
            "tanhshrink": nn.Tanhshrink(),
            "softmin": nn.Softmin(dim=1),
            "softmax": nn.Softmax(dim=1),
            "none": None
        }
        return str_to_activations_converter
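A hypothetical usage sketch (not from the original project), assuming the converter picks a layer's activation from a config string:

import torch
import torch.nn as nn

# Trimmed-down converter; "none" means no activation layer is appended.
converter = {"tanhshrink": nn.Tanhshrink(), "relu": nn.ReLU(), "none": None}
name = "tanhshrink"  # e.g. read from a config file
layers = [nn.Linear(16, 16)]
if converter[name] is not None:
    layers.append(converter[name])
model = nn.Sequential(*layers)
print(model(torch.randn(4, 16)).shape)  # torch.Size([4, 16])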
Code Example #5
    def __init__(self, alpha=1.0):
        super().__init__()
        self.activations = [
            nn.ELU(),
            nn.Hardshrink(),
            nn.Hardtanh(),
            nn.LeakyReLU(),
            nn.LogSigmoid(),
            nn.ReLU(),
            nn.PReLU(),
            nn.SELU(),
            nn.CELU(),
            nn.Sigmoid(),
            nn.Softplus(),
            nn.Softshrink(),
            nn.Softsign(),
            nn.Tanh(),
            nn.Tanhshrink()
        ]

        # One learnable scalar per activation.
        self.P = [
            torch.nn.Parameter(torch.randn(1, requires_grad=True))
            for _ in self.activations
        ]

        # Register each activation as a submodule and each scalar as a
        # parameter, keyed by the activation's class name.
        for activation, param in zip(self.activations, self.P):
            activation_name = str(activation).split("(")[0]
            self.add_module(name=activation_name, module=activation)
            self.register_parameter(name=activation_name + "p", param=param)
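The excerpt stops before the forward pass. One plausible completion (an assumption, not the project's code) mixes all activations using softmax-normalized weights built from the registered scalars:

    def forward(self, x):
        # softmax over the per-activation scalars -> convex mixture weights
        weights = torch.softmax(torch.cat(list(self.P)), dim=0)
        return sum(w * act(x) for w, act in zip(weights, self.activations))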
Code Example #6
File: network.py Project: n778509775/NWCQ
    def __init__(self, num_inputs):
        super(Decoder_c, self).__init__()
        self.decoder = nn.Sequential(nn.Linear(num_inputs, num_inputs),
                                     nn.Dropout(0.5), nn.Tanhshrink(),
                                     nn.Linear(num_inputs, num_inputs))

        self.decoder.apply(init_weights)
Code Example #7
File: model.py Project: Filter-Bubble/stroll
    def __init__(self,
                 in_feats=64,
                 out_feats=64,
                 activation='relu',
                 skip=False):
        super(RGCN, self).__init__()
        self.in_feats = in_feats
        self.out_feats = out_feats
        self.activation = activation
        self.skip = skip

        # weight bases in equation (3)
        self.weight = nn.Parameter(
            torch.Tensor(3, self.in_feats, self.out_feats))
        nn.init.kaiming_uniform_(self.weight,
                                 mode='fan_in',
                                 nonlinearity='relu')

        self.batchnorm = nn.BatchNorm1d(self.out_feats)

        if activation == 'relu':
            self.activation_ = nn.ReLU()
        elif activation == 'tanhshrink':
            self.activation_ = nn.Tanhshrink()
        else:
            print('Activation function not implemented.')
            sys.exit(-1)
Code Example #8
File: model.py Project: Filter-Bubble/stroll
    def __init__(self,
                 in_feats=64,
                 out_feats=64,
                 activation='relu',
                 batchnorm=True):
        super(Embedding, self).__init__()
        self.in_feats = in_feats
        self.out_feats = out_feats
        self.activation = activation
        self.batchnorm = batchnorm

        layers = []

        layer = nn.Linear(self.in_feats, self.out_feats)
        nn.init.kaiming_uniform_(layer.weight,
                                 mode='fan_in',
                                 nonlinearity='relu')
        layers.append(layer)

        if self.batchnorm:
            layer = nn.BatchNorm1d(self.out_feats)
            layers.append(layer)

        if self.activation == 'relu':
            layer = nn.ReLU()
        elif self.activation == 'tanhshrink':
            layer = nn.Tanhshrink()
        else:
            print('Activation function not implemented.')
            sys.exit(-1)
        layers.append(layer)

        self.fc = nn.Sequential(*layers)
Code Example #9
def parse_activation(act):
    if act is None:
        return lambda x: x

    act, kwargs = parse_str(act)

    if act == 'sigmoid': return nn.Sigmoid(**kwargs)
    if act == 'tanh': return nn.Tanh(**kwargs)
    if act == 'relu': return nn.ReLU(**kwargs, inplace=True)
    if act == 'relu6': return nn.ReLU6(**kwargs, inplace=True)
    if act == 'elu': return nn.ELU(**kwargs, inplace=True)
    if act == 'selu': return nn.SELU(**kwargs, inplace=True)
    if act == 'prelu': return nn.PReLU(**kwargs)
    if act == 'leaky_relu': return nn.LeakyReLU(**kwargs, inplace=True)
    if act == 'threshold': return nn.Threshold(**kwargs, inplace=True)
    if act == 'hardtanh': return nn.Hardtanh(**kwargs, inplace=True)
    if act == 'log_sigmoid': return nn.LogSigmoid(**kwargs)
    if act == 'softplus': return nn.Softplus(**kwargs)
    if act == 'softshrink': return nn.Softshrink(**kwargs)
    if act == 'tanhshrink': return nn.Tanhshrink(**kwargs)
    if act == 'softmin': return nn.Softmin(**kwargs)
    if act == 'softmax': return nn.Softmax(**kwargs)
    if act == 'softmax2d': return nn.Softmax2d(**kwargs)
    if act == 'log_softmax': return nn.LogSoftmax(**kwargs)

    raise ValueError(f'unknown activation: {repr(act)}')
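parse_activation relies on a parse_str helper that the snippet does not show. A hypothetical sketch of what it might look like, splitting a spec such as "name(key=value, ...)" into a name and a kwargs dict:

import ast
import re

def parse_str(spec):
    # "tanhshrink" -> ("tanhshrink", {}); "elu(alpha=0.5)" -> ("elu", {"alpha": 0.5})
    match = re.fullmatch(r"(\w+)(?:\((.*)\))?", spec.strip())
    name, arg_str = match.group(1), match.group(2)
    kwargs = {}
    if arg_str:
        for pair in arg_str.split(","):
            key, value = pair.split("=")
            kwargs[key.strip()] = ast.literal_eval(value.strip())
    return name, kwargs

print(parse_str("tanhshrink"))                      # ('tanhshrink', {})
print(parse_str("leaky_relu(negative_slope=0.1)"))  # ('leaky_relu', {'negative_slope': 0.1})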
Code Example #10
def initializePopulation(population_size, runs, featuresUsed):
    # create initial population and store in dictionary for competition
    populationDict = {}
    for i in range(population_size):
        # initialize learning rate - log-uniform over [10^-6, 10^-1]
        learning_rate = 10**(-1*np.random.uniform(1, 6))

        # initialize epochs - range [1, 99]
        epochs = int(np.random.uniform(1, 100))

        # initialize momentum - range [0, 0.99]
        momentum = np.random.uniform(0, 0.99)

        # initialize layerSizes - range [2, 80]
        layerSizes = 2 + np.random.randint(79)

        # initialize activationFunction
        activationFunctionList = [nn.ReLU(), nn.Sigmoid(), nn.Tanh(),
                                  nn.Tanhshrink(), nn.Hardtanh()]
        activationFunction = random.choice(activationFunctionList)

        # initialize encoding of solution and find accuracy
        populationDict[i] = MyEncoding(i, learning_rate, epochs, momentum,
                                       featuresUsed, layerSizes,
                                       activationFunction)
        populationDict[i].accuracy = cross_train(populationDict[i], runs)

    return populationDict
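A quick check of the learning-rate trick above: negating a uniformly sampled exponent yields a log-uniform sample, so every decade between 1e-6 and 1e-1 is equally likely.

import numpy as np

lr = 10 ** (-np.random.uniform(1, 6))
assert 1e-6 <= lr <= 1e-1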
Code Example #11
def str2act(s):
    # String comparison must use ==; `is` tests object identity and is
    # unreliable for strings.
    if s == 'none':
        return None
    elif s == 'hardtanh':
        return nn.Hardtanh()
    elif s == 'sigmoid':
        return nn.Sigmoid()
    elif s == 'relu6':
        return nn.ReLU6()
    elif s == 'tanh':
        return nn.Tanh()
    elif s == 'tanhshrink':
        return nn.Tanhshrink()
    elif s == 'hardshrink':
        return nn.Hardshrink()
    elif s == 'leakyrelu':
        return nn.LeakyReLU()
    elif s == 'softshrink':
        return nn.Softshrink()
    elif s == 'softsign':
        return nn.Softsign()
    elif s == 'relu':
        return nn.ReLU()
    elif s == 'prelu':
        return nn.PReLU()
    elif s == 'softplus':
        return nn.Softplus()
    elif s == 'elu':
        return nn.ELU()
    elif s == 'selu':
        return nn.SELU()
    else:
        raise ValueError("[!] Invalid activation function.")
Code Example #12
    def forward(self, data):

        # NOTE: the Tanhshrink instance is created but immediately
        # overridden; F.relu is the activation actually used below.
        act = nn.Tanhshrink()
        act = F.relu
        #act = nn.LeakyReLU(0.25)

        # first conv block
        data.x = act(self.conv1(
            data.x, data.edge_index, data.edge_attr))
        cluster = get_preloaded_cluster(data.cluster0, data.batch)
        data = community_pooling(cluster, data)

        # second conv block
        data.x = act(self.conv2(
            data.x, data.edge_index, data.edge_attr))
        cluster = get_preloaded_cluster(data.cluster1, data.batch)
        x, batch = max_pool_x(cluster, data.x, data.batch)

        # FC
        x = scatter_mean(x, batch, dim=0)
        x = act(self.fc1(x))
        x = self.fc2(x)
        #x = F.dropout(x, training=self.training)

        return x
Code Example #13
File: models.py Project: imperial-qore/COSCO
    def __init__(self):
        super(energy_latency2_10, self).__init__()
        self.name = "energy_latency2_10"
        self.find = nn.Sequential(nn.Linear(10 * 14, 128), nn.Softplus(),
                                  nn.Linear(128, 128), nn.Softplus(),
                                  nn.Linear(128, 64), nn.Tanhshrink(),
                                  nn.Linear(64, 2), nn.Sigmoid())
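A standalone shape check of the head above (the Sequential is rebuilt inline so the snippet runs on its own): a flattened 10x14 state maps to a 2-dimensional sigmoid output.

import torch
import torch.nn as nn

find = nn.Sequential(nn.Linear(10 * 14, 128), nn.Softplus(),
                     nn.Linear(128, 128), nn.Softplus(),
                     nn.Linear(128, 64), nn.Tanhshrink(),
                     nn.Linear(64, 2), nn.Sigmoid())
print(find(torch.randn(4, 10 * 14)).shape)  # torch.Size([4, 2])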
Code Example #14
File: models.py Project: imperial-qore/COSCO
    def __init__(self):
        super(energy_50, self).__init__()
        self.name = "energy_50"
        self.find = nn.Sequential(nn.Linear(50 * 51, 128), nn.Softplus(),
                                  nn.Linear(128, 128), nn.Softplus(),
                                  nn.Linear(128, 64), nn.Tanhshrink(),
                                  nn.Linear(64, 1), nn.Sigmoid())
Code Example #15
File: myModules.py Project: salinelake/cgsp
    def skipadd_forward(self, x):
        """FORWARD CALCULATION - amplitude part; skip connection through adding.

        Args:
            x (Tensor): configuration with shape (1, batch, channel, ncell);
                already reordered if pbc is true.
        Returns:
            Tensor: wave function amplitude with shape
                (groups, batch, n_skipch, ncell).
        """
        lrelu = nn.LeakyReLU(0.1)
        tanhsh = nn.Tanhshrink()
        x = self.causal_in(x)
        x = tanhsh(x)
        skip = 0
        for i, dil_ly in enumerate(self.dil_act):
            x = dil_ly(x)
            x = lrelu(x)
            # (1, batch, ngroup * n_skipch, ncell)
            skip += self.skip_1x1[i](x) / self.n_resch
        skip = lrelu(skip)
        skip = skip.permute(0, -1, 1, 2).reshape(self.ncell, -1,
                                                 self.ngroup * self.n_skipch)
        skip = self.local_linear(skip)
        skip = skip.reshape(self.ncell, -1, self.ngroup,
                            self.n_skipch).permute(2, 1, 3, 0)
        return skip
Code Example #16
File: myModules.py Project: salinelake/cgsp
    def forward(self, x):
        """FORWARD CALCULATION - amplitude part. direct skip connection
        Args:
            configuration (Tensor): tensor with the shape (1,batch, channel, ncell).
            the configuration is already reordered if pbc is true
        Returns:
            output(Tensor): wave function amplitude with the shape (groups, batch, n_skipth, ncell) .
        """
        nbatch = x.shape[1]
        lrelu = nn.LeakyReLU(0.1)
        tanhsh = nn.Tanhshrink()
        x = self.causal_in(x)
        x = tanhsh(x)
        skip = []
        for i, dil_ly in enumerate(self.dil_act):
            x = dil_ly(x)
            x = lrelu(x)  # #(1, batch, n_resch, ncell)
            skip.append(x.clone())
        skip = th.cat(skip, 2)  #(1, batch, n_resch*layers, ncell)
        skip = self.skip_conv(skip)  #(1, batch, n_resch, ncell)
        skip = lrelu(skip)
        skip = skip.squeeze(0).permute(-1, 0, 1)  #(ncell,batch, n_resch)
        skip = self.local_linear(skip)  #(ncell,batch, ngroup*n_skipch*2)
        skip = lrelu(skip)

        skip = skip.reshape(self.ncell, nbatch, self.ngroup,
                            -1).permute(0, 2, 1,
                                        3).reshape(self.ncell * self.ngroup,
                                                   nbatch, -1)
        skip = self.final_linear(skip)  # (ncell*ngroup,batch, n_skipch)
        skip = skip.reshape(self.ncell, self.ngroup, -1,
                            self.n_skipch).permute(1, 2, 3, 0)
        return skip
Code Example #17
    def mutateActivationFunction(self, id):
        # randomly re-select an activation function
        activationFunctionList = [nn.ReLU(), nn.Sigmoid(), nn.Tanh(),
                                  nn.Tanhshrink(), nn.Hardtanh()]
        new_activationFunction = random.choice(activationFunctionList)
        return MyEncoding(id, self.learningRate, self.epochs, self.momentum,
                          self.featuresUsed, self.layerSizes,
                          new_activationFunction)
Code Example #18
    def forward(self, data):

        # NOTE: as in Code Example #12, the Tanhshrink instance is
        # immediately overridden; F.relu is the activation actually used.
        act = nn.Tanhshrink()
        act = F.relu
        #act = nn.LeakyReLU(0.25)

        data.x = act(self.conv1(data.x, data.edge_index, data.edge_attr))
        cluster = community_detection(data.internal_edge_index,
                                      data.num_nodes,
                                      edge_attr=None,
                                      batches=data.batches)
        data = community_pooling(cluster, data)

        data.x = act(self.conv2(data.x, data.edge_index, data.edge_attr))
        cluster = community_detection(data.internal_edge_index,
                                      data.num_nodes,
                                      edge_attr=None)
        x, batch = max_pool_x(cluster, data.x, data.batch)

        x = scatter_mean(x, batch, dim=0)
        x = act(self.fc1(x))
        x = self.fc2(x)
        #x = F.dropout(x, training=self.training)

        return x
Code Example #19
File: models.py Project: twoleggedeye/youtube8m
    def __init__(self, inner_size=1024, bil_size=32):
        super().__init__()
        self._layer_1 = nn.Linear(512 + 128, bil_size)
        self._layer_2 = nn.Bilinear(512 + 128, bil_size, inner_size)

        self._activation = nn.Tanhshrink()
        self._output = nn.Linear(inner_size, YOUTUBE8M_LABELS_N)
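The forward pass is not part of the excerpt. One plausible completion (an assumption; nn.Bilinear takes two inputs, so the first linear layer supplies the second argument):

    def forward(self, features):
        # features: (batch, 512 + 128) input vector
        hidden = self._layer_2(features, self._layer_1(features))
        return self._output(self._activation(hidden))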
Code Example #20
File: utils.py Project: zqp111/NewMB-813
def get_activation(activation_type):
    if activation_type == "relu":
        return nn.ReLU()
    elif activation_type == "relu6":
        return nn.ReLU6()
    elif activation_type == "prelu":
        return nn.PReLU()
    elif activation_type == "selu":
        return nn.SELU()
    elif activation_type == "celu":
        return nn.CELU()
    elif activation_type == "gelu":
        return nn.GELU()
    elif activation_type == "sigmoid":
        return nn.Sigmoid()
    elif activation_type == "softplus":
        return nn.Softplus()
    elif activation_type == "softshrink":
        return nn.Softshrink()
    elif activation_type == "softsign":
        return nn.Softsign()
    elif activation_type == "tanh":
        return nn.Tanh()
    elif activation_type == "tanhshrink":
        return nn.Tanhshrink()
    else:
        raise ValueError("Unknown activation type {}".format(activation_type))
Code Example #21
File: nn_ops.py Project: yuguo68/pytorch
    def __init__(self):
        super(NNActivationModule, self).__init__()
        self.activations = nn.ModuleList([
            nn.ELU(),
            nn.Hardshrink(),
            nn.Hardsigmoid(),
            nn.Hardtanh(),
            nn.Hardswish(),
            nn.LeakyReLU(),
            nn.LogSigmoid(),
            # nn.MultiheadAttention(),
            nn.PReLU(),
            nn.ReLU(),
            nn.ReLU6(),
            nn.RReLU(),
            nn.SELU(),
            nn.CELU(),
            nn.GELU(),
            nn.Sigmoid(),
            nn.SiLU(),
            nn.Mish(),
            nn.Softplus(),
            nn.Softshrink(),
            nn.Softsign(),
            nn.Tanh(),
            nn.Tanhshrink(),
            # nn.Threshold(0.1, 20),
            nn.GLU(),
            nn.Softmin(),
            nn.Softmax(),
            nn.Softmax2d(),
            nn.LogSoftmax(),
            # nn.AdaptiveLogSoftmaxWithLoss(),
        ])
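Only __init__ is shown. A hypothetical forward that exercises every activation in the list (note the shape constraints: nn.GLU halves the last dimension and nn.Softmax2d expects 3D/4D input, so x must satisfy all of them):

    def forward(self, x):
        # collect each activation's output on the same input
        return [activation(x) for activation in self.activations]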
Code Example #22
File: network.py Project: n778509775/NWCQ
    def __init__(self, num_inputs):
        super(Encoder, self).__init__()
        self.encoder = nn.Sequential(nn.BatchNorm1d(num_inputs),
                                     nn.Linear(num_inputs, num_inputs),
                                     nn.Dropout(0.5), nn.Tanhshrink(),
                                     nn.Linear(num_inputs, num_inputs))

        self.encoder.apply(init_weights)
Code Example #23
    def mutateEverything(self, id):
        # re-initialize everything

        # activationFunction options
        activationFunctionList = [nn.ReLU(), nn.Sigmoid(), nn.Tanh(),
                                  nn.Tanhshrink(), nn.Hardtanh()]
        return MyEncoding(id,
                          10**(-1*np.random.uniform(1, 6)),  # learning rate
                          int(np.random.uniform(1, 500)),    # epochs
                          np.random.uniform(0, 0.99),        # momentum
                          self.featuresUsed,
                          2 + np.random.randint(79),         # layer size in [2, 80]
                          random.choice(activationFunctionList))
Code Example #24
    def __init__(
        self,
        in_manifold: RiemannianManifold,
        out_manifold: RiemannianManifold,
        in_dimension: int,
        out_dimension: int,
        non_linearity=None,
        num_poles=3,
        log_base_init: torch.Tensor = None,
        exp_base_init: torch.Tensor = None,
        ortho_init=False,
    ):
        super(ManifoldLayer, self).__init__()
        self.in_manifold = in_manifold
        self.out_manifold = out_manifold
        self.in_dimension = in_dimension
        self.out_dimension = out_dimension
        self.num_poles = num_poles
        if log_base_init is not None:
            self.log_base = ManifoldParameter(log_base_init,
                                              manifold=in_manifold,
                                              lr_scale=1)
        else:
            self.log_base = ManifoldParameter(torch.Tensor(
                num_poles, in_dimension),
                                              manifold=in_manifold,
                                              lr_scale=1)
        if exp_base_init is not None:
            self.exp_base = ManifoldParameter(exp_base_init,
                                              manifold=out_manifold,
                                              lr_scale=1)
        else:
            self.exp_base = ManifoldParameter(torch.Tensor(out_dimension),
                                              manifold=out_manifold,
                                              lr_scale=1)

        self.linear_layer = nn.Linear(in_dimension * num_poles,
                                      out_dimension,
                                      bias=False)
        if ortho_init:
            orthogonal_(self.linear_layer.weight)
            with torch.no_grad():
                self.linear_layer.weight /= sqrt(in_dimension)

        self.non_linearity = None
        self.non_linearity_name = non_linearity
        # Note: names not listed below silently leave non_linearity as None.
        if non_linearity is not None:
            if non_linearity == "relu":
                self.non_linearity = nn.ReLU()
            elif non_linearity == "tanh":
                self.non_linearity = nn.Tanh()
            elif non_linearity == "tanhshrink":
                self.non_linearity = nn.Tanhshrink()
            elif non_linearity == "leakyrelu":
                self.non_linearity = nn.LeakyReLU()
            elif non_linearity == "elu":
                self.non_linearity = nn.ELU()
Code Example #25
    def __init__(self, args, kgemb_dim, encoder: Seq2VecEncoder, vocab):
        super().__init__(vocab)
        self.args = args
        self.encoder = encoder
        self.first_and_last_emb = encoder.get_output_dim()
        self.loss = nn.MSELoss()
        self.projector = nn.Linear(kgemb_dim, encoder.get_output_dim())
        self.projector2 = nn.Linear(kgemb_dim, encoder.get_output_dim())
        self.projector3 = nn.Linear(kgemb_dim, encoder.get_output_dim())
        self.nonlinear = nn.Tanhshrink()
Code Example #26
File: models.py Project: imperial-qore/COSCO
    def __init__(self):
        super(energy_50_RL, self).__init__()
        self.name = "energy_RL"
        self.feature = nn.Sequential(nn.Linear(50 * 51, 128), nn.Softplus(),
                                     nn.Linear(128, 128), nn.Softplus())
        self.value = nn.Sequential(nn.Linear(128, 64), nn.Tanhshrink(),
                                   nn.Linear(64, 1), nn.Sigmoid())
        self.action = nn.Sequential(nn.Linear(128, 256), nn.Softplus(),
                                    nn.Linear(256, 50 * 50))
        self.softmax = nn.Softmax(dim=1)
Code Example #27
File: models.py Project: imperial-qore/COSCO
    def __init__(self):
        super(energy_latency_50_RL, self).__init__()
        self.name = "energy_latency_50_" + str(Coeff_Energy) + "_" + str(
            Coeff_Latency) + "_RL"
        self.feature = nn.Sequential(nn.Linear(50 * 52, 128), nn.Softplus(),
                                     nn.Linear(128, 128), nn.Softplus())
        self.value = nn.Sequential(nn.Linear(128, 64), nn.Tanhshrink(),
                                   nn.Linear(64, 1), nn.Sigmoid())
        self.action = nn.Sequential(nn.Linear(128, 256), nn.Softplus(),
                                    nn.Linear(256, 10 * 10))
        self.softmax = nn.Softmax(dim=1)
Code Example #28
File: rfc_main.py Project: NSWC-Crane/rf_zsl
    def __init__(self, input_size, feature_size):
        super().__init__()
        self.input_layer = nn.Linear(input_size, feature_size, bias=False)
        self.hidden_layer_1 = nn.Linear(16, 16, bias=False)
        self.hidden_layer_2 = nn.Linear(128, 128, bias=False)
        #self.hidden_layer_3 = nn.Linear(32, 128, bias=False)
        self.output_layer = nn.Linear(16, feature_size, bias=False)
        self.prelu = nn.PReLU(1, 0.25)
        self.silu = nn.SiLU()
        self.elu = nn.ELU()
        self.tanshrink = nn.Tanhshrink()
Code Example #29
    def __init__(self, input_size, hidden_size, hidden_depth, output_size,
                 device):
        super(NeuralNetSimple, self).__init__()
        self.fc_in = nn.Linear(input_size, hidden_size).to(device)
        self.fcs = nn.ModuleList()
        self.hidden_depth = hidden_depth
        for i in range(self.hidden_depth):
            self.fcs.append(nn.Linear(hidden_size, hidden_size).to(device))
        self.fc_out = nn.Linear(hidden_size, output_size).to(device)
        # activations:
        self.ths = nn.Tanhshrink().to(device)
        self.l_relu = nn.LeakyReLU().to(device)
Code Example #30
    def __init__(self, n1=2, n2=4, n3=8, n4=2):
        super(Autoencoder, self).__init__()
        self.nl = nn.Tanhshrink()
        # encoder
        self.enc1 = nn.Linear(n1, n2)
        self.enc2 = nn.Linear(n2, n3)
        self.enc3 = nn.Linear(n3, n4)

        # decoder
        self.dec1 = nn.Linear(n4, n3)
        self.dec2 = nn.Linear(n3, n2)
        self.dec3 = nn.Linear(n2, n1)
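The excerpt defines the layers but not the forward pass. A plausible completion (an assumption, not the project's code) applies the Tanhshrink nonlinearity between successive linear layers:

    def forward(self, x):
        # encode to the n4-dimensional bottleneck, then decode back to n1
        z = self.enc3(self.nl(self.enc2(self.nl(self.enc1(x)))))
        return self.dec3(self.nl(self.dec2(self.nl(self.dec1(z)))))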