Example #1
 def __init__(self, c, classes):
     super(myconv, self).__init__()
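     # Five conv stacks with the same structure; each ends in a single-channel 3x3 conv, so the stages can be chained.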
     self.layer1 = nn.Sequential(nn.Conv2d(c, 6, 3, padding=1),
                                 nn.Conv2d(6, 11, 1), nn.Conv2d(11, 5, 1),
                                 nn.Softsign(), nn.Conv2d(5,
                                                          1,
                                                          3,
                                                          padding=1))
     self.layer2 = nn.Sequential(nn.Conv2d(1, 6, 3, padding=1),
                                 nn.Conv2d(6, 11, 1), nn.Conv2d(11, 5, 1),
                                 nn.Softsign(), nn.Conv2d(5,
                                                          1,
                                                          3,
                                                          padding=1))
     self.layer3 = nn.Sequential(nn.Conv2d(1, 6, 3, padding=1),
                                 nn.Conv2d(6, 11, 1), nn.Conv2d(11, 5, 1),
                                 nn.Softsign(), nn.Conv2d(5,
                                                          1,
                                                          3,
                                                          padding=1))
     self.layer4 = nn.Sequential(nn.Conv2d(1, 6, 3, padding=1),
                                 nn.Conv2d(6, 11, 1), nn.Conv2d(11, 5, 1),
                                 nn.Softsign(), nn.Conv2d(5,
                                                          1,
                                                          3,
                                                          padding=1))
     self.layer5 = nn.Sequential(nn.Conv2d(1, 6, 3, padding=1),
                                 nn.Conv2d(6, 11, 1), nn.Conv2d(11, 5, 1),
                                 nn.Softsign(), nn.Conv2d(5,
                                                          1,
                                                          3,
                                                          padding=1))
Example #2
    def __init__(self, controller):
        super(SoftsignPolicy, self).__init__(controller)

        init_s_ = lambda m: init(
            m,
            nn.init.orthogonal_,
            lambda x: nn.init.constant_(x, 0),
            nn.init.calculate_gain("sigmoid"),
        )
        init_r_ = lambda m: init(
            m,
            nn.init.orthogonal_,
            lambda x: nn.init.constant_(x, 0),
            nn.init.calculate_gain("relu"),
        )

        state_dim = controller.state_dim
        h_size = 256
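        # Value head: three Softsign hidden layers, then two ReLU hidden layers, then a scalar output.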
        self.critic = nn.Sequential(
            init_s_(nn.Linear(state_dim, h_size)),
            nn.Softsign(),
            init_s_(nn.Linear(h_size, h_size)),
            nn.Softsign(),
            init_s_(nn.Linear(h_size, h_size)),
            nn.Softsign(),
            init_r_(nn.Linear(h_size, h_size)),
            nn.ReLU(),
            init_r_(nn.Linear(h_size, h_size)),
            nn.ReLU(),
            init_s_(nn.Linear(h_size, 1)),
        )
Example #3
    def __init__(self, env):
        # Construct a network.
        self.network = nn.Sequential(nn.Linear(env.stateSize, 32),
                                     nn.Softsign(), nn.Linear(32, 32),
                                     nn.Softsign(),
                                     nn.Linear(32, env.actionSize))

        # Initialize weights.
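        # network[::2] selects the Linear layers (even indices); the odd entries are Softsign.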
        for layer in self.network[::2]:
            nn.init.xavier_uniform_(layer.weight)
Example #4
    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()

        self.hidden_size = hidden_size

        # self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
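        # The commented-out single Linear is replaced by a small Softsign MLP (same for i2o below).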
        self.i2h = nn.Sequential(nn.Linear(input_size + hidden_size, 50),
                                 nn.Softsign(), nn.Linear(50, 15),
                                 nn.Softsign(), nn.Linear(15, hidden_size))
        # self.i2o = nn.Linear(input_size + hidden_size, output_size)
        self.i2o = nn.Sequential(nn.Linear(input_size + hidden_size, 50),
                                 nn.Softsign(), nn.Linear(50, 15),
                                 nn.Softsign(), nn.Linear(15, output_size))
Example #5
    def __init__(
        self,
        feat_size=19,
        gather_width=64,
        k=2,
        neighbor_threshold=None,
        output_pool_result=False,
        bn_track_running_stats=False,
    ):
        super(PotentialNetPropagation, self).__init__()
        assert neighbor_threshold is not None

        self.neighbor_threshold = neighbor_threshold
        self.bn_track_running_stats = bn_track_running_stats
        self.edge_attr_size = 1

        self.k = k
        self.gather_width = gather_width
        self.feat_size = feat_size
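        # Edge network: lifts the 1-dim edge attribute to a feat_size-dim vector for NNConv.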
        self.edge_network_nn = nn.Sequential(
            nn.Linear(self.edge_attr_size, int(self.feat_size / 2)),
            nn.Softsign(),
            nn.Linear(int(self.feat_size / 2), self.feat_size),
            nn.Softsign(),
        )

        self.edge_network = NNConv(
            self.feat_size,
            self.edge_attr_size * self.feat_size,
            nn=self.edge_network_nn,
            root_weight=True,
            aggr="add",
        )
        self.gate = GatedGraphConv(self.feat_size,
                                   self.k,
                                   edge_network=self.edge_network)

        self.attention = PotentialNetAttention(
            net_i=nn.Sequential(
                nn.Linear(self.feat_size * 2, self.feat_size),
                nn.Softsign(),
                nn.Linear(self.feat_size, self.gather_width),
                nn.Softsign(),
            ),
            net_j=nn.Sequential(nn.Linear(self.feat_size, self.gather_width),
                                nn.Softsign()),
        )
        self.output_pool_result = output_pool_result
        if self.output_pool_result:
            self.global_add_pool = global_add_pool
Example #6
 def __init__(self, num_classes, is_training=True):
     super().__init__()
     self.is_training = is_training
     self.predictor = torch.nn.Sequential(
                                 nn.Linear(4, 512),
                                 nn.BatchNorm1d(512),
                                 nn.Softsign(),
                                 nn.Linear(512, 1024),
                                 nn.BatchNorm1d(1024),
                                 nn.Softsign(),
                                 nn.Linear(1024, 512),
                                 nn.BatchNorm1d(512),
                                 nn.Dropout(0.5),
                                 nn.PReLU(),
                                 nn.Linear(512, num_classes))
Example #7
 def __init__(self, n_input, n_hidden, n_output, n_part, k, device):
     super(BRNN, self).__init__()
     self.n_input = n_input
     self.n_hidden = n_hidden
     self.n_output = n_output
     self.n_part = n_part
     self.k = k  # not used yet :(
     self.n_blocks = 2 * n_part + n_part**2 + n_part**3  # count the total number of blocks
     self.size_partitions = int(n_hidden / n_part)
     self.ss = nn.Softsign()
     self.sp = nn.Softplus()
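     # One encoder / recurrent / modulator block per partition of the hidden state.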
     self.encoder = nn.ModuleList(
         [nn.Linear(n_input, self.size_partitions) for n in range(n_part)])
     self.recurrent = nn.ModuleList([
         nn.Linear(n_hidden, self.size_partitions, bias=False)
         for n in range(n_part)
     ])
     self.modulator = nn.ModuleList(
         [nn.Linear(n_hidden, n_hidden, bias=False) for n in range(n_part)])
     self.decoder = nn.Linear(n_hidden, n_output)
     self.hidden_init = nn.Linear(1, n_hidden)
     self.regularize = nn.Linear(self.n_blocks, 1, bias=False)
     self.regularize.weight.data.uniform_(0, 0.01)
     self.reprojection()
     self.device = device
     self.to(device)
Example #8
    def __init__(self,
                 channelsIn,
                 channelsDilated,
                 channelsOut,
                 baseKernelSize=7):
        super(MultiDilationUnit, self).__init__()

        self.preConv = ConstPaddedConv(channelsIn,
                                       channelsDilated,
                                       kernel_size=3,
                                       padding=1)

        self.dilationModules = torch.nn.ModuleList()
        dilations = [1, baseKernelSize - 2]

        numChannels = 0
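        # p = ((kernelSize - 1) / 2) * dilation keeps every branch's output the same spatial size.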
        for dilation in dilations:
            for kernelSize in [baseKernelSize]:
                p = int(((kernelSize - 1) / 2) * dilation)
                conv = ConstPaddedConv(channelsDilated,
                                       channelsDilated,
                                       kernel_size=kernelSize,
                                       dilation=dilation,
                                       padding=p)
                self.dilationModules.append(conv)
                numChannels += channelsDilated

        self.combineConv = ConstPaddedConv(numChannels,
                                           channelsOut,
                                           kernel_size=1,
                                           padding=0)

        self.nonLin = nn.Softsign()
Example #9
 def create_str_to_activations_converter(self):
     """Creates a dictionary which converts strings to activations"""
     str_to_activations_converter = {
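         # "none" maps to None so callers can skip the activation entirely.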
         "elu": nn.ELU(),
         "hardshrink": nn.Hardshrink(),
         "hardtanh": nn.Hardtanh(),
         "leakyrelu": nn.LeakyReLU(),
         "logsigmoid": nn.LogSigmoid(),
         "prelu": nn.PReLU(),
         "relu": nn.ReLU(),
         "relu6": nn.ReLU6(),
         "rrelu": nn.RReLU(),
         "selu": nn.SELU(),
         "sigmoid": nn.Sigmoid(),
         "softplus": nn.Softplus(),
         "logsoftmax": nn.LogSoftmax(),
         "softshrink": nn.Softshrink(),
         "softsign": nn.Softsign(),
         "tanh": nn.Tanh(),
         "tanhshrink": nn.Tanhshrink(),
         "softmin": nn.Softmin(),
         "softmax": nn.Softmax(dim=1),
         "none": None
     }
     return str_to_activations_converter
Example #10
 def _make_nn_dense(self):
     self.model = nn.Sequential(
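         # Note: Softsign is applied to the raw inputs before the first Linear layer.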
         nn.Softsign(),
         nn.Linear(self.input_f, self.hidden),
         nn.Sigmoid(),
         nn.Linear(self.hidden, self.out)
     )
Example #11
    def __init__(self, alpha=1.0):
        super().__init__()
        self.activations = [
            nn.ELU(),
            nn.Hardshrink(),
            nn.Hardtanh(),
            nn.LeakyReLU(),
            nn.LogSigmoid(),
            nn.ReLU(),
            nn.PReLU(),
            nn.SELU(),
            nn.CELU(),
            nn.Sigmoid(),
            nn.Softplus(),
            nn.Softshrink(),
            nn.Softsign(),
            nn.Tanh(),
            nn.Tanhshrink()
        ]

        self.P = [
            torch.nn.Parameter(torch.randn(1))  # nn.Parameter already sets requires_grad=True
            for _ in self.activations
        ]

        for activation, param in zip(self.activations, self.P):
            activation_name = str(activation).split("(")[0]
            self.add_module(name=activation_name, module=activation)
            self.register_parameter(name=activation_name + "p", param=param)
Example #12
    def __init__(self, opt):
        super(RewardModel, self).__init__()
        self.vocab_size = opt.vocab_size
        self.word_embed_dim = 300
        self.feat_size = opt.feat_size
        self.kernel_num = 512
        self.kernels = [2, 3, 4, 5]
        self.out_dim = len(
            self.kernels) * self.kernel_num + self.word_embed_dim
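        # One bank of kernel_num feature maps per kernel size, concatenated with the word embedding.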

        self.emb = nn.Embedding(self.vocab_size, self.word_embed_dim)
        self.emb.weight.data.copy_(
            torch.from_numpy(np.load("VIST/embedding.npy")))

        self.proj = nn.Linear(self.feat_size, self.word_embed_dim)

        # nn.ModuleList registers the convs as submodules; a plain list would hide them from .parameters().
        self.convs = nn.ModuleList([
            nn.Conv2d(1, self.kernel_num, (k, self.word_embed_dim))
            for k in self.kernels
        ])

        self.dropout = nn.Dropout(opt.dropout)

        self.fc = nn.Linear(self.out_dim, 1, bias=True)

        if opt.activation.lower() == "linear":
            self.activation = None
        elif opt.activation.lower() == "sign":
            self.activation = nn.Softsign()
        elif self.activation.lower() == "tahn":
            self.activation = nn.Tanh()
Example #13
    def __init__(self, in_features, cond_features):
        """Constructor method
        """
        super(AffineCouplingLayer, self).__init__()
        # assert in_features % 2 == 0, '# input features must be evenly split,'\
        #     'but got {} features'.format(in_features)
        if in_features % 2 == 0:
            in_channels = in_features // 2 + cond_features
            out_channels = in_features
        else:
            # chunks will be (2, 1) if in_features == 3
            in_channels = in_features // 2 + 1 + cond_features
            out_channels = in_features - 1

        # Initialize coupling network (Dense Block)
        num_layers = 2
        growth_rate = 1
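        # Coupling network: dense block -> ReLU -> zero-initialized conv, so the coupling starts near the identity.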
        self.coupling_nn = nn.Sequential()
        self.coupling_nn.add_module(
            'dense_block',
            NoNormDenseBlock(num_layers,
                             in_channels,
                             growth_rate=growth_rate,
                             drop_rate=0.,
                             bottleneck=False))
        self.coupling_nn.add_module('relu1', nn.ReLU(inplace=True))
        self.coupling_nn.add_module(
            'zero_conv',
            Conv2dZeros(in_channels + growth_rate * num_layers, out_channels))

        self.softsign = nn.Softsign()
Example #14
    def __init__(self, cfg):
        super(IBnet, self).__init__()

        self.cfg = cfg

        if cfg['ACTIVATION'] == 'relu':
            act_fun = nn.ReLU()
        elif cfg['ACTIVATION'] == 'tanh':
            act_fun = nn.Tanh()
        elif cfg['ACTIVATION'] == 'softsign':
            act_fun = nn.Softsign()
        elif cfg['ACTIVATION'] == 'softplus':
            act_fun = nn.Softplus()
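        else:
            # Guard against typos in cfg['ACTIVATION']; otherwise act_fun would be unbound below.
            raise ValueError("unsupported ACTIVATION: {}".format(cfg['ACTIVATION']))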

        self.layer1 = nn.Sequential(nn.Linear(12, cfg['layersizes'][0]),
                                    act_fun)

        self.layer2 = nn.Sequential(
            nn.Linear(cfg['layersizes'][0], cfg['layersizes'][1]), act_fun)

        self.layer3 = nn.Sequential(
            nn.Linear(cfg['layersizes'][1], cfg['layersizes'][2]), act_fun)

        self.layer4 = nn.Sequential(
            nn.Linear(cfg['layersizes'][2], cfg['layersizes'][3]), act_fun)

        self.layer5 = nn.Sequential(
            nn.Linear(cfg['layersizes'][3], cfg['layersizes'][4]), act_fun)

        self.layer6 = nn.Sequential(
            nn.Linear(cfg['layersizes'][4], cfg['NUM_CLASSES']), act_fun)

        self.apply(init_weights)
Example #15
def get_activation(act):
    """Get the activation based on the act string

    Parameters
    ----------
    act: str or callable function

    Returns
    -------
    ret: callable function
    """
    if act is None:
        return lambda x: x
    if isinstance(act, str):
        if act == 'leaky':
            return nn.LeakyReLU(0.1)
        elif act == 'relu':
            return nn.ReLU()
        elif act == 'tanh':
            return nn.Tanh()
        elif act == 'sigmoid':
            return nn.Sigmoid()
        elif act == 'softsign':
            return nn.Softsign()
        else:
            raise NotImplementedError
    else:
        return act
Example #16
 def __init__(self, weight: Optional[torch.Tensor] = None, size_average=None, ignore_index: int = -100,
              reduce=None, reduction: str = 'mean') -> None:
     super(CrossEntropyRuntimeLoss, self).__init__(weight, size_average, reduce, reduction)
     self.ignore_index = ignore_index
     self.k_dims = 0.01
     self.k_depth = 3.0
     self.softsign = nn.Softsign()
Example #17
 def outputs(self,
             in_channels,
             out_channels,
             kernel_size=3,
             stride=1,
             padding=0,
             bias=False,
             batchnorm=False):
     if batchnorm:
         layer = nn.Sequential(
             nn.Conv3d(in_channels,
                       out_channels,
                       kernel_size,
                       stride=stride,
                       padding=padding,
                       bias=bias), nn.BatchNorm3d(out_channels), nn.Tanh())
     else:
         layer = nn.Sequential(
             nn.Conv3d(in_channels,
                       out_channels,
                       kernel_size,
                       stride=stride,
                       padding=padding,
                       bias=bias), nn.Softsign())
     return layer
Example #18
    def __init__(self, C, stride, rank):
        super(OperationLayer, self).__init__()
        self._ops = nn.ModuleList()
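        # One instance of every candidate operation from the global Operations list.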
        for o in Operations:
            op = OPS[o](C, stride, False)
            self._ops.append(op)

        self.op_num = len(Operations)

        self.w_cp1 = torch.nn.Parameter(
            torch.Tensor(rank, C * self.op_num, rank))
        self.w_cp2 = torch.nn.Parameter(
            torch.Tensor(rank, C * self.op_num, rank))
        self.w_cp3 = torch.nn.Parameter(
            torch.Tensor(rank, C * self.op_num, rank))
        self.w_cp4 = torch.nn.Parameter(
            torch.Tensor(rank, C * self.op_num, rank))

        self.w_out3 = torch.nn.Parameter(torch.randn(rank, C, rank))

        with torch.no_grad():
            self.w_cp1.normal_(0, 1 / (C * self.op_num * rank))
            self.w_cp2.normal_(0, 1 / (C * self.op_num * rank))
            self.w_cp3.normal_(0, 1 / (C * self.op_num * rank))
            self.w_cp4.normal_(0, 1 / (C * self.op_num * rank))
            self.w_out3.normal_(0, 1 / C)

        self.softsign = nn.Softsign()
        self.norm = nn.InstanceNorm2d(C)
Example #19
 def __init__(self):
     super(classification, self).__init__()
     self.classifier1 = nn.Sequential(
         nn.Linear(in_features=28*28, out_features=128),
         nn.Softsign(),
     )
     
     self.classifier2 = nn.Sequential(
         nn.Linear(in_features=128, out_features=64),
         nn.Softsign(),
     )
     
     self.classifier3 = nn.Sequential(
         nn.Linear(in_features=64, out_features=10),
         nn.LogSoftmax(dim=1),
     )
Example #20
def str2act(s):
    if s == 'none':
        return None
    elif s == 'hardtanh':
        return nn.Hardtanh()
    elif s == 'sigmoid':
        return nn.Sigmoid()
    elif s == 'relu6':
        return nn.ReLU6()
    elif s == 'tanh':
        return nn.Tanh()
    elif s == 'tanhshrink':
        return nn.Tanhshrink()
    elif s == 'hardshrink':
        return nn.Hardshrink()
    elif s == 'leakyrelu':
        return nn.LeakyReLU()
    elif s == 'softshrink':
        return nn.Softshrink()
    elif s == 'softsign':
        return nn.Softsign()
    elif s == 'relu':
        return nn.ReLU()
    elif s == 'prelu':
        return nn.PReLU()
    elif s == 'softplus':
        return nn.Softplus()
    elif s == 'elu':
        return nn.ELU()
    elif s == 'selu':
        return nn.SELU()
    else:
        raise ValueError("[!] Invalid activation function.")
Example #21
    def __init__(self,
                 encoder,
                 decoder,
                 global_attention,
                 memory_size=(1000, 2, 600),
                 noise_step=1e-5):
        """

        :param encoder:
        :param decoder:
        :param outlayer:
        :param streaming: must include 'df': df, 'encoder_data': cols, 'decoder_data': cols
        """
        super(model, self).__init__()
        self.enc = encoder
        self.dec = decoder
        self.gATTN = global_attention
        self.one_hot_random_ = torch.eye(self.dec.n_classes)
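        # torch.eye yields one one-hot row per decoder class.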

        ##### Memory functions
        # Must be defined manually
        # One way to do this for a variable length memory stack, could look like
        #  the following, external to the class
        #   stop = int(len(df['sent'].unique()) / 3)
        #   a, b, c = df['sent'].unique()[:stop], df['sent'].unique()[stop:stop * 2], df['sent'].unique()[stop * 2:len(df['sent'].unique())]
        #   sent_dic = sum([[(i, list(it).index(i)) for i in it] for it in [a,b,c]], [])
        #   sent_dic = {i[0]:i[1] for i in sent_dic}
        # This is fairly efficient overall, though not perfectly so.
        # We'll use this to artificially add noise to our training of the cosBahdanau function.
        self.softsign = nn.Softsign()
        self.pull_memories = False
        self.noise_step = noise_step
        self.memory = torch.zeros(size=memory_size, requires_grad=False)
Example #22
 def __init__(self):
     super(NNActivationModule, self).__init__()
     self.activations = nn.ModuleList([
         nn.ELU(),
         nn.Hardshrink(),
         nn.Hardsigmoid(),
         nn.Hardtanh(),
         nn.Hardswish(),
         nn.LeakyReLU(),
         nn.LogSigmoid(),
         # nn.MultiheadAttention(),
         nn.PReLU(),
         nn.ReLU(),
         nn.ReLU6(),
         nn.RReLU(),
         nn.SELU(),
         nn.CELU(),
         nn.GELU(),
         nn.Sigmoid(),
         nn.SiLU(),
         nn.Mish(),
         nn.Softplus(),
         nn.Softshrink(),
         nn.Softsign(),
         nn.Tanh(),
         nn.Tanhshrink(),
         # nn.Threshold(0.1, 20),
         nn.GLU(),
         nn.Softmin(dim=-1),
         nn.Softmax(dim=-1),
         nn.Softmax2d(),
         nn.LogSoftmax(dim=-1),
         # nn.AdaptiveLogSoftmaxWithLoss(),
     ])
Example #23
    def __init__(self, layers=[2, 2, 2, 2], num_classes=10):
        super(GResNet18, self).__init__()
        self.inplanes = 64
        self.dilation = 1
        self.groups = 1
        self.base_width = 64
        self.contrasts = 2**np.linspace(-2, 2, 5)
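        # Five contrast scales, log-spaced between 2**-2 and 2**2.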

        self.lift = Lift(self.contrasts)
        self.conv1 = LiftedConv(3,
                                self.inplanes,
                                self.contrasts,
                                7,
                                stride=2,
                                padding=3,
                                bias=None)
        self.bn1 = LiftedBatchNorm2d(self.inplanes, self.contrasts)
        self.activation = nn.Softsign()
        self.pool = nn.AvgPool3d(kernel_size=(1, 3, 3),
                                 stride=(1, 2, 2),
                                 padding=(0, 1, 1),
                                 count_include_pad=False)

        self.dropout = torch.nn.Dropout3d(p=0.25)
        self.layer1 = self._make_layer(64, layers[0])
        self.layer2 = self._make_layer(128, layers[1], stride=2)
        self.layer3 = self._make_layer(256, layers[2], stride=2)
        self.layer4 = self._make_layer(512, layers[3], stride=2)
        self.final_avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * len(self.contrasts), num_classes)
Example #24
    def __init__(self,
                 C_in,
                 C_out,
                 rank=16,
                 order=2,
                 share_core=True,
                 add1=False):
        super(tensor1x1conv_CP, self).__init__()

        self.order = order
        self.share_core = share_core
        self.add1 = add1
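        # add1 appends a constant-1 input channel; share_core reuses one core tensor across all orders.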

        if add1:
            C_in += 1
        if share_core:
            self.w_core = torch.nn.Parameter(torch.Tensor(rank, C_in, C_out))
        else:
            self.w_core = torch.nn.Parameter(
                torch.Tensor(order, rank, C_in, C_out))

        self.w_out = torch.nn.Parameter(torch.randn(rank))

        with torch.no_grad():
            self.w_core.normal_(0, 1 / (C_in))
            self.w_out.normal_(0, 1 / rank)

        self.softsign = nn.Softsign()
        self.norm = nn.InstanceNorm2d(C_out)
Example #25
def get_activation(activation_type):
    if activation_type == "relu":
        return nn.ReLU()
    elif activation_type == "relu6":
        return nn.ReLU6()
    elif activation_type == "prelu":
        return nn.PReLU()
    elif activation_type == "selu":
        return nn.SELU()
    elif activation_type == "celu":
        return nn.CELU()
    elif activation_type == "gelu":
        return nn.GELU()
    elif activation_type == "sigmoid":
        return nn.Sigmoid()
    elif activation_type == "softplus":
        return nn.Softplus()
    elif activation_type == "softshrink":
        return nn.Softshrink()
    elif activation_type == "softsign":
        return nn.Softsign()
    elif activation_type == "tanh":
        return nn.Tanh()
    elif activation_type == "tanhshrink":
        return nn.Tanhshrink()
    else:
        raise ValueError("Unknown activation type {}".format(activation_type))
Example #26
        def layer(in_size, out_size, final=False):
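            # Hidden layers get Softsign + Dropout; the final layer stays linear.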
            layers = [nn.Linear(in_size, out_size)]

            if not final:
                layers.append(nn.Softsign())
                layers.append(nn.Dropout(0.1))

            return layers
Example #27
 def __init__(self, env):
     self.network = nn.Sequential(
         nn.Linear(env.stateSize, 16),
         nn.Softsign(),
         # nn.Linear(16, 16),
         # nn.Softsign(),
         nn.Linear(16, env.actionSize),
         nn.Softmax(dim=-1))  # Note the softmax at the end!
Example #28
    def __init__(self):
        nn.Module.__init__(self)

        self.actor = ActorModel()
        self.soft_sign = nn.Softsign()
        self.soft_plus = nn.Softplus(beta=1, threshold=20)
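        # beta=1 and threshold=20 are the PyTorch defaults for Softplus, written out explicitly.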

        self.scale = nn.Parameter(torch.ones(1))
Example #29
 def __init__(self, layer: nn.Module, tau=0.5, c=0.1, device='cuda'):
     super().__init__()
     self.layer = layer
     self.tau = tau
     self.itau = 1 / (1 - tau)
     self.c = c
     self.softsign = nn.Softsign()
     self.device = device
Example #30
 def __init__(self):
     super().__init__()
     self.lstm = nn.LSTM(1, 100, 1)  # LSTM(input_size, hidden_size, num_layers)
     # self.lstm = nn.LSTM(24, 512, 1)
     self.fc1 = nn.Linear(100, 100)
     self.sigmoid = nn.Softsign()  # despite the attribute name, this is Softsign, not Sigmoid
     self.fc2 = nn.Linear(100, 3)
     self.dropout = nn.Dropout(p=0.2)
     self.softmax = nn.Softmax(dim=-1)