Example #1
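All of the snippets below are __init__ methods only; the surrounding classes, forward passes, and imports are not part of the listing. They assume roughly the following context (a sketch under stated assumptions: the dain import path follows the DAIN reference implementation, and the FC/Conv helpers used later are defined elsewhere in the originating projects). DAIN's reference forward pass takes input shaped (batch, input_dim, seq_len) and returns a normalized tensor of the same shape:

import torch
import torch.nn as nn
from dain import DAIN_Layer  # assumed import path (DAIN reference implementation)

# quick shape check: DAIN_Layer is shape-preserving
x = torch.randn(32, 15, 144)                          # hypothetical sizes: (batch, channels, time)
dean = DAIN_Layer(input_dim=15, mode='adaptive_avg')
print(dean(x).shape)                                  # torch.Size([32, 15, 144])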
    def __init__(self, in_ch, n_outputs, p=0.05):
        super(RecurrentNet, self).__init__()

        # DAIN normalization layer, sized for in_ch - 1 input channels
        self.dean = DAIN_Layer(input_dim=in_ch - 1,
                               mode='adaptive_scale',
                               mean_lr=0.0001,
                               gate_lr=0.01,
                               scale_lr=0.001)

        # two-layer bidirectional LSTM; output features per step: 128 * 2
        self.rnn1 = nn.LSTM(in_ch, 128, num_layers=2, bidirectional=True, batch_first=True)

        self.dropout = nn.Dropout(p=p)
        self.dropout2 = nn.Dropout(p=p * 2)
        self.linear = nn.Linear(128 * 2 + 2048, n_outputs)
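The 128 * 2 in the final linear layer is the BiLSTM feature size (128 hidden units per direction); the extra 2048 inputs suggest an auxiliary feature vector concatenated in the forward pass, which is not shown here. A quick shape check with hypothetical sizes:

import torch
import torch.nn as nn

rnn = nn.LSTM(6, 128, num_layers=2, bidirectional=True, batch_first=True)
x = torch.randn(32, 50, 6)  # (batch, time, features), since batch_first=True
out, _ = rnn(x)
print(out.shape)            # torch.Size([32, 50, 256]): 128 per direction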
Example #2

    def __init__(self, mode='adaptive_avg', mean_lr=0.00001, gate_lr=0.001, scale_lr=0.0001):
        super(MLP, self).__init__()

        # MLP head over the flattened 15 * 144 normalized input
        self.base = nn.Sequential(
            nn.Linear(15 * 144, 512),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(512, 3)
        )

        self.dean = DAIN_Layer(mode=mode, mean_lr=mean_lr, gate_lr=gate_lr, scale_lr=scale_lr)
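With no input_dim passed, this DAIN_Layer uses the reference default of 144 channels, which matches the 15 * 144 flattened input of the MLP head. A plausible forward pass (an assumption, not part of the snippet) normalizes, flattens, and classifies:

    def forward(self, x):
        x = self.dean(x)                              # hypothetical input: (batch, 144, 15)
        x = x.contiguous().view(x.size(0), 15 * 144)  # flatten for the MLP head
        return self.base(x)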
Example #3

    def __init__(self, in_ch, n_outputs, p=0.05):
        super(MLP, self).__init__()

        self.dean = DAIN_Layer(input_dim=1,
                               mode='adaptive_avg',
                               mean_lr=0.0001,
                               gate_lr=0.01,
                               scale_lr=0.001)
        self.dropout = nn.Dropout(p)
        self.dropout2 = nn.Dropout(p * 2)
        # NOTE: time_steps is not defined in this snippet; it comes from the enclosing scope of the original project
        self.linear = nn.Linear(time_steps - 1 + 1 + 2048, n_outputs)
Example #4
    def __init__(self, in_ch, n_outputs, p=0.05):
        super(MLP, self).__init__()

        self.dean = DAIN_Layer(input_dim=in_ch - 1,
                               mode='adaptive_scale',
                               mean_lr=0.0001,
                               gate_lr=0.01,
                               scale_lr=0.001)

        self.fc1 = FC((in_ch - 1) * (time_steps - 1) + 1, 64)  # time_steps comes from the enclosing scope
        self.fc2 = FC(64, 128)
        self.fc3 = FC(128, 256)

        self.dropout = nn.Dropout(p=p)
        self.dropout2 = nn.Dropout(p=p * 2)
        self.linear = nn.Linear(256 + 2048, n_outputs)
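FC is not defined anywhere in this listing. A hypothetical stand-in consistent with how it is called above, i.e. a linear layer followed by an activation:

import torch
import torch.nn as nn

class FC(nn.Module):
    # hypothetical helper, not from the original project
    def __init__(self, in_features, out_features):
        super(FC, self).__init__()
        self.fc = nn.Linear(in_features, out_features)

    def forward(self, x):
        return torch.relu(self.fc(x))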
Example #5
    def __init__(self, in_ch, n_outputs, p=0.05):
        super(TNet, self).__init__()

        self.dean = DAIN_Layer(input_dim=in_ch - 1,
                               mode='adaptive_scale',
                               mean_lr=0.0001,
                               gate_lr=0.01,
                               scale_lr=0.001)

        self.conv = nn.Sequential(nn.Conv1d(in_ch, 64, 5, 1, 2), nn.ReLU())
        self.encoder_layer = nn.TransformerEncoderLayer(64, 8)
        self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, 1)
        # NOTE: despite the name, this is max pooling (kept as in the original snippet)
        self.avg_pool = nn.AdaptiveMaxPool1d(1)
        self.dropout = nn.Dropout(p=p)
        self.dropout2 = nn.Dropout(p=p * 2)
        self.linear = nn.Linear(64 + 2048, n_outputs)
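nn.TransformerEncoderLayer(64, 8) defaults to batch_first=False, so the (batch, 64, time) output of the Conv1d has to be permuted to (time, batch, 64) before encoding and permuted back before pooling. A runnable shape walkthrough with hypothetical sizes:

import torch
import torch.nn as nn

conv = nn.Sequential(nn.Conv1d(5, 64, 5, 1, 2), nn.ReLU())
enc = nn.TransformerEncoder(nn.TransformerEncoderLayer(64, 8), 1)
pool = nn.AdaptiveMaxPool1d(1)

x = torch.randn(32, 5, 100)       # (batch, in_ch, time)
h = conv(x)                       # (batch, 64, time)
h = h.permute(2, 0, 1)            # (time, batch, 64) for the encoder
h = enc(h)
h = h.permute(1, 2, 0)            # back to (batch, 64, time)
print(pool(h).squeeze(-1).shape)  # torch.Size([32, 64])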
Example #6
    def __init__(self, in_ch, kernel, n_outputs, p=0.05):
        super(ConvNet, self).__init__()

        self.dean = DAIN_Layer(input_dim=in_ch - 1,
                               mode='adaptive_scale',
                               mean_lr=0.0001,
                               gate_lr=0.01,
                               scale_lr=0.001)

        pad = kernel // 2
        self.conv1 = Conv(in_ch, 64, kernel, pad)
        self.conv2 = Conv(64, 128, kernel, pad)
        self.conv3 = Conv(128, 256, kernel, pad)
        self.conv4 = Conv(256, 256, kernel, pad)
        self.avg_pool = nn.AdaptiveAvgPool1d(1)
        self.max_pool = nn.AdaptiveMaxPool1d(1)

        self.dropout = nn.Dropout(p=p)
        self.dropout2 = nn.Dropout(p=p * 2)
        self.fc1 = FC(256, 64)
        self.linear = nn.Linear(256 + 2048, n_outputs)
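Like FC, the Conv block is defined outside this listing; given the (in_ch, out_ch, kernel, pad) call signature and the pad = kernel // 2 computation above, a hypothetical stand-in is a length-preserving Conv1d followed by an activation:

import torch
import torch.nn as nn

class Conv(nn.Module):
    # hypothetical helper, not from the original project
    def __init__(self, in_ch, out_ch, kernel, pad):
        super(Conv, self).__init__()
        self.conv = nn.Conv1d(in_ch, out_ch, kernel, padding=pad)

    def forward(self, x):
        return torch.relu(self.conv(x))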